Dataset columns: python_code, repo_name, file_path
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for stateful_random_ops.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from absl.testing import parameterized import numpy as np from tensorflow.python.distribute import values as dist_values from tensorflow.python.distribute.mirrored_strategy import MirroredStrategy from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.kernel_tests.random import util as \ random_test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_random_ops from tensorflow.python.ops import gen_stateful_random_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import stateful_random_ops as \ random from tensorflow.python.ops import variables from tensorflow.python.platform import test g_seeded = None g_unseeded = None GPU_FLOATS = [dtypes.float16, dtypes.float32, dtypes.float64] CPU_FLOATS = GPU_FLOATS + [dtypes.bfloat16] FLOATS = GPU_FLOATS INTS = [dtypes.int32, dtypes.int64] class StatefulRandomOpsTest(test.TestCase, parameterized.TestCase): def testCreateRNGStateIntSeed(self): """Tests `create_rng_state` when `seed` is int.""" # using leading 'F' to test overflow tolerance state = random.create_rng_state(0xFFFF222233334444FFAA666677778888, random.RNG_ALG_PHILOX) self.assertAllEqual( list(map(random._uint_to_int, [0xFFAA666677778888, 0xFFFF222233334444] + [0] * (random.PHILOX_STATE_SIZE - 2))), state) def assertAllDifferent(self, tensors): """Checks that there are no duplicate elements anywhere among the tensors. Args: tensors: a list of tensors. They can have different shapes. """ tensors = [array_ops.reshape(t, shape=[-1]) for t in tensors] ls = array_ops.concat(tensors, axis=0).numpy().tolist() self.assertAllEqual(len(ls), len(set(ls))) @test_util.run_v2_only def testNonDeterministicInts(self): """Tests that non_deterministic_ints returns different results every time. This test is flaky, but with very low probability of failing. """ shape = [2, 3] dtype = dtypes.int64 a = random.non_deterministic_ints(shape=shape, dtype=dtype) self.assertAllEqual(shape, a.shape) self.assertEqual(dtype, a.dtype) b = random.non_deterministic_ints(shape, dtype=dtype) self.assertAllDifferent([a, b]) @test_util.run_v2_only def testBatchSeeds(self): """Test for batch seeds. 
""" shape = [2, 3] count = 6 gen = random.Generator.from_seed(1234) keys1 = gen._make_int64_keys(shape=shape) keys2 = gen._make_int64_keys(shape=shape) self.assertAllDifferent([keys1, keys2]) seeds1 = gen.make_seeds(count=count) seeds2 = gen.make_seeds(count=count) self.assertAllDifferent([seeds1[0, :], seeds2[0, :]]) gens = gen.split(count=count) self.assertAllEqual(count, len(gens)) randoms = [g.uniform_full_int(shape=shape, dtype=dtypes.int32) for g in gens] self.assertAllDifferent(randoms) # Tests graph mode. @def_function.function def f(): return gen.make_seeds(count=count) for _ in range(3): f() def assertRegex(self, pattern, text): self.assertTrue( re.search(pattern, text), "Can't find pattern '%s' in text '%s'" % (pattern, text)) @test_util.run_v2_only @test_util.run_cuda_only def testCrossDeviceSplit(self): """Tests that a CPU RNG can split into RNGs on GPU. """ with ops.device("/device:CPU:0"): gen = random.Generator.from_seed(1234) # gen is on CPU self.assertRegex("CPU", gen.state.device) with ops.device(test_util.gpu_device_name()): gens = gen.split(count=10) # gens are on GPU self.assertRegex("GPU", gens[0].state.device) @test_util.run_v2_only def testReset(self): shape = [2, 3] gen = random.Generator.from_seed(0) for resetter in [ lambda g: g.reset(state=[1, 2, 3]), lambda g: g.reset_from_seed(1234), lambda g: g.reset_from_key_counter(key=1, counter=[2, 3]), ]: resetter(gen) expected_normal = gen.normal(shape) @def_function.function def f(resetter): resetter(gen) return gen.normal(shape) def check_results(expected_normal, v): self.assertAllEqual(expected_normal, v) check_results(expected_normal, f(resetter)) check_results(expected_normal, f(resetter)) @test_util.run_v2_only def testGeneratorCreation(self): """Tests generator creation, in both eager and tf.function. The interaction between Generator creation and defun should be the same as tf.Variable. 
""" shape = [2, 3] alg = random.RNG_ALG_PHILOX for constructor in [ lambda: random.Generator(state=[1, 2, 3], alg=alg), lambda: random.Generator.from_seed(1234), lambda: random.Generator.from_key_counter( # pylint: disable=g-long-lambda key=1, counter=[2, 3], alg=alg), ]: gen = constructor() # Tests tf.function expected_normal1 = gen.normal(shape) expected_normal2 = gen.normal(shape) global g_seeded g_seeded = None @def_function.function def f(constructor): global g_seeded # defun'ed function should only create variables once if g_seeded is None: g_seeded = constructor() return g_seeded.normal(shape) def check_results(expected_normal, v): self.assertAllEqual(expected_normal, v) check_results(expected_normal1, f(constructor)) check_results(expected_normal2, f(constructor)) @test_util.run_v2_only def testGeneratorCreationUnseeded(self): """Tests generator creation, the unseeded case.""" shape = [2, 3] global g_unseeded g_unseeded = None @def_function.function def f(): global g_unseeded # defun'ed function should only create variables once if g_unseeded is None: g_unseeded = random.Generator.from_non_deterministic_state() return g_unseeded.normal(shape) self.assertAllEqual(shape, f().shape) @test_util.run_v2_only def testGeneratorCopy(self): """Tests copying a generator.""" g = random.Generator.from_seed(0) g_copy = random.Generator(g) self.assertAllEqual(g.algorithm, g_copy.algorithm) self.assertAllEqual(g.state.read_value(), g_copy.state.read_value()) # Tests tf.function global g_seeded g_seeded = None # Do the same in tf.function @def_function.function def f(): global g_seeded # defun'ed function should only create variables once if g_seeded is None: g_seeded = random.Generator(g) self.assertAllEqual(g.algorithm, g_seeded.algorithm) self.assertAllEqual(g.state.read_value(), g_seeded.state.read_value()) f() @test_util.run_v1_only( ("This test is specifically for checking TF1 compatibility. " "It cannot run under TF2.")) def testTF1(self): seed = 1234 shape = [2, 3] expected_normal1 = constant_op.constant( [[0.9356609, 1.0854305, -0.93788373], [-0.50615472, 1.31697023, 0.71375787]], dtype=dtypes.float32) expected_normal2 = constant_op.constant( [[-0.3964749, 0.8369565, -0.30946946], [1.1206646, 1.00852597, -0.10185789]], dtype=dtypes.float32) with self.cached_session() as sess: gen1 = random.Generator.from_seed(seed) gen2 = random.Generator.from_non_deterministic_state() sess.run((gen1._state_var.initializer, gen2._state_var.initializer)) r1 = gen1.normal(shape, dtype=dtypes.float32) r2 = gen2.normal(shape, dtype=dtypes.float32) def f(): return sess.run((r1, r2)) def check_results(expected_normal, v1, v2): self.assertAllClose(expected_normal, v1, rtol=1e-5, atol=1e-5) self.assertAllEqual(shape, v2.shape) check_results(expected_normal1, *f()) check_results(expected_normal2, *f()) @test_util.run_v2_only @test_util.also_run_as_tf_function def testEagerAndDefun(self): """A simple test to make sure the op works in eager and defunned mode.""" random.get_global_generator().normal((3,)) @test_util.run_v2_only def testOpSeedSelectionAfterSetSeed(self): """Tests that op-seed selection is reset after reseting global generator. 
Fixing GitHub issue 9171: https://github.com/tensorflow/tensorflow/issues/9171 """ shape = (3,) random.get_global_generator().reset_from_seed(1) a = random.get_global_generator().normal(shape) random.get_global_generator().reset_from_seed(1) b = random.get_global_generator().normal(shape) self.assertAllEqual(a, b) # Now do the above again using accelerated ('defun'ed) computation @def_function.function def f(): return random.get_global_generator().normal(shape) random.get_global_generator().reset_from_seed(1) c = f() random.get_global_generator().reset_from_seed(1) d = f() self.assertAllEqual(c, d) self.assertAllEqual(a, c) @test_util.run_v2_only def testOpSeedSelectionNotSensitive(self): """Test that op-seed selection is not sensitive to trivial changes. Test that op-seed selection is not sensitive to trivial computation (i.e. graph) changes. Fixing b/32087099 """ def f(include_print): shape = constant_op.constant([5]) if include_print: shape = logging_ops.Print(shape, [shape]) return random.get_global_generator().normal(shape) def compare(fst_includes_print, snd_includes_print): random.get_global_generator().reset_from_seed(50) fst = f(fst_includes_print) random.get_global_generator().reset_from_seed(50) snd = f(snd_includes_print) self.assertAllEqual(fst, snd) # Now do the above again using accelerated (defunned) 'f'. # Running 'f' with two different Boolean arguments should cause # two different graphs to be generated, hence demonstrating the # insensitivity to graph changes. f_acc = def_function.function(f) random.get_global_generator().reset_from_seed(50) fst = f_acc(fst_includes_print) random.get_global_generator().reset_from_seed(50) snd = f_acc(snd_includes_print) self.assertAllEqual(fst, snd) compare(False, False) compare(True, True) compare(True, False) @test_util.run_v2_only def testKey(self): key = 1234 gen = random.Generator(state=[0, 0, key], alg=random.RNG_ALG_PHILOX) got = gen.key self.assertAllEqual(key, got) @def_function.function def f(): return gen.key got = f() self.assertAllEqual(key, got) @test_util.run_v2_only def testSkip(self): key = 1234 counter = 5678 gen = random.Generator(state=[counter, 0, key], alg=random.RNG_ALG_PHILOX) delta = 432 gen.skip(delta) new_counter = gen._state_var[0] self.assertAllEqual(counter + delta * 256, new_counter) def _sameAsOldRandomOps(self, device, floats): def compare(dtype, old, new): seed1, seed2 = 79, 25 # note how the two seeds for the old op correspond to the seed for the new # op with ops.device(device): gen = random.Generator(state=[0, seed2, seed1], alg=random.RNG_ALG_PHILOX) # create a graph for the old op in order to call it many times @def_function.function def run_old(): with ops.device(device): return old(dtype, seed1, seed2) def run_new(): with ops.device(device): return new(dtype, gen) for _ in range(100): self.assertAllEqual(run_old(), run_new()) shape = constant_op.constant([4, 7]) minval = 128 maxval = 256 # passing `dtype` around to compress go/gpylint-faq#cell-var-from-loop and # go/gpylint-faq#undefined-loop-variable def old_normal(dtype, seed1, seed2): return gen_random_ops.random_standard_normal( shape, dtype=dtype, seed=seed1, seed2=seed2) def new_normal(dtype, gen): return gen._standard_normal(shape, dtype=dtype) def old_truncated_normal(dtype, seed1, seed2): return gen_random_ops.truncated_normal( shape, dtype=dtype, seed=seed1, seed2=seed2) def new_truncated_normal(dtype, gen): return gen._truncated_normal(shape, dtype=dtype) def old_uniform_int(dtype, seed1, seed2): minval2 = constant_op.constant(minval, 
dtype=dtype) maxval2 = constant_op.constant(maxval, dtype=dtype) return gen_random_ops.random_uniform_int( shape, minval=minval2, maxval=maxval2, seed=seed1, seed2=seed2) def new_uniform_int(dtype, gen): return gen.uniform(shape, minval=minval, maxval=maxval, dtype=dtype) def old_uniform(dtype, seed1, seed2): return gen_random_ops.random_uniform( shape, dtype=dtype, seed=seed1, seed2=seed2) def new_uniform(dtype, gen): return gen._uniform(shape, dtype=dtype) for dtype in floats: compare(dtype, old_normal, new_normal) compare(dtype, old_truncated_normal, new_truncated_normal) compare(dtype, old_uniform, new_uniform) for dtype in INTS: compare(dtype, old_uniform_int, new_uniform_int) @test_util.run_v2_only def testSameAsOldRandomOpsCPU(self): """Tests that the generated numbers are the same as the old random_ops.py. The CPU version. """ self._sameAsOldRandomOps("/device:CPU:0", CPU_FLOATS) @test_util.run_v2_only @test_util.run_cuda_only def testSameAsOldRandomOpsGPU(self): """Tests that the generated numbers are the same as the old random_ops.py. The GPU version. """ self._sameAsOldRandomOps(test_util.gpu_device_name(), GPU_FLOATS) @parameterized.parameters(INTS + [dtypes.uint32, dtypes.uint64]) @test_util.run_v2_only @test_util.run_cuda_only def testGPUEqualsCPU(self, dtype): """Tests that GPU and CPU generate the same integer outputs.""" seed = 1234 shape = [315, 49] with ops.device("/device:CPU:0"): cpu = random.Generator.from_seed(seed).uniform_full_int( shape=shape, dtype=dtype) with ops.device(test_util.gpu_device_name()): gpu = random.Generator.from_seed(seed).uniform_full_int( shape=shape, dtype=dtype) self.assertAllEqual(cpu, gpu) @parameterized.parameters(FLOATS + INTS) @test_util.run_v2_only def testUniformIsInRange(self, dtype): minval = 2 maxval = 33 size = 1000 gen = random.Generator.from_seed(1234) x = gen.uniform( shape=[size], dtype=dtype, minval=minval, maxval=maxval).numpy() self.assertTrue(np.all(x >= minval)) self.assertTrue(np.all(x < maxval)) @parameterized.parameters(FLOATS) @test_util.run_v2_only def testNormalIsFinite(self, dtype): gen = random.Generator.from_seed(1234) x = gen.normal(shape=[10000], dtype=dtype).numpy() self.assertTrue(np.all(np.isfinite(x))) @parameterized.parameters(FLOATS + INTS) @test_util.run_v2_only def testDistributionOfUniform(self, dtype): """Use Pearson's Chi-squared test to test for uniformity.""" n = 1000 seed = 12 gen = random.Generator.from_seed(seed) maxval = 1 if dtype.is_integer: maxval = 100 x = gen.uniform(shape=[n], maxval=maxval, dtype=dtype).numpy() if maxval > 1: # Normalize y to range [0, 1). x = x.astype(float) / maxval # Tests that the values are distributed amongst 10 bins with equal # probability. 16.92 is the Chi^2 value for 9 degrees of freedom with # p=0.05. This test is probabilistic and would be flaky if the random # seed were not fixed. val = random_test_util.chi_squared(x, 10) self.assertLess(val, 16.92) @parameterized.parameters(FLOATS) @test_util.run_v2_only def testDistributionOfNormal(self, dtype): """Use Anderson-Darling test to test distribution appears normal.""" n = 1000 gen = random.Generator.from_seed(1234) x = gen.normal(shape=[n], dtype=dtype).numpy() # The constant 2.492 is the 5% critical value for the Anderson-Darling # test where the mean and variance are known. This test is probabilistic # so to avoid flakiness the seed is fixed. self.assertLess( random_test_util.anderson_darling(x.astype(float)), 2.492) @test_util.run_v2_only def testErrors(self): """Tests that proper errors are raised. 
""" shape = [2, 3] gen = random.Generator.from_seed(1234) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, r"must have shape \[\], not"): gen_stateful_random_ops.stateful_standard_normal_v2( gen.state.handle, [0, 0], shape) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, r"must have shape \[\], not"): gen_stateful_random_ops.rng_skip( gen.state.handle, gen.algorithm, [0, 0]) with self.assertRaisesWithPredicateMatch( TypeError, "EagerTensor of dtype int64"): gen_stateful_random_ops.stateful_standard_normal_v2( gen.state.handle, 1.1, shape) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "Unsupported algorithm id"): gen_stateful_random_ops.stateful_standard_normal_v2( gen.state.handle, 123, shape) var = variables.Variable([0, 0], dtype=dtypes.int32) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "dtype of RNG state variable must be int64, not"): gen_stateful_random_ops.stateful_standard_normal_v2( var.handle, random.RNG_ALG_PHILOX, shape) var = variables.Variable([[0]], dtype=dtypes.int64) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "RNG state must have one and only one dimension, not"): gen_stateful_random_ops.stateful_standard_normal_v2( var.handle, random.RNG_ALG_PHILOX, shape) var = variables.Variable([0], dtype=dtypes.int64) with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "For the Philox algorithm, the size of state must be at least"): gen_stateful_random_ops.stateful_standard_normal_v2( var.handle, random.RNG_ALG_PHILOX, shape) @test_util.run_v2_only def testSetGlobalGeneratorBadWithDefun(self): """Demonstrates that set_global_generator don't work properly with defun. """ shape = (3,) @def_function.function def f(): return random.get_global_generator().normal(shape) random.set_global_generator(random.Generator.from_seed(50)) with self.assertRaisesWithPredicateMatch( errors.NotFoundError, "Resource .+ does not exist"): _ = f() random.set_global_generator(random.Generator.from_seed(50)) _ = f() @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratSeq(self): """Tests RNG/MirrorStrategy interaction #1. If an RNG is created outside strategy.scope(), all replicas will access the same RNG object, and accesses are serialized. """ shape = [3, 4] dtype = dtypes.int32 gen = random.Generator.from_seed(1234) strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()]) with strat.scope(): def f(): t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica( fn=f) values = results.values self.assertAllEqual(2, len(values)) self.assertAllDifferent(values) @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratParaSync(self): """Tests RNG/MirrorStrategy interaction #2. If an RNG is created inside strategy.scope(), each replica gets an mirror of this RNG. If they access their RNGs in the same manner, their random-number streams are the same. 
""" shape = [3, 4] dtype = dtypes.int32 strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()]) with strat.scope(): gen = random.Generator.from_seed(1234) def f(): t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica(fn=f) values = results.values self.assertAllEqual(2, len(values)) self.assertAllEqual(values[0], values[1]) @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratParaSyncWithinFun(self): """Tests RNG/MirrorStrategy interaction #2b. If the RNG creation is within `f` in situation #2, the replicas' random-number streams are still the same. Note that whether the RNG creation is within strategy.scope() or not doesn't affect the result in this case (putting in inside strategy.scope() will cause unnecessary mirror creation and waste memory though). """ shape = [3, 4] dtype = dtypes.int32 strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()]) def f(): gen = random.Generator.from_seed(1234) t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica(fn=f) values = results.values self.assertAllEqual(2, len(values)) self.assertAllEqual(values[0], values[1]) @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratUnseedSync(self): """Tests RNG/MirrorStrategy interaction #2c. If the RNG created in situation #2 is unseeded, the replicas' random-number streams are still the same. If the RNG created in situation #2b is unseeded, the replicas' random-number streams will be different. We can't test this for now because the op 'NonDeterministicInts' is not implemented on GPU yet. """ shape = [3, 4] dtype = dtypes.int32 strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()]) # TODO(wangpeng): support calling `random.Generator()` inside `f` (i.e. # inside `call_for_each_replica` so that each replica can get a # different random-number stream. The only obstacle is that op # 'NonDeterministicInts' is not implemented on GPU.) with strat.scope(): gen = random.Generator.from_non_deterministic_state() def f(): t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica(fn=f) values = results.values self.assertAllEqual(2, len(values)) self.assertAllEqual(values[0], values[1]) @test_util.run_v2_only @test_util.run_cuda_only def testMirroredStratParaAsync(self): """Tests RNG/MirrorStrategy interaction #3. The user can create n independent RNGs outside strategy.scope(), where n is the number of replicas, and give one to each replica. The replicas can thus get different random-number streams. 
""" shape = [3, 4] dtype = dtypes.int32 gens = random.get_global_generator().split(count=2) devices = ["/cpu:0", test_util.gpu_device_name()] strat = MirroredStrategy(devices=devices) # Use `PerReplica` to specify which `gen` is sent to which replica gens = dist_values.PerReplica( device_map=dist_values.ReplicaDeviceMap(devices), values=[[g] for g in gens]) with strat.scope(): def f(gen): t1 = gen.uniform_full_int(shape=shape, dtype=dtype) t2 = gen.uniform_full_int(shape=shape, dtype=dtype) t = array_ops.stack([t1, t2]) return t results = strat.extended.call_for_each_replica( fn=f, args=gens) values = results.values self.assertAllEqual(2, len(values)) self.assertAllDifferent(values) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/stateful_random_ops_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations often used for initializing tensors. All variable initializers returned by functions in this file should have the following signature: def _initializer(shape, dtype=dtypes.float32): Args: shape: List of `int` representing the shape of the output `Tensor`. Some initializers may also be able to accept a `Tensor`. dtype: (Optional) Type of the output `Tensor`. Returns: A `Tensor` of type `dtype` and `shape`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_linalg_ops from tensorflow.python.ops import linalg_ops_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import stateless_random_ops from tensorflow.python.util.tf_export import tf_export class Initializer(object): """Initializer base class: all initializers inherit from this class. """ def __call__(self, shape, dtype=None): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided will return tensor of `tf.float32`. """ raise NotImplementedError def get_config(self): """Returns the configuration of the initializer as a JSON-serializable dict. Returns: A JSON-serializable Python dict. """ return {} @classmethod def from_config(cls, config): """Instantiates an initializer from a configuration dictionary. Example: ```python initializer = RandomUniform(-1, 1) config = initializer.get_config() initializer = RandomUniform.from_config(config) ``` Args: config: A Python dictionary. It will typically be the output of `get_config`. Returns: An Initializer instance. """ config.pop("dtype", None) return cls(**config) @tf_export("zeros_initializer", v1=[]) class Zeros(Initializer): """Initializer that generates tensors initialized to 0.""" def __call__(self, shape, dtype=dtypes.float32): dtype = dtypes.as_dtype(dtype) return array_ops.zeros(shape, dtype) @tf_export("ones_initializer", v1=[]) class Ones(Initializer): """Initializer that generates tensors initialized to 1.""" def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. Raises: ValuesError: If the dtype is not numeric or boolean. """ dtype = dtypes.as_dtype(dtype) if not dtype.is_numpy_compatible or dtype == dtypes.string: raise ValueError("Expected numeric or boolean dtype, got %s." 
% dtype) return array_ops.ones(shape, dtype) @tf_export("constant_initializer", v1=[]) class Constant(Initializer): """Initializer that generates tensors with constant values. The resulting tensor is populated with values of type `dtype`, as specified by arguments `value` following the desired `shape` of the new tensor (see examples below). The argument `value` can be a constant value, or a list of values of type `dtype`. If `value` is a list, then the length of the list must be less than or equal to the number of elements implied by the desired shape of the tensor. In the case where the total number of elements in `value` is less than the number of elements required by the tensor shape, the last element in `value` will be used to fill the remaining entries. If the total number of elements in `value` is greater than the number of elements required by the tensor shape, the initializer will raise a `ValueError`. Args: value: A Python scalar, list or tuple of values, or a N-dimensional numpy array. All elements of the initialized variable will be set to the corresponding value in the `value` argument. Raises: TypeError: If the input `value` is not one of the expected types. Examples: The following example can be rewritten using a numpy.ndarray instead of the `value` list, even reshaped, as shown in the two commented lines below the `value` list initialization. ```python >>> import numpy as np >>> import tensorflow as tf >>> value = [0, 1, 2, 3, 4, 5, 6, 7] >>> # value = np.array(value) >>> # value = value.reshape([2, 4]) >>> init = tf.compat.v1.constant_initializer(value) >>> print('fitting shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[2, 4], initializer=init) >>> x.initializer.run() >>> print(x.eval()) fitting shape: [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.]] >>> print('larger shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[3, 4], initializer=init) >>> x.initializer.run() >>> print(x.eval()) larger shape: [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 7. 7. 7. 7.]] >>> print('smaller shape:') >>> with tf.compat.v1.Session(): >>> x = tf.compat.v1.get_variable('x', shape=[2, 3], initializer=init) ValueError: Too many elements provided. Needed at most 6, but received 8 ``` """ def __init__(self, value=0): if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))): raise TypeError( "Invalid type for initial value: %s (expected Python scalar, list or " "tuple of values, or numpy.ndarray)." % type(value)) self.value = value def __call__(self, shape, dtype=None): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided the dtype of the tensor created will be the type of the inital value. Raises: TypeError: If the initializer cannot create a tensor of the requested dtype. """ if dtype is not None: dtype = dtypes.as_dtype(dtype) return constant_op.constant( self.value, dtype=dtype, shape=shape) def get_config(self): return {"value": self.value} @tf_export("random_uniform_initializer", v1=[]) class RandomUniform(Initializer): """Initializer that generates tensors with a uniform distribution. Args: minval: A python scalar or a scalar tensor. Lower bound of the range of random values to generate. maxval: A python scalar or a scalar tensor. Upper bound of the range of random values to generate. Defaults to 1 for float types. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. 
""" def __init__(self, minval=-0.05, maxval=0.05, seed=None): self.minval = minval self.maxval = maxval self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point and integer types are supported. Raises: ValueError: If the dtype is not numeric. """ dtype = dtypes.as_dtype(dtype) if not dtype.is_floating and not dtype.is_integer: raise ValueError("Expected float or integer dtype, got %s." % dtype) return self._random_generator.random_uniform(shape, self.minval, self.maxval, dtype) def get_config(self): return { "minval": self.minval, "maxval": self.maxval, "seed": self.seed } @tf_export("random_normal_initializer", v1=[]) class RandomNormal(Initializer): """Initializer that generates tensors with a normal distribution. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. """ def __init__(self, mean=0.0, stddev=0.05, seed=None): self.mean = mean self.stddev = stddev self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point """ dtype = _assert_float_dtype(dtype) return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype) def get_config(self): return { "mean": self.mean, "stddev": self.stddev, "seed": self.seed } class TruncatedNormal(Initializer): """Initializer that generates a truncated normal distribution. These values are similar to values from a `random_normal_initializer` except that values more than two standard deviations from the mean are discarded and re-drawn. This is the recommended initializer for neural network weights and filters. Args: mean: a python scalar or a scalar tensor. Mean of the random values to generate. stddev: a python scalar or a scalar tensor. Standard deviation of the random values to generate. seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. """ def __init__(self, mean=0.0, stddev=0.05, seed=None): self.mean = mean self.stddev = stddev self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point """ dtype = _assert_float_dtype(dtype) return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype) def get_config(self): return { "mean": self.mean, "stddev": self.stddev, "seed": self.seed } class VarianceScaling(Initializer): """Initializer capable of adapting its scale to the shape of weights tensors. 
With `distribution="truncated_normal" or "untruncated_normal"`, samples are drawn from a truncated/untruncated normal distribution with a mean of zero and a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)` where n is: - number of input units in the weight tensor, if mode = "fan_in" - number of output units, if mode = "fan_out" - average of the numbers of input and output units, if mode = "fan_avg" With `distribution="uniform"`, samples are drawn from a uniform distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`. Args: scale: Scaling factor (positive float). mode: One of "fan_in", "fan_out", "fan_avg". distribution: Random distribution to use. One of "truncated_normal", "untruncated_normal" and "uniform". seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. Raises: ValueError: In case of an invalid value for the "scale", mode" or "distribution" arguments. """ def __init__(self, scale=1.0, mode="fan_in", distribution="truncated_normal", seed=None): if scale <= 0.: raise ValueError("`scale` must be positive float.") if mode not in {"fan_in", "fan_out", "fan_avg"}: raise ValueError("Invalid `mode` argument:", mode) distribution = distribution.lower() # Compatibility with keras-team/keras. if distribution == "normal": distribution = "truncated_normal" if distribution not in {"uniform", "truncated_normal", "untruncated_normal"}: raise ValueError("Invalid `distribution` argument:", distribution) self.scale = scale self.mode = mode self.distribution = distribution self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point """ partition_info = None # Keeps logic so can be readded later if necessary dtype = _assert_float_dtype(dtype) scale = self.scale scale_shape = shape if partition_info is not None: scale_shape = partition_info.full_shape fan_in, fan_out = _compute_fans(scale_shape) if self.mode == "fan_in": scale /= max(1., fan_in) elif self.mode == "fan_out": scale /= max(1., fan_out) else: scale /= max(1., (fan_in + fan_out) / 2.) if self.distribution == "truncated_normal": # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) stddev = math.sqrt(scale) / .87962566103423978 return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype) elif self.distribution == "untruncated_normal": stddev = math.sqrt(scale) return self._random_generator.random_normal(shape, 0.0, stddev, dtype) else: limit = math.sqrt(3.0 * scale) return self._random_generator.random_uniform(shape, -limit, limit, dtype) def get_config(self): return { "scale": self.scale, "mode": self.mode, "distribution": self.distribution, "seed": self.seed } class Orthogonal(Initializer): """Initializer that generates an orthogonal matrix. If the shape of the tensor to initialize is two-dimensional, it is initialized with an orthogonal matrix obtained from the QR decomposition of a matrix of random numbers drawn from a normal distribution. If the matrix has fewer rows than columns then the output will have orthogonal rows. Otherwise, the output will have orthogonal columns. If the shape of the tensor to initialize is more than two-dimensional, a matrix of shape `(shape[0] * ... 
* shape[n - 2], shape[n - 1])` is initialized, where `n` is the length of the shape vector. The matrix is subsequently reshaped to give a tensor of the desired shape. Args: gain: multiplicative factor to apply to the orthogonal matrix seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. References: [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C) ([pdf](https://arxiv.org/pdf/1312.6120.pdf)) """ def __init__(self, gain=1.0, seed=None): self.gain = gain self.seed = seed self._random_generator = _RandomGenerator(seed) def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point or the input shape is not valid. """ dtype = _assert_float_dtype(dtype) # Check the shape if len(shape) < 2: raise ValueError("The tensor to initialize must be " "at least two-dimensional") # Flatten the input shape with the last dimension remaining # its original shape so it works for conv2d num_rows = 1 for dim in shape[:-1]: num_rows *= dim num_cols = shape[-1] flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows)) # Generate a random matrix a = self._random_generator.random_normal(flat_shape, dtype=dtype) # Compute the qr factorization q, r = gen_linalg_ops.qr(a, full_matrices=False) # Make Q uniform d = array_ops.diag_part(r) q *= math_ops.sign(d) if num_rows < num_cols: q = array_ops.matrix_transpose(q) return self.gain * array_ops.reshape(q, shape) def get_config(self): return {"gain": self.gain, "seed": self.seed} class Identity(Initializer): """Initializer that generates the identity matrix. Only use for 2D matrices. Args: gain: Multiplicative factor to apply to the identity matrix. """ def __init__(self, gain=1.0): self.gain = gain def __call__(self, shape, dtype=dtypes.float32): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. Raises: ValueError: If the dtype is not floating point """ partition_info = None # Keeps logic so can be readded later if necessary dtype = _assert_float_dtype(dtype) full_shape = shape if partition_info is None else partition_info.full_shape if len(full_shape) != 2: raise ValueError( "Identity matrix initializer can only be used for 2D matrices.") initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype) if partition_info is not None: initializer = array_ops.slice(initializer, partition_info.var_offset, shape) return self.gain * initializer def get_config(self): return {"gain": self.gain} class GlorotUniform(VarianceScaling): """The Glorot uniform initializer, also called Xavier uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. 
References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ def __init__(self, seed=None): super(GlorotUniform, self).__init__( scale=1.0, mode="fan_avg", distribution="uniform", seed=seed) def get_config(self): return {"seed": self.seed} class GlorotNormal(VarianceScaling): """The Glorot normal initializer, also called Xavier normal initializer. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor. Args: seed: A Python integer. Used to create random seeds. See `tf.compat.v1.set_random_seed` for behavior. References: [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html) ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)) """ def __init__(self, seed=None): super(GlorotNormal, self).__init__( scale=1.0, mode="fan_avg", distribution="truncated_normal", seed=seed) def get_config(self): return {"seed": self.seed} # Aliases. # pylint: disable=invalid-name zeros_initializer = Zeros ones_initializer = Ones constant_initializer = Constant random_uniform_initializer = RandomUniform random_normal_initializer = RandomNormal truncated_normal_initializer = TruncatedNormal variance_scaling_initializer = VarianceScaling glorot_uniform_initializer = GlorotUniform glorot_normal_initializer = GlorotNormal orthogonal_initializer = Orthogonal identity_initializer = Identity # pylint: enable=invalid-name def lecun_normal(seed=None): """LeCun normal initializer. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) ([pdf] (https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ return VarianceScaling( scale=1., mode="fan_in", distribution="truncated_normal", seed=seed) def lecun_uniform(seed=None): """LeCun uniform initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(3 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: - Self-Normalizing Neural Networks, [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf)) - Efficient Backprop, [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) """ return VarianceScaling( scale=1., mode="fan_in", distribution="uniform", seed=seed) def he_normal(seed=None): """He normal initializer. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. 
References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ return VarianceScaling( scale=2., mode="fan_in", distribution="truncated_normal", seed=seed) def he_uniform(seed=None): """He uniform variance scaling initializer. It draws samples from a uniform distribution within [-limit, limit] where `limit` is `sqrt(6 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Arguments: seed: A Python integer. Used to seed the random generator. Returns: An initializer. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ return VarianceScaling( scale=2., mode="fan_in", distribution="uniform", seed=seed) # Utility functions. def _compute_fans(shape): """Computes the number of input and output units for a weight shape. Args: shape: Integer shape tuple or TF tensor shape. Returns: A tuple of scalars (fan_in, fan_out). """ if len(shape) < 1: # Just to avoid errors for constants. fan_in = fan_out = 1 elif len(shape) == 1: fan_in = fan_out = shape[0] elif len(shape) == 2: fan_in = shape[0] fan_out = shape[1] else: # Assuming convolution kernels (2D, 3D, or more). # kernel shape: (..., input_depth, depth) receptive_field_size = 1. for dim in shape[:-2]: receptive_field_size *= dim fan_in = shape[-2] * receptive_field_size fan_out = shape[-1] * receptive_field_size return fan_in, fan_out def _assert_float_dtype(dtype): """Validate and return floating point type based on `dtype`. `dtype` must be a floating point type. Args: dtype: The data type to validate. Returns: Validated type. Raises: ValueError: if `dtype` is not a floating point type. """ dtype = dtypes.as_dtype(dtype) if not dtype.is_floating: raise ValueError("Expected floating point type, got %s." % dtype) return dtype class _RandomGenerator(object): """Random generator that selects appropriate random ops.""" def __init__(self, seed=None): super(_RandomGenerator, self).__init__() if seed is not None: # Stateless random ops requires 2-int seed. 
self.seed = [seed, 0] else: self.seed = None def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32): """A deterministic random normal if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_random_normal else: op = random_ops.random_normal return op( shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed) def random_uniform(self, shape, minval, maxval, dtype): """A deterministic random uniform if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_random_uniform else: op = random_ops.random_uniform return op( shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed) def truncated_normal(self, shape, mean, stddev, dtype): """A deterministic truncated normal if seed is passed.""" if self.seed: op = stateless_random_ops.stateless_truncated_normal else: op = random_ops.truncated_normal return op( shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed) # Compatibility aliases # pylint: disable=invalid-name zero = zeros = Zeros one = ones = Ones constant = Constant uniform = random_uniform = RandomUniform normal = random_normal = RandomNormal truncated_normal = TruncatedNormal identity = Identity orthogonal = Orthogonal glorot_normal = GlorotNormal glorot_uniform = GlorotUniform
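Since this module defines the TF2-style initializer classes, a short hedged usage sketch may help; it calls the classes through the module path itself rather than any public alias, and the shapes and seed are illustrative only:

```python
from tensorflow.python.ops import init_ops_v2

# Every initializer is a callable: __call__(shape, dtype) -> Tensor.
init = init_ops_v2.GlorotUniform(seed=42)
w = init(shape=(128, 64))            # limit = sqrt(6 / (128 + 64))

# Constant fills from a scalar or a flat list of values.
bias_init = init_ops_v2.Constant(0.1)
b = bias_init(shape=(64,))

# Configs round-trip, which is how Keras serializes initializers.
config = init.get_config()
init_again = init_ops_v2.GlorotUniform.from_config(config)
```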
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/init_ops_v2.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=wildcard-import,unused-import
"""Protocol Buffer encoding and decoding from tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops.gen_decode_proto_ops import decode_proto_v2 as decode_proto
from tensorflow.python.ops.gen_encode_proto_ops import encode_proto
from tensorflow.python.util.tf_export import tf_export

tf_export("io.decode_proto")(decode_proto)
tf_export("io.encode_proto")(encode_proto)

ops.NotDifferentiable("DecodeProtoV2")
ops.NotDifferentiable("EncodeProto")
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/proto_ops.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Connects all half, float and double tensors to CheckNumericsOp."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["debugging.assert_all_finite", "verify_tensor_all_finite"])
@deprecation.deprecated_endpoints("verify_tensor_all_finite")
def verify_tensor_all_finite(t=None, msg=None, name=None, x=None, message=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    t: Tensor to check.
    msg: Message to log on failure.
    name: A name for this operation (optional).
    x: Alias for t.
    message: Alias for msg.

  Returns:
    Same tensor as `t`.
  """
  x = deprecation.deprecated_argument_lookup("x", x, "t", t)
  message = deprecation.deprecated_argument_lookup(
      "message", message, "msg", msg)
  return verify_tensor_all_finite_v2(x, message, name)


@tf_export("debugging.assert_all_finite", v1=[])
def verify_tensor_all_finite_v2(x, message, name=None):
  """Assert that the tensor does not contain any NaN's or Inf's.

  Args:
    x: Tensor to check.
    message: Message to log on failure.
    name: A name for this operation (optional).

  Returns:
    Same tensor as `x`.
  """
  with ops.name_scope(name, "VerifyFinite", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    with ops.colocate_with(x):
      verify_input = array_ops.check_numerics(x, message=message)
      out = control_flow_ops.with_dependencies([verify_input], x)
  return out


@tf_export(v1=["add_check_numerics_ops"])
def add_check_numerics_ops():
  """Connect a `tf.debugging.check_numerics` to every floating point tensor.

  `check_numerics` operations themselves are added for each `half`, `float`,
  or `double` tensor in the current default graph. For all ops in the graph,
  the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs
  is guaranteed to run before the `check_numerics` op on any of its outputs.

  Note: This API is not compatible with the use of `tf.cond` or
  `tf.while_loop`, and will raise a `ValueError` if you attempt to call it
  in such a graph.

  Returns:
    A `group` op depending on all `check_numerics` ops added.

  Raises:
    ValueError: If the graph contains any numeric operations in a control flow
      structure.
    RuntimeError: If called with eager execution enabled.

  @compatibility(eager)
  Not compatible with eager execution. To check for `Inf`s and `NaN`s under
  eager execution, call `tfe.seterr(inf_or_nan='raise')` once before executing
  the checked operations.
  @end_compatibility
  """
  if context.executing_eagerly():
    raise RuntimeError(
        "add_check_numerics_ops() is not compatible with eager execution. "
        "To check for Inf's and NaN's under eager execution, call "
        "tfe.seterr(inf_or_nan='raise') once before executing the "
        "checked operations.")

  check_op = []
  # This code relies on the ordering of ops in get_operations().
  # The producer of a tensor always comes before that tensor's consumer in
  # this list. This is true because get_operations() returns ops in the order
  # added, and an op can only be added after its inputs are added.
  for op in ops.get_default_graph().get_operations():
    for output in op.outputs:
      if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
        if op._get_control_flow_context() is not None:  # pylint: disable=protected-access
          raise ValueError("`tf.add_check_numerics_ops() is not compatible "
                           "with TensorFlow control flow operations such as "
                           "`tf.cond()` or `tf.while_loop()`.")

        message = op.name + ":" + str(output.value_index)
        with ops.control_dependencies(check_op):
          check_op = [array_ops.check_numerics(output, message=message)]
  return control_flow_ops.group(*check_op)
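For orientation, a minimal usage sketch of the v1 endpoint defined above, assuming graph mode under the `tf.compat.v1` API (the v2 spelling is `tf.debugging.assert_all_finite(x, message)`); the tensor values are illustrative:

```python
import numpy as np
import tensorflow.compat.v1 as tf

x = tf.constant([1.0, 2.0, np.inf])
# Returns x with a CheckNumerics control dependency attached.
checked = tf.verify_tensor_all_finite(x, msg="x must be finite")
with tf.Session() as sess:
  sess.run(checked)  # raises InvalidArgumentError because x contains Inf
```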
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/numerics.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for (block) GRU/LSTM operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_rnn_ops


def _block_lstm_grad(op, *grads):
  """Gradient for the BlockLSTM op."""
  seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b = op.inputs
  i, cs, f, o, ci, co, h = op.outputs
  _, cs_grad, _, _, _, _, h_grad = grads
  (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad,
   b_grad) = gen_rnn_ops.block_lstm_grad(
       seq_len_max=seq_len_max, x=x, cs_prev=cs_prev, h_prev=h_prev, w=w,
       wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co,
       h=h, cs_grad=cs_grad, h_grad=h_grad,
       use_peephole=op.get_attr("use_peephole"))
  return (None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
          wco_grad, b_grad)


ops.RegisterGradient("BlockLSTM")(_block_lstm_grad)
ops.RegisterGradient("BlockLSTMV2")(_block_lstm_grad)
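The file above uses TensorFlow's gradient registry directly: a `_grad(op, *grads)` function returning one gradient per op input, attached via `ops.RegisterGradient("OpName")`. A hedged, self-contained sketch of the same registry driven from user code; the registry key `"ClipGradIdentity"` and the clipping behavior are illustrative, not part of this file:

```python
import tensorflow.compat.v1 as tf


@tf.RegisterGradient("ClipGradIdentity")  # hypothetical registry key
def _clip_grad_identity(op, grad):
  """Backward pass: forward values unchanged, clip the incoming gradient."""
  del op  # forward inputs are not needed here
  return tf.clip_by_value(grad, -1.0, 1.0)


g = tf.Graph()
with g.as_default():
  x = tf.constant(3.0)
  # Route Identity's gradient through the custom registration above.
  with g.gradient_override_map({"Identity": "ClipGradIdentity"}):
    y = tf.identity(x) * 5.0
  dy_dx, = tf.gradients(y, [x])  # the true gradient 5.0 is clipped to 1.0
```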
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/rnn_grad.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Matmul operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import itertools
import time

import numpy as np

from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


def build_graph(device, n, m, k, transpose_a, transpose_b, dtype):
  """Build a graph containing a sequence of matmul operations.

  Args:
    device: String, the device to run on.
    n: tensor A's first dimension size.
    m: tensor A's second dimension size.
    k: tensor B's second dimension size.
    transpose_a: boolean value to show if tensor A is transposed.
    transpose_b: boolean value to show if tensor B is transposed.
    dtype: numpy data type of the input tensor.

  Returns:
    A matmul operation to run()
  """
  with ops.device('%s' % device):
    if not transpose_a:
      x = variables.VariableV1(random_ops.random_uniform([n, m], dtype=dtype),
                               use_resource=False)
    else:
      x = variables.VariableV1(random_ops.random_uniform([m, n], dtype=dtype),
                               use_resource=False)
    if not transpose_b:
      y = variables.VariableV1(random_ops.random_uniform([m, k], dtype=dtype),
                               use_resource=False)
    else:
      y = variables.VariableV1(random_ops.random_uniform([k, m], dtype=dtype),
                               use_resource=False)

    z = math_ops.matmul(x, y, transpose_a=transpose_a, transpose_b=transpose_b)
    return control_flow_ops.group(z)


class MatmulBenchmark(test.Benchmark):
  """Benchmark matmul!"""

  def run_graph(self, device, n, m, k, transpose_a, transpose_b, num_iters,
                dtype):
    """Run the graph and print its execution time.

    Args:
      device: String, the device to run on.
      n: tensor A's first dimension size.
      m: tensor A's second dimension size.
      k: tensor B's second dimension size.
      transpose_a: boolean value to show if tensor A is transposed.
      transpose_b: boolean value to show if tensor B is transposed.
      num_iters: number of iterations to run the benchmark.
      dtype: numpy data type of the input tensor.

    Returns:
      The duration of the run in seconds.
    """
    graph = ops.Graph()
    with graph.as_default():
      output = build_graph(device, n, m, k, transpose_a, transpose_b, dtype)
      with session_lib.Session(graph=graph) as session:
        variables.global_variables_initializer().run()
        for _ in range(500):
          session.run(output)
        start_time = time.time()
        for _ in range(num_iters):
          session.run(output)
        duration = (time.time() - start_time)
        num_items = n * m * k * 2
        throughput = num_items * num_iters / duration / 1e9
        print('%s %s input_info:%s %d %.4fsec, %.4fGitems/s.'
              % (device, str(dtype),
                 str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' +
                 str(transpose_a) + '.tb:' + str(transpose_b),
                 num_iters, duration, throughput))

    name_template = ('matmul_{device}_{dtype}_input_info_{inputinfo}')

    self.report_benchmark(
        name=name_template.format(
            device=device,
            dtype=str(dtype).replace(' ', ''),
            inputinfo=str(n) + 'x' + str(m) + 'x' + str(k) + ',ta:' +
            str(transpose_a) + ',tb:' + str(transpose_b)).replace(' ', ''),
        iters=num_iters,
        wall_time=duration)
    return duration

  def run_test_gpu(self, n, m, k, transpose_a, transpose_b, dtype, num_iters):
    self.run_graph(test.gpu_device_name(), n, m, k, transpose_a, transpose_b,
                   num_iters, dtype)

  def test_round(self, num_iters):
    dtypes = [np.float32, np.float64]
    for dtype in dtypes:
      for n, m, (transpose_a, transpose_b) in itertools.product(
          [512, 1024], [1, 8, 16, 128],
          [(False, False), (True, False), (False, True)]):
        k = n
        self.run_test_gpu(n, m, k, transpose_a, transpose_b, dtype, num_iters)

      for n, m, k, (transpose_a, transpose_b) in itertools.product(
          [200], [1, 8, 20], [10000],
          [(False, False), (True, False), (False, True)]):
        self.run_test_gpu(n, m, k, transpose_a, transpose_b, dtype, num_iters)

      for (n, m, k), (transpose_a, transpose_b) in itertools.product(
          [(200, 20, 20000), (1, 10000, 200)],
          [(False, False), (True, False), (False, True)]):
        self.run_test_gpu(n, m, k, transpose_a, transpose_b, dtype, num_iters)

  def benchmark_matmul(self):
    self.test_round(num_iters=200)


if __name__ == '__main__':
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/matmul_benchmark.py
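A worked example (not part of the benchmark file) of the throughput arithmetic that run_graph prints: each [n, m] x [m, k] matmul is counted as n * m * k * 2 items, i.e. one multiply and one add per inner-product term. The duration below is a made-up wall time, used only to show the formula.

n, m, k = 1024, 128, 1024
num_iters = 200
duration = 0.85                      # hypothetical seconds for num_iters session.run calls
num_items = n * m * k * 2            # multiply-adds per matmul
throughput = num_items * num_iters / duration / 1e9
print('%.4f Gitems/s' % throughput)  # ~63.2 Gitems/s for these made-up numbers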
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for manipulating the binary representations of integers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_bitwise_ops import * # pylint: enable=wildcard-import ops.NotDifferentiable("BitwiseAnd") ops.NotDifferentiable("BitwiseOr") ops.NotDifferentiable("BitwiseXor") ops.NotDifferentiable("Invert") ops.NotDifferentiable("PopulationCount") ops.NotDifferentiable("LeftShift") ops.NotDifferentiable("RightShift")
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/bitwise_ops.py
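A short usage sketch (not part of bitwise_ops.py) of the wrapped kernels through the public tf.bitwise namespace; since the ops are registered as NotDifferentiable above, they are for integer manipulation only and contribute no gradients. Assumes the TF 1.x graph-mode API used in this tree.

import tensorflow as tf

x = tf.constant([0b0011, 0b0101], dtype=tf.int32)
y = tf.constant([0b0110, 0b0100], dtype=tf.int32)

both = tf.bitwise.bitwise_and(x, y)     # [2, 4]
either = tf.bitwise.bitwise_or(x, y)    # [7, 5]
shifted = tf.bitwise.left_shift(x, 2)   # [12, 20]

with tf.Session() as sess:
  print(sess.run([both, either, shifted]))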
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The TF2 version of the enum keras.losses.Reduction.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function class ReductionV2(object): """Types of loss reduction. Contains the following values: * `AUTO`: Indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, we expect the reduction value to be `SUM` or `NONE`. Using `AUTO` in that case will raise an error. * `NONE`: Weighted losses with one dimension reduced (axis=-1, or axis specified by loss function). When this reduction type is used with built-in Keras training loops like `fit`/`evaluate`, the unreduced vector loss is passed to the optimizer but the reported loss will be a scalar value. * `SUM`: Scalar sum of weighted losses. * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses. This reduction type is not supported when used with `tf.distribute.Strategy` outside of built-in training loops like `tf.keras` `compile`/`fit`. You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like: ``` with strategy.scope(): loss_obj = tf.keras.losses.CategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE) .... loss = tf.reduce_sum(loss_obj(labels, predictions)) * (1. / global_batch_size) ``` Please see https://www.tensorflow.org/alpha/tutorials/distribute/training_loops for more details on this. """ AUTO = 'auto' NONE = 'none' SUM = 'sum' SUM_OVER_BATCH_SIZE = 'sum_over_batch_size' @classmethod def all(cls): return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE) @classmethod def validate(cls, key): if key not in cls.all(): raise ValueError('Invalid Reduction Key %s.' % key)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/losses/loss_reduction.py
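A small numeric sketch (NumPy only, not part of loss_reduction.py) of what the enum values mean for a vector of per-example losses; the numbers are invented for illustration, and the last lines mirror the docstring's recipe of reducing with NONE and scaling by the global batch size yourself.

import numpy as np

per_example = np.array([0.5, 1.5, 2.0, 4.0])   # what a NONE-style reduction returns
print(per_example.sum())                        # SUM                 -> 8.0
print(per_example.sum() / per_example.size)     # SUM_OVER_BATCH_SIZE -> 2.0

global_batch_size = 4
print(per_example.sum() * (1.0 / global_batch_size))   # docstring recipe -> 2.0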
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for manipulating the loss collections.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import confusion_matrix from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None): """Squeeze or expand last dimension if needed. 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1 (using `confusion_matrix.remove_squeezable_dimensions`). 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1 from the new rank of `y_pred`. If `sample_weight` is scalar, it is kept scalar. This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: y_pred: Predicted values, a `Tensor` of arbitrary dimensions. y_true: Optional label `Tensor` whose dimensions match `y_pred`. sample_weight: Optional weight scalar or `Tensor` whose dimensions match `y_pred`. Returns: Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has the last dimension squeezed, `sample_weight` could be extended by one dimension. If `sample_weight` is None, (y_pred, y_true) is returned. """ y_pred_shape = y_pred.shape y_pred_rank = y_pred_shape.ndims if y_true is not None: # If sparse matrix is provided as `y_true`, the last dimension in `y_pred` # may be > 1. Eg: y_true = [0, 1, 2] (shape=(3,)), # y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]] (shape=(3, 3)) # In this case, we should not try to remove squeezable dimension. y_true_shape = y_true.shape y_true_rank = y_true_shape.ndims if (y_true_rank is not None) and (y_pred_rank is not None): # Use static rank for `y_true` and `y_pred`. if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1: y_true, y_pred = confusion_matrix.remove_squeezable_dimensions( y_true, y_pred) else: # Use dynamic rank. 
rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true) squeeze_dims = lambda: confusion_matrix.remove_squeezable_dimensions( # pylint: disable=g-long-lambda y_true, y_pred) is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1]) maybe_squeeze_dims = lambda: control_flow_ops.cond( # pylint: disable=g-long-lambda is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred)) y_true, y_pred = control_flow_ops.cond( math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims) if sample_weight is None: return y_pred, y_true sample_weight = ops.convert_to_tensor(sample_weight) weights_shape = sample_weight.shape weights_rank = weights_shape.ndims if weights_rank == 0: # If weights is scalar, do nothing. return y_pred, y_true, sample_weight if (y_pred_rank is not None) and (weights_rank is not None): # Use static rank. if weights_rank - y_pred_rank == 1: sample_weight = array_ops.squeeze(sample_weight, [-1]) elif y_pred_rank - weights_rank == 1: sample_weight = array_ops.expand_dims(sample_weight, [-1]) return y_pred, y_true, sample_weight # Use dynamic rank. weights_rank_tensor = array_ops.rank(sample_weight) rank_diff = weights_rank_tensor - array_ops.rank(y_pred) maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1]) def _maybe_expand_weights(): expand_weights = lambda: array_ops.expand_dims(sample_weight, [-1]) return control_flow_ops.cond( math_ops.equal(rank_diff, -1), expand_weights, lambda: sample_weight) def _maybe_adjust_weights(): return control_flow_ops.cond( math_ops.equal(rank_diff, 1), maybe_squeeze_weights, _maybe_expand_weights) # squeeze or expand last dim of `sample_weight` if its rank differs by 1 # from the new rank of `y_pred`. sample_weight = control_flow_ops.cond( math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight, _maybe_adjust_weights) return y_pred, y_true, sample_weight def scale_losses_by_sample_weight(losses, sample_weight): """Scales loss values by the given sample weights. `sample_weight` dimensions are updated to match with the dimension of `losses` if possible by using squeeze/expand/broadcast. Args: losses: Loss tensor. sample_weight: Sample weights tensor. Returns: `losses` scaled by `sample_weight` with dtype float32. """ # TODO(psv): Handle the casting here in a better way, e.g. if losses is float64 # we do not want to lose precision. losses = math_ops.cast(losses, dtypes.float32) sample_weight = math_ops.cast(sample_weight, dtypes.float32) # Update dimensions of `sample_weight` to match with `losses` if possible. losses, _, sample_weight = squeeze_or_expand_dimensions( losses, None, sample_weight) # Broadcast weights if possible. sample_weight = weights_broadcast_ops.broadcast_weights(sample_weight, losses) return math_ops.multiply(losses, sample_weight) @tf_contextlib.contextmanager def check_per_example_loss_rank(per_example_loss): """Context manager that checks that the rank of per_example_loss is at least 1. Args: per_example_loss: Per example loss tensor. Yields: A context manager. """ loss_rank = per_example_loss.shape.rank if loss_rank is not None: # Handle static rank. if loss_rank == 0: raise ValueError( "Invalid value passed for `per_example_loss`. Expected a tensor with " "at least rank 1, received: {}".format(per_example_loss)) yield else: # Handle dynamic rank. with ops.control_dependencies([ check_ops.assert_greater_equal( array_ops.rank(per_example_loss), math_ops.cast(1, dtype=dtypes.int32), message="Invalid value passed for `per_example_loss`. 
Expected a " "tensor with at least rank 1.") ]): yield @tf_export(v1=["losses.add_loss"]) def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES): """Adds an externally defined loss to the collection of losses. Args: loss: A loss `Tensor`. loss_collection: Optional collection to add the loss to. """ # Since we have no way of figuring out when a training iteration starts or # ends, holding on to a loss when executing eagerly is indistinguishable from # leaking memory. We instead leave the collection empty. if loss_collection and not context.executing_eagerly(): ops.add_to_collection(loss_collection, loss) @tf_export(v1=["losses.get_losses"]) def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES): """Gets the list of losses from the loss_collection. Args: scope: An optional scope name for filtering the losses to return. loss_collection: Optional losses collection. Returns: A list of loss tensors. """ return ops.get_collection(loss_collection, scope) @tf_export(v1=["losses.get_regularization_losses"]) def get_regularization_losses(scope=None): """Gets the list of regularization losses. Args: scope: An optional scope name for filtering the losses to return. Returns: A list of regularization losses as Tensors. """ return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope) @tf_export(v1=["losses.get_regularization_loss"]) def get_regularization_loss(scope=None, name="total_regularization_loss"): """Gets the total regularization loss. Args: scope: An optional scope name for filtering the losses to return. name: The name of the returned tensor. Returns: A scalar regularization loss. """ losses = get_regularization_losses(scope) if losses: return math_ops.add_n(losses, name=name) else: return constant_op.constant(0.0) @tf_export(v1=["losses.get_total_loss"]) def get_total_loss(add_regularization_losses=True, name="total_loss", scope=None): """Returns a tensor whose value represents the total loss. In particular, this adds any losses you have added with `tf.add_loss()` to any regularization losses that have been added by regularization parameters on layer constructors, e.g. `tf.layers`. Be very sure to use this if you are constructing a loss_op manually. Otherwise regularization arguments on `tf.layers` methods will not function. Args: add_regularization_losses: A boolean indicating whether or not to use the regularization losses in the sum. name: The name of the returned tensor. scope: An optional scope name for filtering the losses to return. Note that this filters the losses added with `tf.add_loss()` as well as the regularization losses to that scope. Returns: A `Tensor` whose value represents the total loss. Raises: ValueError: if `losses` is not iterable. """ losses = get_losses(scope=scope) if add_regularization_losses: losses += get_regularization_losses(scope=scope) return math_ops.add_n(losses, name=name)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/losses/util.py
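A minimal TF1-style sketch (not part of util.py) of the collection helpers defined above, via their exported tf.compat.v1.losses names; it assumes graph mode, since add_loss deliberately skips the collection when executing eagerly.

import tensorflow.compat.v1 as tf

data_loss = tf.constant(1.25)
tf.losses.add_loss(data_loss)   # lands in GraphKeys.LOSSES
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.constant(0.25))

total = tf.losses.get_total_loss(add_regularization_losses=True)
with tf.Session() as sess:
  print(sess.run(total))  # 1.5 = 1.25 data loss + 0.25 regularization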
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/losses/__init__.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of Loss operations for use in neural networks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import confusion_matrix from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.ops.losses import util from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.deprecation import deprecated_argument_lookup from tensorflow.python.util.tf_export import tf_export @tf_export(v1=["losses.Reduction"]) class Reduction(object): """Types of loss reduction. Contains the following values: * `NONE`: Un-reduced weighted losses with the same shape as input. * `SUM`: Scalar sum of weighted losses. * `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED. * `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses. * `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero weights. DEPRECATED. * `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`. DEPRECATED. """ NONE = "none" SUM = "weighted_sum" SUM_OVER_BATCH_SIZE = "weighted_sum_over_batch_size" MEAN = "weighted_mean" SUM_BY_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights" SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS @classmethod def all(cls): return ( cls.NONE, cls.SUM, cls.MEAN, cls.SUM_OVER_BATCH_SIZE, cls.SUM_OVER_NONZERO_WEIGHTS, cls.SUM_BY_NONZERO_WEIGHTS) @classmethod def validate(cls, key): if key not in cls.all(): raise ValueError("Invalid Reduction Key %s." % key) def _safe_mean(losses, num_present): """Computes a safe mean of the losses. Args: losses: `Tensor` whose elements contain individual loss measurements. num_present: The number of measurable elements in `losses`. Returns: A scalar representing the mean of `losses`. If `num_present` is zero, then zero is returned. """ total_loss = math_ops.reduce_sum(losses) return math_ops.div_no_nan(total_loss, num_present, name="value") def _num_present(losses, weights, per_batch=False): """Computes the number of elements in the loss function induced by `weights`. A given weights tensor induces different numbers of usable elements in the `losses` tensor. The `weights` tensor is broadcast across `losses` for all possible dimensions. For example, if `losses` is a tensor of dimension `[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is, in effect, tiled to match the shape of `losses`. 
Following this effective tile, the total number of present elements is the number of non-zero weights. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. weights: `Tensor` of shape `[]`, `[batch_size]` or `[batch_size, d1, ... dK]`, where K < N. per_batch: Whether to return the number of elements per batch or as a sum total. Returns: The number of present (non-zero) elements in the losses tensor. If `per_batch` is `True`, the value is returned as a tensor of size `[batch_size]`. Otherwise, a single scalar tensor is returned. """ if ((isinstance(weights, float) and weights != 0.0) or (context.executing_eagerly() and weights._rank() == 0 # pylint: disable=protected-access and not math_ops.equal(weights, 0.0))): return _num_elements(losses) with ops.name_scope(None, "num_present", (losses, weights)) as scope: weights = math_ops.cast(weights, dtype=dtypes.float32) present = array_ops.where( math_ops.equal(weights, 0.0), array_ops.zeros_like(weights), array_ops.ones_like(weights)) present = weights_broadcast_ops.broadcast_weights(present, losses) if per_batch: return math_ops.reduce_sum( present, axis=math_ops.range(1, array_ops.rank(present)), keepdims=True, name=scope) return math_ops.reduce_sum(present, name=scope) def _num_elements(losses): """Computes the number of elements in `losses` tensor.""" with ops.name_scope(None, "num_elements", values=[losses]) as scope: return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype) @tf_export(v1=["losses.compute_weighted_loss"]) def compute_weighted_loss( losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Computes the weighted loss. Args: losses: `Tensor` of shape `[batch_size, d1, ... dN]`. weights: Optional `Tensor` whose rank is either 0, or the same rank as `losses`, and must be broadcastable to `losses` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: the scope for the operations performed in computing the loss. loss_collection: the loss will be added to these collections. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `losses`. If `reduction` is `NONE`, this has the same shape as `losses`; otherwise, it is scalar. Raises: ValueError: If `weights` is `None` or the shape is not compatible with `losses`, or if the number of dimensions (rank) of either `losses` or `weights` is missing. Note: When calculating the gradient of a weighted loss contributions from both `losses` and `weights` are considered. If your `weights` depend on some model parameters but you do not want this to affect the loss gradient, you need to apply `tf.stop_gradient` to `weights` before passing them to `compute_weighted_loss`. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ Reduction.validate(reduction) with ops.name_scope(scope, "weighted_loss", (losses, weights)): # Save the `reduction` argument for loss normalization when distributing # to multiple replicas. Used only for estimator + v1 optimizer flow. 
ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access with ops.control_dependencies(( weights_broadcast_ops.assert_broadcastable(weights, losses),)): losses = ops.convert_to_tensor(losses) input_dtype = losses.dtype losses = math_ops.cast(losses, dtype=dtypes.float32) weights = math_ops.cast(weights, dtype=dtypes.float32) weighted_losses = math_ops.multiply(losses, weights) if reduction == Reduction.NONE: loss = weighted_losses else: loss = math_ops.reduce_sum(weighted_losses) if reduction == Reduction.MEAN: loss = _safe_mean( loss, math_ops.reduce_sum(array_ops.ones_like(losses) * weights)) elif (reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS): loss = _safe_mean(loss, _num_present(losses, weights)) elif reduction == Reduction.SUM_OVER_BATCH_SIZE: loss = _safe_mean(loss, _num_elements(losses)) # Convert the result back to the input type. loss = math_ops.cast(loss, input_dtype) util.add_loss(loss, loss_collection) return loss @tf_export(v1=["losses.absolute_difference"]) def absolute_difference( labels, predictions, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds an Absolute Difference loss to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a `Tensor` of shape `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which this loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid or if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "absolute_difference", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) losses = math_ops.abs(math_ops.subtract(predictions, labels)) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.cosine_distance"]) @deprecated_args(None, "dim is deprecated, use axis instead", "dim") def cosine_distance( labels, predictions, axis=None, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS, dim=None): """Adds a cosine-distance loss to the training procedure. Note that the function assumes that `predictions` and `labels` are already unit-normalized. Args: labels: `Tensor` whose shape matches 'predictions' predictions: An arbitrary matrix. axis: The dimension along which the cosine distance is computed. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which this loss will be added. reduction: Type of reduction to apply to loss. dim: The old (deprecated) name for `axis`. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If `predictions` shape doesn't match `labels` shape, or `axis`, `labels`, `predictions` or `weights` is `None`. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ axis = deprecated_argument_lookup("axis", axis, "dim", dim) if axis is None: raise ValueError("You must specify 'axis'.") if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "cosine_distance_loss", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) radial_diffs = math_ops.multiply(predictions, labels) losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.hinge_loss"]) def hinge_loss(labels, logits, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds a hinge loss to the training procedure. Args: labels: The ground truth output tensor. Its shape should match the shape of logits. The values of the tensor are expected to be 0.0 or 1.0. Internally the {0,1} labels are converted to {-1,1} when calculating the hinge loss. logits: The logits, a float tensor. Note that logits are assumed to be unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive (resp. negative) binary prediction. 
weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shapes of `logits` and `labels` don't match or if `labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if logits is None: raise ValueError("logits must not be None.") with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope: logits = math_ops.cast(logits, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) logits.get_shape().assert_is_compatible_with(labels.get_shape()) # We first need to convert binary labels to -1/1 labels (as floats). all_ones = array_ops.ones_like(labels) labels = math_ops.subtract(2 * labels, all_ones) losses = nn_ops.relu( math_ops.subtract(all_ones, math_ops.multiply(labels, logits))) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.huber_loss"]) def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds a Huber Loss term to the training procedure. For each value x in `error=labels-predictions`, the following is calculated: ``` 0.5 * x^2 if |x| <= d 0.5 * d^2 + d * (|x| - d) if |x| > d ``` where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). delta: `float`, the point where the huber loss function changes from a quadratic to linear. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "huber_loss", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) error = math_ops.subtract(predictions, labels) abs_error = math_ops.abs(error) quadratic = math_ops.minimum(abs_error, delta) # The following expression is the same in value as # tf.maximum(abs_error - delta, 0), but importantly the gradient for the # expression when abs_error == delta is 0 (for tf.maximum it would be 1). # This is necessary to avoid doubling the gradient, since there is already a # nonzero contribution to the gradient from the quadratic term. linear = math_ops.subtract(abs_error, quadratic) losses = math_ops.add( math_ops.multiply( ops.convert_to_tensor(0.5, dtype=quadratic.dtype), math_ops.multiply(quadratic, quadratic)), math_ops.multiply(delta, linear)) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.log_loss"]) def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds a Log Loss term to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). epsilon: A small increment to add to avoid taking a log of zero. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "log_loss", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) losses = -math_ops.multiply( labels, math_ops.log(predictions + epsilon)) - math_ops.multiply( (1 - labels), math_ops.log(1 - predictions + epsilon)) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) # TODO(b/37208492): Add reduction arg. @tf_export(v1=["losses.mean_pairwise_squared_error"]) def mean_pairwise_squared_error( labels, predictions, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES): """Adds a pairwise-errors-squared loss to the training procedure. Unlike `mean_squared_error`, which is a measure of the differences between corresponding elements of `predictions` and `labels`, `mean_pairwise_squared_error` is a measure of the differences between pairs of corresponding elements of `predictions` and `labels`. For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are three pairs of differences are summed to compute the loss: loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3 Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the corresponding pairs are computed within each batch sample but not across samples within a batch. For example, if `predictions` represents a batch of 16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs is drawn from each image, but not across images. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. Args: labels: The ground truth output tensor, whose shape must match the shape of `predictions`. predictions: The predicted outputs, a tensor of size `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in `predictions`. weights: Coefficients for the loss a scalar, a tensor of shape `[batch_size]` or a tensor whose shape matches `predictions`. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. Returns: A scalar `Tensor` that returns the weighted loss. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "mean_pairwise_squared_error", (predictions, labels, weights)) as scope: weights = math_ops.cast(weights, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) with ops.control_dependencies(( weights_broadcast_ops.assert_broadcastable(weights, labels),)): predictions = math_ops.cast(predictions, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) diffs = math_ops.subtract(predictions, labels) axis = math_ops.range(1, array_ops.rank(diffs)) sum_squares_diff_per_batch = math_ops.reduce_sum( math_ops.square(diffs), axis=axis, keepdims=True) num_present_per_batch = _num_present(diffs, weights, per_batch=True) term1 = 2.0 * math_ops.div_no_nan( sum_squares_diff_per_batch, math_ops.maximum(num_present_per_batch - 1, 0), name="value") sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True) term2 = 2.0 * math_ops.div_no_nan( math_ops.square(sum_diff), math_ops.maximum( math_ops.multiply(num_present_per_batch, num_present_per_batch - 1), 0), name="value") weighted_losses = math_ops.multiply(term1 - term2, weights) loss = math_ops.reduce_sum(weighted_losses) mean_loss = array_ops.where( math_ops.reduce_sum(num_present_per_batch) > 0, loss, array_ops.zeros_like(loss), name="value") util.add_loss(mean_loss, loss_collection) return mean_loss @tf_export(v1=["losses.mean_squared_error"]) def mean_squared_error( labels, predictions, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Adds a Sum-of-Squares loss to the training procedure. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of size `[batch_size]`, then the total loss for each sample of the batch is rescaled by the corresponding element in the `weights` vector. If the shape of `weights` matches the shape of `predictions`, then the loss of each measurable element of `predictions` is scaled by the corresponding value of `weights`. Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shape of `predictions` doesn't match that of `labels` or if the shape of `weights` is invalid. Also if `labels` or `predictions` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. 
@end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if predictions is None: raise ValueError("predictions must not be None.") with ops.name_scope(scope, "mean_squared_error", (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype=dtypes.float32) labels = math_ops.cast(labels, dtype=dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) losses = math_ops.squared_difference(predictions, labels) return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.sigmoid_cross_entropy"]) def sigmoid_cross_entropy( multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample. If `label_smoothing` is nonzero, smooth the labels towards 1/2: new_multiclass_labels = multiclass_labels * (1 - label_smoothing) + 0.5 * label_smoothing Args: multi_class_labels: `[batch_size, num_classes]` target integer labels in `{0, 1}`. logits: Float `[batch_size, num_classes]` logits outputs of the network. weights: Optional `Tensor` whose rank is either 0, or the same rank as `labels`, and must be broadcastable to `labels` (i.e., all dimensions must be either `1`, or the same as the corresponding `losses` dimension). label_smoothing: If greater than `0` then smooth the labels. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has the same shape as `logits`; otherwise, it is scalar. Raises: ValueError: If the shape of `logits` doesn't match that of `multi_class_labels` or if the shape of `weights` is invalid, or if `weights` is None. Also if `multi_class_labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ if multi_class_labels is None: raise ValueError("multi_class_labels must not be None.") if logits is None: raise ValueError("logits must not be None.") with ops.name_scope(scope, "sigmoid_cross_entropy_loss", (logits, multi_class_labels, weights)) as scope: logits = ops.convert_to_tensor(logits) multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype) logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape()) if label_smoothing > 0: multi_class_labels = (multi_class_labels * (1 - label_smoothing) + 0.5 * label_smoothing) losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels, logits=logits, name="xentropy") return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) @tf_export(v1=["losses.softmax_cross_entropy"]) def softmax_cross_entropy( onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits_v2. `weights` acts as a coefficient for the loss. 
If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample. If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes: new_onehot_labels = onehot_labels * (1 - label_smoothing) + label_smoothing / num_classes Note that `onehot_labels` and `logits` must have the same shape, e.g. `[batch_size, num_classes]`. The shape of `weights` must be broadcastable to loss, whose shape is decided by the shape of `logits`. In case the shape of `logits` is `[batch_size, num_classes]`, loss is a `Tensor` of shape `[batch_size]`. Args: onehot_labels: One-hot-encoded labels. logits: Logits outputs of the network. weights: Optional `Tensor` that is broadcastable to loss. label_smoothing: If greater than 0 then smooth the labels. scope: the scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has shape `[batch_size]`; otherwise, it is scalar. Raises: ValueError: If the shape of `logits` doesn't match that of `onehot_labels` or if the shape of `weights` is invalid or if `weights` is None. Also if `onehot_labels` or `logits` is None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ if onehot_labels is None: raise ValueError("onehot_labels must not be None.") if logits is None: raise ValueError("logits must not be None.") with ops.name_scope(scope, "softmax_cross_entropy_loss", (logits, onehot_labels, weights)) as scope: logits = ops.convert_to_tensor(logits) onehot_labels = math_ops.cast(onehot_labels, logits.dtype) logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape()) if label_smoothing > 0: num_classes = math_ops.cast( array_ops.shape(onehot_labels)[-1], logits.dtype) smooth_positives = 1.0 - label_smoothing smooth_negatives = label_smoothing / num_classes onehot_labels = onehot_labels * smooth_positives + smooth_negatives onehot_labels = array_ops.stop_gradient( onehot_labels, name="labels_stop_gradient") losses = nn.softmax_cross_entropy_with_logits_v2( labels=onehot_labels, logits=logits, name="xentropy") return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction) # TODO(ptucker): Merge this with similar method in metrics_impl. def _remove_squeezable_dimensions( labels, predictions, weights=None, expected_rank_diff=0): """Internal version of _remove_squeezable_dimensions which handles weights. Squeezes `predictions` and `labels` if their ranks differ from expected by exactly 1. Squeezes `weights` if its rank is 1 more than the new rank of `predictions` This will use static shape if available. Otherwise, it will add graph operations, which could result in a performance hit. Args: labels: Label values, a `Tensor` whose dimensions match `predictions`. predictions: Predicted values, a `Tensor` of arbitrary dimensions. weights: Optional weight `Tensor`. It will be squeezed if it's not scalar, and its rank is 1 more than the new rank of `labels`. expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`. Returns: Tuple of `predictions`, `labels` and `weights`, possibly with the last dimension squeezed. 
""" labels, predictions = confusion_matrix.remove_squeezable_dimensions( labels, predictions, expected_rank_diff=expected_rank_diff) if weights is not None: weights = ops.convert_to_tensor(weights) labels_rank = labels.get_shape().ndims weights_shape = weights.get_shape() weights_rank = weights_shape.ndims if (labels_rank is not None) and (weights_rank is not None): # Use static rank. rank_diff = weights_rank - labels_rank if rank_diff == 1: weights = array_ops.squeeze(weights, [-1]) return labels, predictions, weights # Use dynamic rank. rank_diff = array_ops.rank(weights) - array_ops.rank(labels) if (weights_rank is None) or ( weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)): weights = control_flow_ops.cond( math_ops.equal(1, rank_diff), lambda: array_ops.squeeze(weights, [-1]), lambda: weights) return labels, predictions, weights @tf_export(v1=["losses.sparse_softmax_cross_entropy"]) def sparse_softmax_cross_entropy( labels, logits, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS): """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`. `weights` acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If `weights` is a tensor of shape `[batch_size]`, then the loss weights apply to each corresponding sample. Args: labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of `labels` and result) and dtype `int32` or `int64`. Each entry in `labels` must be an index in `[0, num_classes)`. Other values will raise an exception when this op is run on CPU, and return `NaN` for corresponding loss and gradient rows on GPU. logits: Unscaled log probabilities of shape `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32` or `float64`. weights: Coefficients for the loss. This must be scalar or broadcastable to `labels` (i.e. same rank and each dimension is either 1 or the same). scope: the scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss `Tensor` of the same type as `logits`. If `reduction` is `NONE`, this has the same shape as `labels`; otherwise, it is scalar. Raises: ValueError: If the shapes of `logits`, `labels`, and `weights` are incompatible, or if any of them are None. @compatibility(eager) The `loss_collection` argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a `tf.keras.Model`. @end_compatibility """ if labels is None: raise ValueError("labels must not be None.") if logits is None: raise ValueError("logits must not be None.") with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss", (logits, labels, weights)) as scope: # As documented above in Args, labels contain class IDs and logits contains # 1 probability per class ID, so we expect rank(logits) - rank(labels) == 1; # therefore, expected_rank_diff=1. labels, logits, weights = _remove_squeezable_dimensions( labels, logits, weights, expected_rank_diff=1) losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name="xentropy") return compute_weighted_loss( losses, weights, scope, loss_collection, reduction=reduction)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/losses/losses_impl.py
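A hedged usage sketch (not part of losses_impl.py) showing how the default SUM_BY_NONZERO_WEIGHTS reduction and Reduction.NONE differ for mean_squared_error; it assumes TF 1.x graph mode, and the expected values are worked out by hand in the comments.

import tensorflow.compat.v1 as tf

labels = tf.constant([[0.0], [1.0], [2.0]])
preds = tf.constant([[1.0], [1.0], [0.0]])
weights = tf.constant([[1.0], [0.0], [1.0]])

# Squared errors are [1, 0, 4]; weighted they stay [1, 0, 4] with two
# non-zero weights, so the default reduction gives 5 / 2 = 2.5.
mse = tf.losses.mean_squared_error(labels, preds, weights=weights)

# Reduction.NONE returns the unreduced weighted losses instead.
per_elem = tf.losses.mean_squared_error(
    labels, preds, weights=weights, reduction=tf.losses.Reduction.NONE)

with tf.Session() as sess:
  print(sess.run(mse))       # 2.5
  print(sess.run(per_elem))  # [[1.], [0.], [4.]]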
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for losses util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops.losses import util from tensorflow.python.platform import test class LossesUtilTest(test.TestCase): @test_util.run_deprecated_v1 def testGetRegularizationLoss(self): # Empty regularization collection should evaluate to 0.0. with self.cached_session(): self.assertEqual(0.0, util.get_regularization_loss().eval()) # Loss should sum. ops.add_to_collection( ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(2.0)) ops.add_to_collection( ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(3.0)) with self.cached_session(): self.assertEqual(5.0, util.get_regularization_loss().eval()) # Check scope capture mechanism. with ops.name_scope('scope1'): ops.add_to_collection( ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(-1.0)) with self.cached_session(): self.assertEqual(-1.0, util.get_regularization_loss('scope1').eval()) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/losses/util_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Loss operations for use in neural networks. Note: All the losses are added to the `GraphKeys.LOSSES` collection by default. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.python.ops.losses.losses_impl import * from tensorflow.python.ops.losses.util import * # pylint: enable=wildcard-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/losses/losses.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like a Householder transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorHouseholder",] @tf_export("linalg.LinearOperatorHouseholder") class LinearOperatorHouseholder(linear_operator.LinearOperator): """`LinearOperator` acting like a [batch] of Householder transformations. This operator acts like a [batch] of householder reflections with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorHouseholder` is initialized with a (batch) vector. A Householder reflection, defined via a vector `v`, which reflects points in `R^n` about the hyperplane orthogonal to `v` and through the origin. ```python # Create a 2 x 2 householder transform. vec = [1 / np.sqrt(2), 1. / np.sqrt(2)] operator = LinearOperatorHouseholder(vec) operator.to_dense() ==> [[0., -1.] [-1., -0.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, reflection_axis, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorHouseholder"): r"""Initialize a `LinearOperatorHouseholder`. 
Args: reflection_axis: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. The vector defining the hyperplane to reflect about. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. This is autoset to true is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices This is autoset to false. is_square: Expect that this operator acts like square [batch] matrices. This is autoset to true. name: A name for this `LinearOperator`. Raises: ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is not `False` or `is_square` is not `True`. """ with ops.name_scope(name, values=[reflection_axis]): self._reflection_axis = linear_operator_util.convert_nonref_to_tensor( reflection_axis, name="reflection_axis") self._check_reflection_axis(self._reflection_axis) # Check and auto-set hints. if is_self_adjoint is False: # pylint:disable=g-bool-id-comparison raise ValueError("A Householder operator is always self adjoint.") else: is_self_adjoint = True if is_positive_definite is True: # pylint:disable=g-bool-id-comparison raise ValueError( "A Householder operator is always non-positive definite.") else: is_positive_definite = False if is_square is False: # pylint:disable=g-bool-id-comparison raise ValueError("A Householder operator is always square.") is_square = True super(LinearOperatorHouseholder, self).__init__( dtype=self._reflection_axis.dtype, graph_parents=[self._reflection_axis], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_reflection_axis(self, reflection_axis): """Static check of reflection_axis.""" if (reflection_axis.shape.ndims is not None and reflection_axis.shape.ndims < 1): raise ValueError( "Argument reflection_axis must have at least 1 dimension. " "Found: %s" % reflection_axis) def _shape(self): # If d_shape = [5, 3], we return [5, 3, 3]. d_shape = self._reflection_axis.shape return d_shape.concatenate(d_shape[-1:]) def _shape_tensor(self): d_shape = array_ops.shape(self._reflection_axis) k = d_shape[-1] return array_ops.concat((d_shape, [k]), 0) def _assert_non_singular(self): return control_flow_ops.no_op("assert_non_singular") def _assert_positive_definite(self): raise errors.InvalidArgumentError( node_def=None, op=None, message="Householder operators are always " "non-positive definite.") def _assert_self_adjoint(self): return control_flow_ops.no_op("assert_self_adjoint") def _matmul(self, x, adjoint=False, adjoint_arg=False): # Given a vector `v`, we would like to reflect `x` about the hyperplane # orthogonal to `v` going through the origin. We first project `x` to `v` # to get v * dot(v, x) / dot(v, v). After we project, we can reflect the # projection about the hyperplane by flipping sign to get # -v * dot(v, x) / dot(v, v). Finally, we can add back the component # that is orthogonal to v. This is invariant under reflection, since the # whole hyperplane is invariant. This component is equal to x - v * dot(v, # x) / dot(v, v), giving the formula x - 2 * v * dot(v, x) / dot(v, v) # for the reflection. 
# Note that because this is a reflection, it lies in O(n) (for real vector # spaces) or U(n) (for complex vector spaces), and thus is its own adjoint. reflection_axis = ops.convert_to_tensor(self.reflection_axis) x = linalg.adjoint(x) if adjoint_arg else x normalized_axis = reflection_axis / linalg.norm( reflection_axis, axis=-1, keepdims=True) mat = normalized_axis[..., array_ops.newaxis] x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True) return x - 2 * mat * x_dot_normalized_v def _trace(self): # We have (n - 1) +1 eigenvalues and a single -1 eigenvalue. return math_ops.cast( self.domain_dimension_tensor() - 2, self.dtype) * array_ops.ones( shape=self.batch_shape_tensor(), dtype=self.dtype) def _determinant(self): # For householder transformations, the determinant is -1. return -array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype) def _log_abs_determinant(self): # Orthogonal matrix -> log|Q| = 0. return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype) def _solve(self, rhs, adjoint=False, adjoint_arg=False): # A householder reflection is a reflection, hence is idempotent. Thus we # can just apply a matmul. return self._matmul(rhs, adjoint, adjoint_arg) def _to_dense(self): normalized_axis = self.reflection_axis / linalg.norm( self.reflection_axis, axis=-1, keepdims=True) mat = normalized_axis[..., array_ops.newaxis] matrix = -2 * math_ops.matmul(mat, mat, adjoint_b=True) return array_ops.matrix_set_diag( matrix, 1. + array_ops.matrix_diag_part(matrix)) def _diag_part(self): normalized_axis = self.reflection_axis / linalg.norm( self.reflection_axis, axis=-1, keepdims=True) return 1. - 2 * normalized_axis * math_ops.conj(normalized_axis) @property def reflection_axis(self): return self._reflection_axis
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_householder.py
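The `_matmul` above applies the reflection formula `x - 2 v (v^T x) / (v^T v)` without ever materializing the `N x N` matrix. A minimal usage sketch of the exported class, assuming eager execution is enabled (the default in TF 2.x; on the 1.15 branch shown here you would enable eager mode or evaluate the tensors in a session):

```python
import numpy as np
import tensorflow as tf

# Reflection axis v; the operator represents H = I - 2 v v^T / (v^T v).
vec = tf.constant([1. / np.sqrt(2.), 1. / np.sqrt(2.)], dtype=tf.float32)
operator = tf.linalg.LinearOperatorHouseholder(vec)

# Dense form of this reflection: [[0., -1.], [-1., 0.]].
print(operator.to_dense())

# Reflecting twice recovers the original points, since H is its own inverse
# (which is why _solve above simply calls _matmul).
x = tf.constant([[1.], [2.]], dtype=tf.float32)
y = operator.matmul(x)
print(operator.matmul(y))  # ~ [[1.], [2.]]

# A single reflection has determinant -1, so log|det(H)| = 0.
print(operator.determinant())          # -1.
print(operator.log_abs_determinant())  # 0.
```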
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like a diagonal matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorDiag",] @tf_export("linalg.LinearOperatorDiag") class LinearOperatorDiag(linear_operator.LinearOperator): """`LinearOperator` acting like a [batch] square diagonal matrix. This operator acts like a [batch] diagonal matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorDiag` is initialized with a (batch) vector. ```python # Create a 2 x 2 diagonal linear operator. diag = [1., -1.] operator = LinearOperatorDiag(diag) operator.to_dense() ==> [[1., 0.] [0., -1.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 4 linear operators. diag = tf.random.normal(shape=[2, 3, 4]) operator = LinearOperatorDiag(diag) # Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible # since the batch dimensions, [2, 1], are broadcast to # operator.batch_shape = [2, 3]. y = tf.random.normal(shape=[2, 1, 4, 2]) x = operator.solve(y) ==> operator.matmul(x) = y ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Performance Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` involves `N * R` multiplications. * `operator.solve(x)` involves `N` divisions and `N * R` multiplications. * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. 
These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, diag, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorDiag"): r"""Initialize a `LinearOperatorDiag`. Args: diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. The diagonal of the operator. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `diag.dtype` is real, this is auto-set to `True`. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: TypeError: If `diag.dtype` is not an allowed type. ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`. """ with ops.name_scope(name, values=[diag]): self._diag = linear_operator_util.convert_nonref_to_tensor( diag, name="diag") self._check_diag(self._diag) # Check and auto-set hints. if not self._diag.dtype.is_complex: if is_self_adjoint is False: raise ValueError("A real diagonal operator is always self adjoint.") else: is_self_adjoint = True if is_square is False: raise ValueError("Only square diagonal operators currently supported.") is_square = True super(LinearOperatorDiag, self).__init__( dtype=self._diag.dtype, graph_parents=[self._diag], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_diag(self, diag): """Static check of diag.""" if diag.shape.ndims is not None and diag.shape.ndims < 1: raise ValueError("Argument diag must have at least 1 dimension. " "Found: %s" % diag) def _shape(self): # If d_shape = [5, 3], we return [5, 3, 3]. d_shape = self._diag.shape return d_shape.concatenate(d_shape[-1:]) def _shape_tensor(self): d_shape = array_ops.shape(self._diag) k = d_shape[-1] return array_ops.concat((d_shape, [k]), 0) def _assert_non_singular(self): return linear_operator_util.assert_no_entries_with_modulus_zero( self._diag, message="Singular operator: Diagonal contained zero values.") def _assert_positive_definite(self): if self.dtype.is_complex: message = ( "Diagonal operator had diagonal entries with non-positive real part, " "thus was not positive definite.") else: message = ( "Real diagonal operator had non-positive diagonal entries, " "thus was not positive definite.") return check_ops.assert_positive( math_ops.real(self._diag), message=message) def _assert_self_adjoint(self): return linear_operator_util.assert_zero_imag_part( self._diag, message=( "This diagonal operator contained non-zero imaginary values. 
" " Thus it was not self-adjoint.")) def _matmul(self, x, adjoint=False, adjoint_arg=False): diag_term = math_ops.conj(self._diag) if adjoint else self._diag x = linalg.adjoint(x) if adjoint_arg else x diag_mat = array_ops.expand_dims(diag_term, -1) return diag_mat * x def _matvec(self, x, adjoint=False): diag_term = math_ops.conj(self._diag) if adjoint else self._diag return diag_term * x def _determinant(self): return math_ops.reduce_prod(self._diag, axis=[-1]) def _log_abs_determinant(self): log_det = math_ops.reduce_sum( math_ops.log(math_ops.abs(self._diag)), axis=[-1]) if self.dtype.is_complex: log_det = math_ops.cast(log_det, dtype=self.dtype) return log_det def _solve(self, rhs, adjoint=False, adjoint_arg=False): diag_term = math_ops.conj(self._diag) if adjoint else self._diag rhs = linalg.adjoint(rhs) if adjoint_arg else rhs inv_diag_mat = array_ops.expand_dims(1. / diag_term, -1) return rhs * inv_diag_mat def _to_dense(self): return array_ops.matrix_diag(self._diag) def _diag_part(self): return self.diag def _add_to_tensor(self, x): x_diag = array_ops.matrix_diag_part(x) new_diag = self._diag + x_diag return array_ops.matrix_set_diag(x, new_diag) @property def diag(self): return self._diag
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_diag.py
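Because only the diagonal is stored, `matmul` reduces to an element-wise scaling and `solve` to an element-wise division, exactly as in `_matmul` and `_solve` above. A short sketch under the same eager-execution assumption:

```python
import tensorflow as tf

diag = tf.constant([1., -1., 2.])
operator = tf.linalg.LinearOperatorDiag(diag)

# Dense form is a 3 x 3 matrix with `diag` on the main diagonal.
print(operator.to_dense())

# matmul scales each row of x by the corresponding diagonal entry.
x = tf.ones(shape=[3, 2])
print(operator.matmul(x))   # rows scaled by 1., -1., 2.

# solve divides by the diagonal entries instead.
print(operator.solve(x))    # rows scaled by 1., -1., 0.5

# The determinant is the product of the diagonal entries;
# log_abs_determinant sums the logs of their absolute values.
print(operator.determinant())          # -2.
print(operator.log_abs_determinant())  # log(2)
```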
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Inverts a non-singular `LinearOperator`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = [] @tf_export("linalg.LinearOperatorInversion") class LinearOperatorInversion(linear_operator.LinearOperator): """`LinearOperator` representing the inverse of another operator. This operator represents the inverse of another operator. ```python # Create a 2 x 2 linear operator. operator = LinearOperatorFullMatrix([[1., 0.], [0., 2.]]) operator_inv = LinearOperatorInversion(operator) operator_inv.to_dense() ==> [[1., 0.] [0., 0.5]] operator_inv.shape ==> [2, 2] operator_inv.log_abs_determinant() ==> - log(2) x = ... Shape [2, 4] Tensor operator_inv.matmul(x) ==> Shape [2, 4] Tensor, equal to operator.solve(x) ``` #### Performance The performance of `LinearOperatorInversion` depends on the underlying operators performance: `solve` and `matmul` are swapped, and determinant is inverted. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, operator, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None): r"""Initialize a `LinearOperatorInversion`. `LinearOperatorInversion` is initialized with an operator `A`. The `solve` and `matmul` methods are effectively swapped. E.g. ``` A = MyLinearOperator(...) B = LinearOperatorInversion(A) x = [....] # a vector assert A.matvec(x) == B.solvevec(x) ``` Args: operator: `LinearOperator` object. If `operator.is_non_singular == False`, an exception is raised. We do allow `operator.is_non_singular == None`, in which case this operator will have `is_non_singular == None`. Similarly for `is_self_adjoint` and `is_positive_definite`. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. 
See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Default is `operator.name + "_inv"`. Raises: ValueError: If `operator.is_non_singular` is False. """ self._operator = operator # Auto-set and check hints. if operator.is_non_singular is False or is_non_singular is False: raise ValueError( "operator and supplied hints must have `is_non_singular` equal to " "`True` or `None`. Found %s, %s" % (operator.is_non_singular, is_non_singular)) if operator.is_square is False or is_square is False: raise ValueError( "operator and supplied hints must have `is_square` equal to " "`True` or `None`. Found %s, %s" % (operator.is_square, is_square)) # The congruency of is_non_singular and is_self_adjoint was checked in the # base operator. Other hints are, in this special case of inversion, ones # that must be the same for base/derived operator. combine_hint = ( linear_operator_util.use_operator_or_provided_hint_unless_contradicting) is_square = combine_hint( operator, "is_square", is_square, "An operator is square if and only if its inverse is square.") is_non_singular = combine_hint( operator, "is_non_singular", is_non_singular, "An operator is non-singular if and only if its inverse is " "non-singular.") is_self_adjoint = combine_hint( operator, "is_self_adjoint", is_self_adjoint, "An operator is self-adjoint if and only if its inverse is " "self-adjoint.") is_positive_definite = combine_hint( operator, "is_positive_definite", is_positive_definite, "An operator is positive-definite if and only if its inverse is " "positive-definite.") # Initialization. if name is None: name = operator.name + "_inv" with ops.name_scope(name, values=operator.graph_parents): super(LinearOperatorInversion, self).__init__( dtype=operator.dtype, graph_parents=operator.graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) @property def operator(self): """The operator before inversion.""" return self._operator def _assert_non_singular(self): return self.operator.assert_non_singular() def _assert_positive_definite(self): return self.operator.assert_positive_definite() def _assert_self_adjoint(self): return self.operator.assert_self_adjoint() def _shape(self): return self.operator.shape def _shape_tensor(self): return self.operator.shape_tensor() def _matmul(self, x, adjoint=False, adjoint_arg=False): return self.operator.solve(x, adjoint=adjoint, adjoint_arg=adjoint_arg) def _determinant(self): return 1. / self.operator.determinant() def _log_abs_determinant(self): return -1. * self.operator.log_abs_determinant() def _solve(self, rhs, adjoint=False, adjoint_arg=False): return self.operator.matmul(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_inversion.py
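As the `__init__` docstring states, wrapping an operator in `LinearOperatorInversion` effectively swaps its `matmul` and `solve` methods and negates its log-determinant. A minimal sketch (assuming eager execution; the wrapped operator must not be flagged singular):

```python
import tensorflow as tf

base = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., 2.]],
                                          is_non_singular=True)
inverse = tf.linalg.LinearOperatorInversion(base)

# matmul on the inverse is solve on the base operator, and vice versa.
x = tf.constant([[1., 2.], [3., 4.]])
print(inverse.matmul(x))   # same result as base.solve(x)
print(inverse.solve(x))    # same result as base.matmul(x)

# Determinants are reciprocals, so the log-abs-determinants negate.
print(inverse.log_abs_determinant())   # -log(2)
```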
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like a lower triangular matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = [ "LinearOperatorLowerTriangular", ] @tf_export("linalg.LinearOperatorLowerTriangular") class LinearOperatorLowerTriangular(linear_operator.LinearOperator): """`LinearOperator` acting like a [batch] square lower triangular matrix. This operator acts like a [batch] lower triangular matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. `LinearOperatorLowerTriangular` is initialized with a `Tensor` having dimensions `[B1,...,Bb, N, N]`. The upper triangle of the last two dimensions is ignored. ```python # Create a 2 x 2 lower-triangular linear operator. tril = [[1., 2.], [3., 4.]] operator = LinearOperatorLowerTriangular(tril) # The upper triangle is ignored. operator.to_dense() ==> [[1., 0.] [3., 4.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 4 linear operators. tril = tf.random.normal(shape=[2, 3, 4, 4]) operator = LinearOperatorLowerTriangular(tril) ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [B1,...,Bb] + [N, R], with R >= 0. ``` #### Performance Suppose `operator` is a `LinearOperatorLowerTriangular` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` involves `N^2 * R` multiplications. * `operator.solve(x)` involves `N * R` size `N` back-substitutions. * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. 
* If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, tril, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorLowerTriangular"): r"""Initialize a `LinearOperatorLowerTriangular`. Args: tril: Shape `[B1,...,Bb, N, N]` with `b >= 0`, `N >= 0`. The lower triangular part of `tril` defines this operator. The strictly upper triangle is ignored. is_non_singular: Expect that this operator is non-singular. This operator is non-singular if and only if its diagonal elements are all non-zero. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. This operator is self-adjoint only if it is diagonal with real-valued diagonal entries. In this case it is advised to use `LinearOperatorDiag`. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: ValueError: If `is_square` is `False`. """ if is_square is False: raise ValueError( "Only square lower triangular operators supported at this time.") is_square = True with ops.name_scope(name, values=[tril]): self._tril = linear_operator_util.convert_nonref_to_tensor(tril, name="tril") self._check_tril(self._tril) super(LinearOperatorLowerTriangular, self).__init__( dtype=self._tril.dtype, graph_parents=[self._tril], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_tril(self, tril): """Static check of the `tril` argument.""" if tril.shape.ndims is not None and tril.shape.ndims < 2: raise ValueError( "Argument tril must have at least 2 dimensions. Found: %s" % tril) def _get_tril(self): """Gets the `tril` kwarg, with upper part zero-d out.""" return array_ops.matrix_band_part(self._tril, -1, 0) def _get_diag(self): """Gets the diagonal part of `tril` kwarg.""" return array_ops.matrix_diag_part(self._tril) def _shape(self): return self._tril.shape def _shape_tensor(self): return array_ops.shape(self._tril) def _assert_non_singular(self): return linear_operator_util.assert_no_entries_with_modulus_zero( self._get_diag(), message="Singular operator: Diagonal contained zero values.") def _matmul(self, x, adjoint=False, adjoint_arg=False): return math_ops.matmul( self._get_tril(), x, adjoint_a=adjoint, adjoint_b=adjoint_arg) def _determinant(self): return math_ops.reduce_prod(self._get_diag(), axis=[-1]) def _log_abs_determinant(self): return math_ops.reduce_sum( math_ops.log(math_ops.abs(self._get_diag())), axis=[-1]) def _solve(self, rhs, adjoint=False, adjoint_arg=False): rhs = linalg.adjoint(rhs) if adjoint_arg else rhs return linear_operator_util.matrix_triangular_solve_with_broadcast( self._get_tril(), rhs, lower=True, adjoint=adjoint) def _to_dense(self): return self._get_tril()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_lower_triangular.py
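A usage sketch mirroring the docstring example, again assuming eager execution. The strictly upper triangle of the input is discarded by `_get_tril`, and `solve` runs a broadcasted triangular solve rather than a general inverse:

```python
import tensorflow as tf

tril = tf.constant([[1., 2.], [3., 4.]])
operator = tf.linalg.LinearOperatorLowerTriangular(tril)

# The strictly upper triangle of `tril` is ignored.
print(operator.to_dense())   # [[1., 0.], [3., 4.]]

# solve performs back-substitution against the lower-triangular part.
rhs = tf.constant([[1.], [7.]])
x = operator.solve(rhs)
print(operator.matmul(x))    # recovers rhs

# The determinant is the product of the diagonal entries: 1. * 4. = 4.
print(operator.determinant())
```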
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like the identity matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = [ "LinearOperatorIdentity", "LinearOperatorScaledIdentity", ] class BaseLinearOperatorIdentity(linear_operator.LinearOperator): """Base class for Identity operators.""" def _check_num_rows_possibly_add_asserts(self): """Static check of init arg `num_rows`, possibly add asserts.""" # Possibly add asserts. if self._assert_proper_shapes: self._num_rows = control_flow_ops.with_dependencies([ check_ops.assert_rank( self._num_rows, 0, message="Argument num_rows must be a 0-D Tensor."), check_ops.assert_non_negative( self._num_rows, message="Argument num_rows must be non-negative."), ], self._num_rows) # Static checks. if not self._num_rows.dtype.is_integer: raise TypeError("Argument num_rows must be integer type. Found:" " %s" % self._num_rows) num_rows_static = self._num_rows_static if num_rows_static is None: return # Cannot do any other static checks. if num_rows_static.ndim != 0: raise ValueError("Argument num_rows must be a 0-D Tensor. Found:" " %s" % num_rows_static) if num_rows_static < 0: raise ValueError("Argument num_rows must be non-negative. 
Found:" " %s" % num_rows_static) def _min_matrix_dim(self): """Minimum of domain/range dimension, if statically available, else None.""" domain_dim = tensor_shape.dimension_value(self.domain_dimension) range_dim = tensor_shape.dimension_value(self.range_dimension) if domain_dim is None or range_dim is None: return None return min(domain_dim, range_dim) def _min_matrix_dim_tensor(self): """Minimum of domain/range dimension, as a tensor.""" return math_ops.reduce_min(self.shape_tensor()[-2:]) def _ones_diag(self): """Returns the diagonal of this operator as all ones.""" if self.shape.is_fully_defined(): d_shape = self.batch_shape.concatenate([self._min_matrix_dim()]) else: d_shape = array_ops.concat( [self.batch_shape_tensor(), [self._min_matrix_dim_tensor()]], axis=0) return array_ops.ones(shape=d_shape, dtype=self.dtype) @tf_export("linalg.LinearOperatorIdentity") class LinearOperatorIdentity(BaseLinearOperatorIdentity): """`LinearOperator` acting like a [batch] square identity matrix. This operator acts like a [batch] identity matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorIdentity` is initialized with `num_rows`, and optionally `batch_shape`, and `dtype` arguments. If `batch_shape` is `None`, this operator efficiently passes through all arguments. If `batch_shape` is provided, broadcasting may occur, which will require making copies. ```python # Create a 2 x 2 identity matrix. operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32) operator.to_dense() ==> [[1., 0.] [0., 1.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> 0. x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor, same as x. y = tf.random.normal(shape=[3, 2, 4]) # Note that y.shape is compatible with operator.shape because operator.shape # is broadcast to [3, 2, 2]. # This broadcast does NOT require copying data, since we can infer that y # will be passed through without changing shape. We are always able to infer # this if the operator has no batch_shape. x = operator.solve(y) ==> Shape [3, 2, 4] Tensor, same as y. # Create a 2-batch of 2x2 identity matrices operator = LinearOperatorIdentity(num_rows=2, batch_shape=[2]) operator.to_dense() ==> [[[1., 0.] [0., 1.]], [[1., 0.] [0., 1.]]] # Here, even though the operator has a batch shape, the input is the same as # the output, so x can be passed through without a copy. The operator is able # to detect that no broadcast is necessary because both x and the operator # have statically defined shape. x = ... Shape [2, 2, 3] operator.matmul(x) ==> Shape [2, 2, 3] Tensor, same as x # Here the operator and x have different batch_shape, and are broadcast. # This requires a copy, since the output is different size than the input. x = ... Shape [1, 2, 3] operator.matmul(x) ==> Shape [2, 2, 3] Tensor, equal to [x, x] ``` ### Shape compatibility This operator acts on [batch] matrix with compatible shape. 
`x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` ### Performance If `batch_shape` initialization arg is `None`: * `operator.matmul(x)` is `O(1)` * `operator.solve(x)` is `O(1)` * `operator.determinant()` is `O(1)` If `batch_shape` initialization arg is provided, and static checks cannot rule out the need to broadcast: * `operator.matmul(x)` is `O(D1*...*Dd*N*R)` * `operator.solve(x)` is `O(D1*...*Dd*N*R)` * `operator.determinant()` is `O(B1*...*Bb)` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, num_rows, batch_shape=None, dtype=None, is_non_singular=True, is_self_adjoint=True, is_positive_definite=True, is_square=True, assert_proper_shapes=False, name="LinearOperatorIdentity"): r"""Initialize a `LinearOperatorIdentity`. The `LinearOperatorIdentity` is initialized with arguments defining `dtype` and shape. This operator is able to broadcast the leading (batch) dimensions, which sometimes requires copying data. If `batch_shape` is `None`, the operator can take arguments of any batch shape without copying. See examples. Args: num_rows: Scalar non-negative integer `Tensor`. Number of rows in the corresponding identity matrix. batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading dimensions. If `None`, this operator has no leading dimensions. dtype: Data type of the matrix that this operator represents. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. assert_proper_shapes: Python `bool`. If `False`, only perform static checks that initialization and method arguments have proper shape. If `True`, and static checks are inconclusive, add asserts to the graph. name: A name for this `LinearOperator` Raises: ValueError: If `num_rows` is determined statically to be non-scalar, or negative. ValueError: If `batch_shape` is determined statically to not be 1-D, or negative. ValueError: If any of the following is not `True`: `{is_self_adjoint, is_non_singular, is_positive_definite}`. TypeError: If `num_rows` or `batch_shape` is ref-type (e.g. Variable). 
""" dtype = dtype or dtypes.float32 self._assert_proper_shapes = assert_proper_shapes with ops.name_scope(name): dtype = dtypes.as_dtype(dtype) if not is_self_adjoint: raise ValueError("An identity operator is always self adjoint.") if not is_non_singular: raise ValueError("An identity operator is always non-singular.") if not is_positive_definite: raise ValueError("An identity operator is always positive-definite.") if not is_square: raise ValueError("An identity operator is always square.") super(LinearOperatorIdentity, self).__init__( dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) linear_operator_util.assert_not_ref_type(num_rows, "num_rows") linear_operator_util.assert_not_ref_type(batch_shape, "batch_shape") self._num_rows = linear_operator_util.shape_tensor( num_rows, name="num_rows") self._num_rows_static = tensor_util.constant_value(self._num_rows) self._check_num_rows_possibly_add_asserts() if batch_shape is None: self._batch_shape_arg = None else: self._batch_shape_arg = linear_operator_util.shape_tensor( batch_shape, name="batch_shape_arg") self._batch_shape_static = tensor_util.constant_value( self._batch_shape_arg) self._check_batch_shape_possibly_add_asserts() def _shape(self): matrix_shape = tensor_shape.TensorShape((self._num_rows_static, self._num_rows_static)) if self._batch_shape_arg is None: return matrix_shape batch_shape = tensor_shape.TensorShape(self._batch_shape_static) return batch_shape.concatenate(matrix_shape) def _shape_tensor(self): matrix_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0) if self._batch_shape_arg is None: return matrix_shape return array_ops.concat((self._batch_shape_arg, matrix_shape), 0) def _assert_non_singular(self): return control_flow_ops.no_op("assert_non_singular") def _assert_positive_definite(self): return control_flow_ops.no_op("assert_positive_definite") def _assert_self_adjoint(self): return control_flow_ops.no_op("assert_self_adjoint") def _possibly_broadcast_batch_shape(self, x): """Return 'x', possibly after broadcasting the leading dimensions.""" # If we have no batch shape, our batch shape broadcasts with everything! if self._batch_shape_arg is None: return x # Static attempt: # If we determine that no broadcast is necessary, pass x through # If we need a broadcast, add to an array of zeros. # # special_shape is the shape that, when broadcast with x's shape, will give # the correct broadcast_shape. Note that # We have already verified the second to last dimension of self.shape # matches x's shape in assert_compatible_matrix_dimensions. # Also, the final dimension of 'x' can have any shape. # Therefore, the final two dimensions of special_shape are 1's. special_shape = self.batch_shape.concatenate([1, 1]) bshape = array_ops.broadcast_static_shape(x.shape, special_shape) if special_shape.is_fully_defined(): # bshape.is_fully_defined iff special_shape.is_fully_defined. if bshape == x.shape: return x # Use the built in broadcasting of addition. zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype) return x + zeros # Dynamic broadcast: # Always add to an array of zeros, rather than using a "cond", since a # cond would require copying data from GPU --> CPU. 
special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0) zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype) return x + zeros def _matmul(self, x, adjoint=False, adjoint_arg=False): # Note that adjoint has no effect since this matrix is self-adjoint. x = linalg.adjoint(x) if adjoint_arg else x if self._assert_proper_shapes: aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x) x = control_flow_ops.with_dependencies([aps], x) return self._possibly_broadcast_batch_shape(x) def _determinant(self): return array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype) def _log_abs_determinant(self): return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype) def _solve(self, rhs, adjoint=False, adjoint_arg=False): return self._matmul(rhs, adjoint_arg=adjoint_arg) def _trace(self): # Get Tensor of all ones of same shape as self.batch_shape. if self.batch_shape.is_fully_defined(): batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype) else: batch_of_ones = array_ops.ones( shape=self.batch_shape_tensor(), dtype=self.dtype) if self._min_matrix_dim() is not None: return self._min_matrix_dim() * batch_of_ones else: return (math_ops.cast(self._min_matrix_dim_tensor(), self.dtype) * batch_of_ones) def _diag_part(self): return self._ones_diag() def add_to_tensor(self, mat, name="add_to_tensor"): """Add matrix represented by this operator to `mat`. Equiv to `I + mat`. Args: mat: `Tensor` with same `dtype` and shape broadcastable to `self`. name: A name to give this `Op`. Returns: A `Tensor` with broadcast shape and same `dtype` as `self`. """ with self._name_scope(name): mat = ops.convert_to_tensor(mat, name="mat") mat_diag = array_ops.matrix_diag_part(mat) new_diag = 1 + mat_diag return array_ops.matrix_set_diag(mat, new_diag) def _check_num_rows_possibly_add_asserts(self): """Static check of init arg `num_rows`, possibly add asserts.""" # Possibly add asserts. if self._assert_proper_shapes: self._num_rows = control_flow_ops.with_dependencies([ check_ops.assert_rank( self._num_rows, 0, message="Argument num_rows must be a 0-D Tensor."), check_ops.assert_non_negative( self._num_rows, message="Argument num_rows must be non-negative."), ], self._num_rows) # Static checks. if not self._num_rows.dtype.is_integer: raise TypeError("Argument num_rows must be integer type. Found:" " %s" % self._num_rows) num_rows_static = self._num_rows_static if num_rows_static is None: return # Cannot do any other static checks. if num_rows_static.ndim != 0: raise ValueError("Argument num_rows must be a 0-D Tensor. Found:" " %s" % num_rows_static) if num_rows_static < 0: raise ValueError("Argument num_rows must be non-negative. Found:" " %s" % num_rows_static) def _check_batch_shape_possibly_add_asserts(self): """Static check of init arg `batch_shape`, possibly add asserts.""" if self._batch_shape_arg is None: return # Possibly add asserts if self._assert_proper_shapes: self._batch_shape_arg = control_flow_ops.with_dependencies([ check_ops.assert_rank( self._batch_shape_arg, 1, message="Argument batch_shape must be a 1-D Tensor."), check_ops.assert_non_negative( self._batch_shape_arg, message="Argument batch_shape must be non-negative."), ], self._batch_shape_arg) # Static checks if not self._batch_shape_arg.dtype.is_integer: raise TypeError("Argument batch_shape must be integer type. Found:" " %s" % self._batch_shape_arg) if self._batch_shape_static is None: return # Cannot do any other static checks. 
if self._batch_shape_static.ndim != 1: raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:" " %s" % self._batch_shape_static) if np.any(self._batch_shape_static < 0): raise ValueError("Argument batch_shape must be non-negative. Found:" "%s" % self._batch_shape_static) @tf_export("linalg.LinearOperatorScaledIdentity") class LinearOperatorScaledIdentity(BaseLinearOperatorIdentity): """`LinearOperator` acting like a scaled [batch] identity matrix `A = c I`. This operator acts like a scaled [batch] identity matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is a scaled version of the `N x N` identity matrix. `LinearOperatorIdentity` is initialized with `num_rows`, and a `multiplier` (a `Tensor`) of shape `[B1,...,Bb]`. `N` is set to `num_rows`, and the `multiplier` determines the scale for each batch member. ```python # Create a 2 x 2 scaled identity matrix. operator = LinearOperatorIdentity(num_rows=2, multiplier=3.) operator.to_dense() ==> [[3., 0.] [0., 3.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> 2 * Log[3] x = ... Shape [2, 4] Tensor operator.matmul(x) ==> 3 * x y = tf.random.normal(shape=[3, 2, 4]) # Note that y.shape is compatible with operator.shape because operator.shape # is broadcast to [3, 2, 2]. x = operator.solve(y) ==> 3 * x # Create a 2-batch of 2x2 identity matrices operator = LinearOperatorIdentity(num_rows=2, multiplier=5.) operator.to_dense() ==> [[[5., 0.] [0., 5.]], [[5., 0.] [0., 5.]]] x = ... Shape [2, 2, 3] operator.matmul(x) ==> 5 * x # Here the operator and x have different batch_shape, and are broadcast. x = ... Shape [1, 2, 3] operator.matmul(x) ==> 5 * x ``` ### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` ### Performance * `operator.matmul(x)` is `O(D1*...*Dd*N*R)` * `operator.solve(x)` is `O(D1*...*Dd*N*R)` * `operator.determinant()` is `O(D1*...*Dd)` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, num_rows, multiplier, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=True, assert_proper_shapes=False, name="LinearOperatorScaledIdentity"): r"""Initialize a `LinearOperatorScaledIdentity`. The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which determines the size of each identity matrix, and a `multiplier`, which defines `dtype`, batch shape, and scale of each matrix. This operator is able to broadcast the leading (batch) dimensions. Args: num_rows: Scalar non-negative integer `Tensor`. Number of rows in the corresponding identity matrix. multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar). 
is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. assert_proper_shapes: Python `bool`. If `False`, only perform static checks that initialization and method arguments have proper shape. If `True`, and static checks are inconclusive, add asserts to the graph. name: A name for this `LinearOperator` Raises: ValueError: If `num_rows` is determined statically to be non-scalar, or negative. """ self._assert_proper_shapes = assert_proper_shapes with ops.name_scope(name, values=[multiplier, num_rows]): self._multiplier = linear_operator_util.convert_nonref_to_tensor( multiplier, name="multiplier") # Check and auto-set hints. if not self._multiplier.dtype.is_complex: if is_self_adjoint is False: # pylint: disable=g-bool-id-comparison raise ValueError("A real diagonal operator is always self adjoint.") else: is_self_adjoint = True if not is_square: raise ValueError("A ScaledIdentity operator is always square.") linear_operator_util.assert_not_ref_type(num_rows, "num_rows") super(LinearOperatorScaledIdentity, self).__init__( dtype=self._multiplier.dtype.base_dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) self._num_rows = linear_operator_util.shape_tensor( num_rows, name="num_rows") self._num_rows_static = tensor_util.constant_value(self._num_rows) self._check_num_rows_possibly_add_asserts() self._num_rows_cast_to_dtype = math_ops.cast(self._num_rows, self.dtype) self._num_rows_cast_to_real_dtype = math_ops.cast(self._num_rows, self.dtype.real_dtype) def _shape(self): matrix_shape = tensor_shape.TensorShape((self._num_rows_static, self._num_rows_static)) batch_shape = self.multiplier.shape return batch_shape.concatenate(matrix_shape) def _shape_tensor(self): matrix_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0) batch_shape = array_ops.shape(self.multiplier) return array_ops.concat((batch_shape, matrix_shape), 0) def _assert_non_singular(self): return check_ops.assert_positive( math_ops.abs(self.multiplier), message="LinearOperator was singular") def _assert_positive_definite(self): return check_ops.assert_positive( math_ops.real(self.multiplier), message="LinearOperator was not positive definite.") def _assert_self_adjoint(self): imag_multiplier = math_ops.imag(self.multiplier) return check_ops.assert_equal( array_ops.zeros_like(imag_multiplier), imag_multiplier, message="LinearOperator was not self-adjoint") def _make_multiplier_matrix(self, conjugate=False): # Shape [B1,...Bb, 1, 1] multiplier_matrix = array_ops.expand_dims( array_ops.expand_dims(self.multiplier, -1), -1) if conjugate: multiplier_matrix = math_ops.conj(multiplier_matrix) return multiplier_matrix def _matmul(self, x, adjoint=False, adjoint_arg=False): x = linalg.adjoint(x) if adjoint_arg else x if self._assert_proper_shapes: aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x) x = control_flow_ops.with_dependencies([aps], x) return x * self._make_multiplier_matrix(conjugate=adjoint) def _determinant(self): 
return self.multiplier**self._num_rows_cast_to_dtype def _log_abs_determinant(self): return self._num_rows_cast_to_real_dtype * math_ops.log( math_ops.abs(self.multiplier)) def _solve(self, rhs, adjoint=False, adjoint_arg=False): rhs = linalg.adjoint(rhs) if adjoint_arg else rhs if self._assert_proper_shapes: aps = linear_operator_util.assert_compatible_matrix_dimensions(self, rhs) rhs = control_flow_ops.with_dependencies([aps], rhs) return rhs / self._make_multiplier_matrix(conjugate=adjoint) def _trace(self): # Get Tensor of all ones of same shape as self.batch_shape. if self.batch_shape.is_fully_defined(): batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype) else: batch_of_ones = array_ops.ones( shape=self.batch_shape_tensor(), dtype=self.dtype) if self._min_matrix_dim() is not None: return self.multiplier * self._min_matrix_dim() * batch_of_ones else: return (self.multiplier * math_ops.cast(self._min_matrix_dim_tensor(), self.dtype) * batch_of_ones) def _diag_part(self): return self._ones_diag() * self.multiplier[..., array_ops.newaxis] def add_to_tensor(self, mat, name="add_to_tensor"): """Add matrix represented by this operator to `mat`. Equiv to `I + mat`. Args: mat: `Tensor` with same `dtype` and shape broadcastable to `self`. name: A name to give this `Op`. Returns: A `Tensor` with broadcast shape and same `dtype` as `self`. """ with self._name_scope(name): # Shape [B1,...,Bb, 1] multiplier_vector = array_ops.expand_dims(self.multiplier, -1) # Shape [C1,...,Cc, M, M] mat = ops.convert_to_tensor(mat, name="mat") # Shape [C1,...,Cc, M] mat_diag = array_ops.matrix_diag_part(mat) # multiplier_vector broadcasts here. new_diag = multiplier_vector + mat_diag return array_ops.matrix_set_diag(mat, new_diag) @property def multiplier(self): """The [batch] scalar `Tensor`, `c` in `cI`.""" return self._multiplier
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_identity.py
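With no `batch_shape`, `LinearOperatorIdentity` passes its argument through in `O(1)`, while `LinearOperatorScaledIdentity` represents `A = c I` and scales or divides by the multiplier. A small sketch of both exported classes, assuming eager execution:

```python
import tensorflow as tf

# Identity operator: matmul and solve simply pass the argument through.
identity = tf.linalg.LinearOperatorIdentity(num_rows=2, dtype=tf.float32)
x = tf.constant([[1., 2.], [3., 4.]])
print(identity.matmul(x))               # same values as x
print(identity.log_abs_determinant())   # 0.

# Scaled identity A = c * I: matmul multiplies by c, solve divides by c.
scaled = tf.linalg.LinearOperatorScaledIdentity(num_rows=2, multiplier=3.)
print(scaled.matmul(x))                 # 3 * x
print(scaled.solve(x))                  # x / 3
print(scaled.log_abs_determinant())     # 2 * log(3)
```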
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Create a Block Diagonal operator from one or more `LinearOperators`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import common_shapes from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorBlockDiag"] @tf_export("linalg.LinearOperatorBlockDiag") class LinearOperatorBlockDiag(linear_operator.LinearOperator): """Combines one or more `LinearOperators` in to a Block Diagonal matrix. This operator combines one or more linear operators `[op1,...,opJ]`, building a new `LinearOperator`, whose underlying matrix representation is square and has each operator `opi` on the main diagonal, and zero's elsewhere. #### Shape compatibility If `opj` acts like a [batch] square matrix `Aj`, then `op_combined` acts like the [batch] square matrix formed by having each matrix `Aj` on the main diagonal. Each `opj` is required to represent a square matrix, and hence will have shape `batch_shape_j + [M_j, M_j]`. If `opj` has shape `batch_shape_j + [M_j, M_j]`, then the combined operator has shape `broadcast_batch_shape + [sum M_j, sum M_j]`, where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate batch shapes broadcast. Even if the combined shape is well defined, the combined operator's methods may fail due to lack of broadcasting ability in the defining operators' methods. ```python # Create a 4 x 4 linear operator combined of two 2 x 2 operators. operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) operator = LinearOperatorBlockDiag([operator_1, operator_2]) operator.to_dense() ==> [[1., 2., 0., 0.], [3., 4., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]] operator.shape ==> [4, 4] operator.log_abs_determinant() ==> scalar Tensor x1 = ... # Shape [2, 2] Tensor x2 = ... # Shape [2, 2] Tensor x = tf.concat([x1, x2], 0) # Shape [2, 4] Tensor operator.matmul(x) ==> tf.concat([operator_1.matmul(x1), operator_2.matmul(x2)]) # Create a [2, 3] batch of 4 x 4 linear operators. matrix_44 = tf.random.normal(shape=[2, 3, 4, 4]) operator_44 = LinearOperatorFullMatrix(matrix) # Create a [1, 3] batch of 5 x 5 linear operators. matrix_55 = tf.random.normal(shape=[1, 3, 5, 5]) operator_55 = LinearOperatorFullMatrix(matrix_55) # Combine to create a [2, 3] batch of 9 x 9 operators. 
operator_99 = LinearOperatorBlockDiag([operator_44, operator_55]) # Create a shape [2, 3, 9] vector. x = tf.random.normal(shape=[2, 3, 9]) operator_99.matmul(x) ==> Shape [2, 3, 9] Tensor ``` #### Performance The performance of `LinearOperatorBlockDiag` on any operation is equal to the sum of the individual operators' operations. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, operators, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=True, name=None): r"""Initialize a `LinearOperatorBlockDiag`. `LinearOperatorBlockDiag` is initialized with a list of operators `[op_1,...,op_J]`. Args: operators: Iterable of `LinearOperator` objects, each with the same `dtype` and composable shape. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. This is true by default, and will raise a `ValueError` otherwise. name: A name for this `LinearOperator`. Default is the individual operators names joined with `_o_`. Raises: TypeError: If all operators do not have the same `dtype`. ValueError: If `operators` is empty or are non-square. """ # Validate operators. check_ops.assert_proper_iterable(operators) operators = list(operators) if not operators: raise ValueError( "Expected a non-empty list of operators. Found: %s" % operators) self._operators = operators # Validate dtype. dtype = operators[0].dtype for operator in operators: if operator.dtype != dtype: name_type = (str((o.name, o.dtype)) for o in operators) raise TypeError( "Expected all operators to have the same dtype. Found %s" % " ".join(name_type)) # Auto-set and check hints. if all(operator.is_non_singular for operator in operators): if is_non_singular is False: raise ValueError( "The direct sum of non-singular operators is always non-singular.") is_non_singular = True if all(operator.is_self_adjoint for operator in operators): if is_self_adjoint is False: raise ValueError( "The direct sum of self-adjoint operators is always self-adjoint.") is_self_adjoint = True if all(operator.is_positive_definite for operator in operators): if is_positive_definite is False: raise ValueError( "The direct sum of positive definite operators is always " "positive definite.") is_positive_definite = True if not (is_square and all(operator.is_square for operator in operators)): raise ValueError( "Can only represent a block diagonal of square matrices.") # Initialization. 
graph_parents = [] for operator in operators: graph_parents.extend(operator.graph_parents) if name is None: # Using ds to mean direct sum. name = "_ds_".join(operator.name for operator in operators) with ops.name_scope(name, values=graph_parents): super(LinearOperatorBlockDiag, self).__init__( dtype=dtype, graph_parents=graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=True, name=name) @property def operators(self): return self._operators def _shape(self): # Get final matrix shape. domain_dimension = self.operators[0].domain_dimension range_dimension = self.operators[0].range_dimension for operator in self.operators[1:]: domain_dimension += operator.domain_dimension range_dimension += operator.range_dimension matrix_shape = tensor_shape.TensorShape([domain_dimension, range_dimension]) # Get broadcast batch shape. # broadcast_shape checks for compatibility. batch_shape = self.operators[0].batch_shape for operator in self.operators[1:]: batch_shape = common_shapes.broadcast_shape( batch_shape, operator.batch_shape) return batch_shape.concatenate(matrix_shape) def _shape_tensor(self): # Avoid messy broadcasting if possible. if self.shape.is_fully_defined(): return ops.convert_to_tensor( self.shape.as_list(), dtype=dtypes.int32, name="shape") domain_dimension = self.operators[0].domain_dimension_tensor() range_dimension = self.operators[0].range_dimension_tensor() for operator in self.operators[1:]: domain_dimension += operator.domain_dimension_tensor() range_dimension += operator.range_dimension_tensor() matrix_shape = array_ops.stack([domain_dimension, range_dimension]) # Dummy Tensor of zeros. Will never be materialized. zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor()) for operator in self.operators[1:]: zeros += array_ops.zeros(shape=operator.batch_shape_tensor()) batch_shape = array_ops.shape(zeros) return array_ops.concat((batch_shape, matrix_shape), 0) def _matmul(self, x, adjoint=False, adjoint_arg=False): split_dim = -1 if adjoint_arg else -2 # Split input by rows normally, and otherwise columns. split_x = self._split_input_into_blocks(x, axis=split_dim) result_list = [] for index, operator in enumerate(self.operators): result_list += [operator.matmul( split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)] result_list = linear_operator_util.broadcast_matrix_batch_dims( result_list) return array_ops.concat(result_list, axis=-2) def _determinant(self): result = self.operators[0].determinant() for operator in self.operators[1:]: result *= operator.determinant() return result def _log_abs_determinant(self): result = self.operators[0].log_abs_determinant() for operator in self.operators[1:]: result += operator.log_abs_determinant() return result def _solve(self, rhs, adjoint=False, adjoint_arg=False): split_dim = -1 if adjoint_arg else -2 # Split input by rows normally, and otherwise columns. split_rhs = self._split_input_into_blocks(rhs, axis=split_dim) solution_list = [] for index, operator in enumerate(self.operators): solution_list += [operator.solve( split_rhs[index], adjoint=adjoint, adjoint_arg=adjoint_arg)] solution_list = linear_operator_util.broadcast_matrix_batch_dims( solution_list) return array_ops.concat(solution_list, axis=-2) def _diag_part(self): diag_list = [] for operator in self.operators: # Extend the axis for broadcasting. 
diag_list += [operator.diag_part()[..., array_ops.newaxis]] diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list) diagonal = array_ops.concat(diag_list, axis=-2) return array_ops.squeeze(diagonal, axis=-1) def _trace(self): result = self.operators[0].trace() for operator in self.operators[1:]: result += operator.trace() return result def _to_dense(self): num_cols = 0 rows = [] broadcasted_blocks = [operator.to_dense() for operator in self.operators] broadcasted_blocks = linear_operator_util.broadcast_matrix_batch_dims( broadcasted_blocks) for block in broadcasted_blocks: batch_row_shape = array_ops.shape(block)[:-1] zeros_to_pad_before_shape = array_ops.concat( [batch_row_shape, [num_cols]], axis=-1) zeros_to_pad_before = array_ops.zeros( shape=zeros_to_pad_before_shape, dtype=block.dtype) num_cols += array_ops.shape(block)[-1] zeros_to_pad_after_shape = array_ops.concat( [batch_row_shape, [self.domain_dimension_tensor() - num_cols]], axis=-1) zeros_to_pad_after = array_ops.zeros( shape=zeros_to_pad_after_shape, dtype=block.dtype) rows.append(array_ops.concat( [zeros_to_pad_before, block, zeros_to_pad_after], axis=-1)) mat = array_ops.concat(rows, axis=-2) mat.set_shape(self.shape) return mat def _assert_non_singular(self): return control_flow_ops.group([ operator.assert_non_singular() for operator in self.operators]) def _assert_self_adjoint(self): return control_flow_ops.group([ operator.assert_self_adjoint() for operator in self.operators]) def _assert_positive_definite(self): return control_flow_ops.group([ operator.assert_positive_definite() for operator in self.operators]) def _split_input_into_blocks(self, x, axis=-1): """Split `x` into blocks matching `operators`'s `domain_dimension`. Specifically, if we have a block diagonal matrix, with block sizes `[M_j, M_j] j = 1..J`, this method splits `x` on `axis` into `J` tensors, whose shape at `axis` is `M_j`. Args: x: `Tensor`. `x` is split into `J` tensors. axis: Python `Integer` representing the axis to split `x` on. Returns: A list of `Tensor`s. """ block_sizes = [] if self.shape.is_fully_defined(): for operator in self.operators: block_sizes += [operator.domain_dimension.value] else: for operator in self.operators: block_sizes += [operator.domain_dimension_tensor()] return array_ops.split(x, block_sizes, axis=axis)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_block_diag.py
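A minimal usage sketch of the `LinearOperatorBlockDiag` class defined above, through its public `tf.linalg` export; the block choices and tensor values below are illustrative, not taken from the file itself.

```python
import tensorflow as tf

# Two square blocks of different sizes.
block_a = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
block_b = tf.linalg.LinearOperatorDiag([5., 6., 7.])

# The block-diagonal operator acts like a 5 x 5 matrix without materializing it.
op = tf.linalg.LinearOperatorBlockDiag([block_a, block_b])

x = tf.ones([5, 2])                 # Right-hand side with 5 rows to match the operator.
y = op.matmul(x)                    # Shape [5, 2]; each block multiplies its own row slice.
logdet = op.log_abs_determinant()   # Sum of the blocks' log|det| values.
```

Each method call delegates to the component operators, so the cost is roughly the sum of the blocks' costs, as the performance note in the docstring states.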
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Composes one or more `LinearOperators`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import common_shapes from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorComposition"] @tf_export("linalg.LinearOperatorComposition") class LinearOperatorComposition(linear_operator.LinearOperator): """Composes one or more `LinearOperators`. This operator composes one or more linear operators `[op1,...,opJ]`, building a new `LinearOperator` with action defined by: ``` op_composed(x) := op1(op2(...(opJ(x)...)) ``` If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the [batch] matrix formed with the multiplication `A1 A2...AJ`. If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have `N_j = M_{j+1}`, in which case the composed operator has shape equal to `broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate batch shapes broadcast. Even if the composed shape is well defined, the composed operator's methods may fail due to lack of broadcasting ability in the defining operators' methods. ```python # Create a 2 x 2 linear operator composed of two 2 x 2 operators. operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]]) operator = LinearOperatorComposition([operator_1, operator_2]) operator.to_dense() ==> [[1., 2.] [3., 4.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 5 linear operators. matrix_45 = tf.random.normal(shape=[2, 3, 4, 5]) operator_45 = LinearOperatorFullMatrix(matrix) # Create a [2, 3] batch of 5 x 6 linear operators. matrix_56 = tf.random.normal(shape=[2, 3, 5, 6]) operator_56 = LinearOperatorFullMatrix(matrix_56) # Compose to create a [2, 3] batch of 4 x 6 operators. operator_46 = LinearOperatorComposition([operator_45, operator_56]) # Create a shape [2, 3, 6, 2] vector. x = tf.random.normal(shape=[2, 3, 6, 2]) operator.matmul(x) ==> Shape [2, 3, 4, 2] Tensor ``` #### Performance The performance of `LinearOperatorComposition` on any operation is equal to the sum of the individual operators' operations. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. 
These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, operators, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None): r"""Initialize a `LinearOperatorComposition`. `LinearOperatorComposition` is initialized with a list of operators `[op_1,...,op_J]`. For the `matmul` method to be well defined, the composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have similar constraints. Args: operators: Iterable of `LinearOperator` objects, each with the same `dtype` and composable shape. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Default is the individual operators names joined with `_o_`. Raises: TypeError: If all operators do not have the same `dtype`. ValueError: If `operators` is empty. """ # Validate operators. check_ops.assert_proper_iterable(operators) operators = list(operators) if not operators: raise ValueError( "Expected a non-empty list of operators. Found: %s" % operators) self._operators = operators # Validate dtype. dtype = operators[0].dtype for operator in operators: if operator.dtype != dtype: name_type = (str((o.name, o.dtype)) for o in operators) raise TypeError( "Expected all operators to have the same dtype. Found %s" % " ".join(name_type)) # Auto-set and check hints. if all(operator.is_non_singular for operator in operators): if is_non_singular is False: raise ValueError( "The composition of non-singular operators is always non-singular.") is_non_singular = True # Initialization. graph_parents = [] for operator in operators: graph_parents.extend(operator.graph_parents) if name is None: name = "_o_".join(operator.name for operator in operators) with ops.name_scope(name, values=graph_parents): super(LinearOperatorComposition, self).__init__( dtype=dtype, graph_parents=graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) @property def operators(self): return self._operators def _shape(self): # Get final matrix shape. domain_dimension = self.operators[0].domain_dimension for operator in self.operators[1:]: domain_dimension.assert_is_compatible_with(operator.range_dimension) domain_dimension = operator.domain_dimension matrix_shape = tensor_shape.TensorShape( [self.operators[0].range_dimension, self.operators[-1].domain_dimension]) # Get broadcast batch shape. # broadcast_shape checks for compatibility. 
batch_shape = self.operators[0].batch_shape for operator in self.operators[1:]: batch_shape = common_shapes.broadcast_shape( batch_shape, operator.batch_shape) return batch_shape.concatenate(matrix_shape) def _shape_tensor(self): # Avoid messy broadcasting if possible. if self.shape.is_fully_defined(): return ops.convert_to_tensor( self.shape.as_list(), dtype=dtypes.int32, name="shape") # Don't check the matrix dimensions. That would add unnecessary Asserts to # the graph. Things will fail at runtime naturally if shapes are # incompatible. matrix_shape = array_ops.stack([ self.operators[0].range_dimension_tensor(), self.operators[-1].domain_dimension_tensor() ]) # Dummy Tensor of zeros. Will never be materialized. zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor()) for operator in self.operators[1:]: zeros += array_ops.zeros(shape=operator.batch_shape_tensor()) batch_shape = array_ops.shape(zeros) return array_ops.concat((batch_shape, matrix_shape), 0) def _matmul(self, x, adjoint=False, adjoint_arg=False): # If self.operators = [A, B], and not adjoint, then # matmul_order_list = [B, A]. # As a result, we return A.matmul(B.matmul(x)) if adjoint: matmul_order_list = self.operators else: matmul_order_list = list(reversed(self.operators)) result = matmul_order_list[0].matmul( x, adjoint=adjoint, adjoint_arg=adjoint_arg) for operator in matmul_order_list[1:]: result = operator.matmul(result, adjoint=adjoint) return result def _determinant(self): result = self.operators[0].determinant() for operator in self.operators[1:]: result *= operator.determinant() return result def _log_abs_determinant(self): result = self.operators[0].log_abs_determinant() for operator in self.operators[1:]: result += operator.log_abs_determinant() return result def _solve(self, rhs, adjoint=False, adjoint_arg=False): # TODO(langmore) Implement solve using solve_ls if some intermediate # operator maps to a high dimensional space. # In that case, an exact solve may still be possible. # If self.operators = [A, B], and not adjoint, then # solve_order_list = [A, B]. # As a result, we return B.solve(A.solve(x)) if adjoint: solve_order_list = list(reversed(self.operators)) else: solve_order_list = self.operators solution = solve_order_list[0].solve( rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) for operator in solve_order_list[1:]: solution = operator.solve(solution, adjoint=adjoint) return solution
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_composition.py
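A minimal usage sketch of `LinearOperatorComposition` via `tf.linalg`; the operator shapes and values below are illustrative assumptions, not part of the file.

```python
import tensorflow as tf

# Compose A (2 x 3) with B (3 x 2); the composition acts like the 2 x 2 matrix A @ B.
op_a = tf.linalg.LinearOperatorFullMatrix([[1., 0., 2.], [0., 1., 1.]])
op_b = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.], [5., 6.]])

composed = tf.linalg.LinearOperatorComposition([op_a, op_b])

x = tf.ones([2, 4])
y = composed.matmul(x)        # Equivalent to op_a.matmul(op_b.matmul(x)); shape [2, 4].
dense = composed.to_dense()   # Materializes A @ B, shape [2, 2].
```

Note that `matmul` applies the operators right-to-left while `solve` applies them left-to-right, matching the `_matmul` and `_solve` implementations above.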
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like a Toeplitz matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_circulant from tensorflow.python.ops.signal import fft_ops from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorToeplitz",] @tf_export("linalg.LinearOperatorToeplitz") class LinearOperatorToeplitz(linear_operator.LinearOperator): """`LinearOperator` acting like a [batch] of toeplitz matrices. This operator acts like a [batch] Toeplitz matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. #### Description in terms of toeplitz matrices Toeplitz means that `A` has constant diagonals. Hence, `A` can be generated with two vectors. One represents the first column of the matrix, and the other represents the first row. Below is a 4 x 4 example: ``` A = |a b c d| |e a b c| |f e a b| |g f e a| ``` #### Example of a Toeplitz operator. ```python # Create a 3 x 3 Toeplitz operator. col = [1., 2., 3.] row = [1., 4., -9.] operator = LinearOperatorToeplitz(col, row) operator.to_dense() ==> [[1., 4., -9.], [2., 1., 4.], [3., 2., 1.]] operator.shape ==> [3, 3] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [3, 4] Tensor operator.matmul(x) ==> Shape [3, 4] Tensor #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, N], with b >= 0 x.shape = [C1,...,Cc] + [N, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. 
""" def __init__(self, col, row, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorToeplitz"): r"""Initialize a `LinearOperatorToeplitz`. Args: col: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. The first column of the operator. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. Note that the first entry of `col` is assumed to be the same as the first entry of `row`. row: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`. The first row of the operator. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. Note that the first entry of `row` is assumed to be the same as the first entry of `col`. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `diag.dtype` is real, this is auto-set to `True`. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. """ with ops.name_scope(name, values=[row, col]): self._row = ops.convert_to_tensor(row, name="row") self._col = ops.convert_to_tensor(col, name="col") self._check_row_col(self._row, self._col) circulant_col = array_ops.concat( [self._col, array_ops.zeros_like(self._col[..., 0:1]), array_ops.reverse(self._row[..., 1:], axis=[-1])], axis=-1) # To be used for matmul. self._circulant = linear_operator_circulant.LinearOperatorCirculant( fft_ops.fft(_to_complex(circulant_col)), input_output_dtype=self._row.dtype) if is_square is False: # pylint:disable=g-bool-id-comparison raise ValueError("Only square Toeplitz operators currently supported.") is_square = True super(LinearOperatorToeplitz, self).__init__( dtype=self._row.dtype, graph_parents=[self._row, self._col], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_row_col(self, row, col): """Static check of row and column.""" for name, tensor in [["row", row], ["col", col]]: if tensor.shape.ndims is not None and tensor.shape.ndims < 1: raise ValueError("Argument {} must have at least 1 dimension. " "Found: {}".format(name, tensor)) if row.shape[-1] is not None and col.shape[-1] is not None: if row.shape[-1] != col.shape[-1]: raise ValueError( "Expected square matrix, got row and col with mismatched " "dimensions.") def _shape(self): # If d_shape = [5, 3], we return [5, 3, 3]. v_shape = array_ops.broadcast_static_shape( self.row.shape, self.col.shape) return v_shape.concatenate(v_shape[-1:]) def _shape_tensor(self): v_shape = array_ops.broadcast_dynamic_shape( array_ops.shape(self.row), array_ops.shape(self.col)) k = v_shape[-1] return array_ops.concat((v_shape, [k]), 0) def _assert_self_adjoint(self): return check_ops.assert_equal( self.row, self.col, message=("row and col are not the same, and " "so this operator is not self-adjoint.")) # TODO(srvasude): Add efficient solver and determinant calculations to this # class (based on Levinson recursion.) 
def _matmul(self, x, adjoint=False, adjoint_arg=False): # Given a Toeplitz matrix, we can embed it in a Circulant matrix to perform # efficient matrix multiplications. Given a Toeplitz matrix with first row # [t_0, t_1, ... t_{n-1}] and first column [t0, t_{-1}, ..., t_{-(n-1)}, # let C by the circulant matrix with first column [t0, t_{-1}, ..., # t_{-(n-1)}, 0, t_{n-1}, ..., t_1]. Also adjoin to our input vector `x` # `n` zeros, to make it a vector of length `2n` (call it y). It can be shown # that if we take the first n entries of `Cy`, this is equal to the Toeplitz # multiplication. See: # http://math.mit.edu/icg/resources/teaching/18.085-spring2015/toeplitz.pdf # for more details. x = linalg.adjoint(x) if adjoint_arg else x expanded_x = array_ops.concat([x, array_ops.zeros_like(x)], axis=-2) result = self._circulant.matmul( expanded_x, adjoint=adjoint, adjoint_arg=False) return math_ops.cast( result[..., :self.domain_dimension_tensor(), :], self.dtype) def _trace(self): return math_ops.cast( self.domain_dimension_tensor(), dtype=self.dtype) * self.col[..., 0] def _diag_part(self): diag_entry = self.col[..., 0:1] return diag_entry * array_ops.ones( [self.domain_dimension_tensor()], self.dtype) @property def col(self): return self._col @property def row(self): return self._row def _to_complex(x): dtype = dtypes.complex64 if x.dtype in [dtypes.float64, dtypes.complex128]: dtype = dtypes.complex128 return math_ops.cast(x, dtype)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_toeplitz.py
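A minimal usage sketch of `LinearOperatorToeplitz` via `tf.linalg`; the column and row values are illustrative, and execution is assumed to happen eagerly or inside a session as appropriate for this tree.

```python
import tensorflow as tf

# First column and first row must share their leading entry (here 2.).
col = tf.constant([2., -1., 0., 0.])
row = tf.constant([2., -1., 0., 0.])

# A symmetric Toeplitz operator; matmul is computed via the embedded circulant FFT trick
# described in the _matmul implementation above.
op = tf.linalg.LinearOperatorToeplitz(col, row, is_self_adjoint=True)

x = tf.ones([4, 3])
y = op.matmul(x)        # Shape [4, 3].
dense = op.to_dense()   # 4 x 4 matrix with constant diagonals.
```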
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for linear algebra.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_linalg_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import special_math_ops from tensorflow.python.util import dispatch from tensorflow.python.util.tf_export import tf_export # Linear algebra ops. band_part = array_ops.matrix_band_part cholesky = linalg_ops.cholesky cholesky_solve = linalg_ops.cholesky_solve det = linalg_ops.matrix_determinant slogdet = gen_linalg_ops.log_matrix_determinant tf_export('linalg.slogdet')(slogdet) diag = array_ops.matrix_diag diag_part = array_ops.matrix_diag_part eigh = linalg_ops.self_adjoint_eig eigvalsh = linalg_ops.self_adjoint_eigvals einsum = special_math_ops.einsum eye = linalg_ops.eye inv = linalg_ops.matrix_inverse logm = gen_linalg_ops.matrix_logarithm lu = gen_linalg_ops.lu tf_export('linalg.logm')(logm) lstsq = linalg_ops.matrix_solve_ls norm = linalg_ops.norm qr = linalg_ops.qr set_diag = array_ops.matrix_set_diag solve = linalg_ops.matrix_solve sqrtm = linalg_ops.matrix_square_root svd = linalg_ops.svd tensordot = math_ops.tensordot trace = math_ops.trace transpose = array_ops.matrix_transpose triangular_solve = linalg_ops.matrix_triangular_solve @tf_export('linalg.logdet') @dispatch.add_dispatch_support def logdet(matrix, name=None): """Computes log of the determinant of a hermitian positive definite matrix. ```python # Compute the determinant of a matrix while reducing the chance of over- or underflow: A = ... # shape 10 x 10 det = tf.exp(tf.linalg.logdet(A)) # scalar ``` Args: matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or `complex128` with shape `[..., M, M]`. name: A name to give this `Op`. Defaults to `logdet`. Returns: The natural log of the determinant of `matrix`. @compatibility(numpy) Equivalent to numpy.linalg.slogdet, although no sign is returned since only hermitian positive definite matrices are supported. @end_compatibility """ # This uses the property that the log det(A) = 2*sum(log(real(diag(C)))) # where C is the cholesky decomposition of A. 
with ops.name_scope(name, 'logdet', [matrix]): chol = gen_linalg_ops.cholesky(matrix) return 2.0 * math_ops.reduce_sum( math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))), axis=[-1]) @tf_export('linalg.adjoint') @dispatch.add_dispatch_support def adjoint(matrix, name=None): """Transposes the last two dimensions of and conjugates tensor `matrix`. For example: ```python x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]) tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j], # [2 - 2j, 5 - 5j], # [3 - 3j, 6 - 6j]] ``` Args: matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or `complex128` with shape `[..., M, M]`. name: A name to give this `Op` (optional). Returns: The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of matrix. """ with ops.name_scope(name, 'adjoint', [matrix]): matrix = ops.convert_to_tensor(matrix, name='matrix') return array_ops.matrix_transpose(matrix, conjugate=True) # This section is ported nearly verbatim from Eigen's implementation: # https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html def _matrix_exp_pade3(matrix): """3rd-order Pade approximant for matrix exponential.""" b = [120.0, 60.0, 12.0] b = [constant_op.constant(x, matrix.dtype) for x in b] ident = linalg_ops.eye( array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype) matrix_2 = math_ops.matmul(matrix, matrix) tmp = matrix_2 + b[1] * ident matrix_u = math_ops.matmul(matrix, tmp) matrix_v = b[2] * matrix_2 + b[0] * ident return matrix_u, matrix_v def _matrix_exp_pade5(matrix): """5th-order Pade approximant for matrix exponential.""" b = [30240.0, 15120.0, 3360.0, 420.0, 30.0] b = [constant_op.constant(x, matrix.dtype) for x in b] ident = linalg_ops.eye( array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype) matrix_2 = math_ops.matmul(matrix, matrix) matrix_4 = math_ops.matmul(matrix_2, matrix_2) tmp = matrix_4 + b[3] * matrix_2 + b[1] * ident matrix_u = math_ops.matmul(matrix, tmp) matrix_v = b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident return matrix_u, matrix_v def _matrix_exp_pade7(matrix): """7th-order Pade approximant for matrix exponential.""" b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0] b = [constant_op.constant(x, matrix.dtype) for x in b] ident = linalg_ops.eye( array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype) matrix_2 = math_ops.matmul(matrix, matrix) matrix_4 = math_ops.matmul(matrix_2, matrix_2) matrix_6 = math_ops.matmul(matrix_4, matrix_2) tmp = matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident matrix_u = math_ops.matmul(matrix, tmp) matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident return matrix_u, matrix_v def _matrix_exp_pade9(matrix): """9th-order Pade approximant for matrix exponential.""" b = [ 17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0, 2162160.0, 110880.0, 3960.0, 90.0 ] b = [constant_op.constant(x, matrix.dtype) for x in b] ident = linalg_ops.eye( array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype) matrix_2 = math_ops.matmul(matrix, matrix) matrix_4 = math_ops.matmul(matrix_2, matrix_2) matrix_6 = math_ops.matmul(matrix_4, matrix_2) matrix_8 = math_ops.matmul(matrix_6, matrix_2) tmp = ( matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident) matrix_u = math_ops.matmul(matrix, tmp) matrix_v = ( b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 
+ b[2] * matrix_2 + b[0] * ident) return matrix_u, matrix_v def _matrix_exp_pade13(matrix): """13th-order Pade approximant for matrix exponential.""" b = [ 64764752532480000.0, 32382376266240000.0, 7771770303897600.0, 1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0, 33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0 ] b = [constant_op.constant(x, matrix.dtype) for x in b] ident = linalg_ops.eye( array_ops.shape(matrix)[-2], batch_shape=array_ops.shape(matrix)[:-2], dtype=matrix.dtype) matrix_2 = math_ops.matmul(matrix, matrix) matrix_4 = math_ops.matmul(matrix_2, matrix_2) matrix_6 = math_ops.matmul(matrix_4, matrix_2) tmp_u = ( math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident) matrix_u = math_ops.matmul(matrix, tmp_u) tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2 matrix_v = ( math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident) return matrix_u, matrix_v @tf_export('linalg.expm') def matrix_exponential(input, name=None): # pylint: disable=redefined-builtin r"""Computes the matrix exponential of one or more square matrices. exp(A) = \sum_{n=0}^\infty A^n/n! The exponential is computed using a combination of the scaling and squaring method and the Pade approximation. Details can be found in: Nicholas J. Higham, "The scaling and squaring method for the matrix exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005. The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form square matrices. The output is a tensor of the same shape as the input containing the exponential for all input submatrices `[..., :, :]`. Args: input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or `complex128` with shape `[..., M, M]`. name: A name to give this `Op` (optional). Returns: the matrix exponential of the input. Raises: ValueError: An unsupported type is provided as input. 
@compatibility(scipy) Equivalent to scipy.linalg.expm @end_compatibility """ with ops.name_scope(name, 'matrix_exponential', [input]): matrix = ops.convert_to_tensor(input, name='input') if matrix.shape[-2:] == [0, 0]: return matrix batch_shape = matrix.shape[:-2] if not batch_shape.is_fully_defined(): batch_shape = array_ops.shape(matrix)[:-2] # reshaping the batch makes the where statements work better matrix = array_ops.reshape( matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0)) l1_norm = math_ops.reduce_max( math_ops.reduce_sum( math_ops.abs(matrix), axis=array_ops.size(array_ops.shape(matrix)) - 2), axis=-1) const = lambda x: constant_op.constant(x, l1_norm.dtype) def _nest_where(vals, cases): assert len(vals) == len(cases) - 1 if len(vals) == 1: return array_ops.where( math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1]) else: return array_ops.where( math_ops.less(l1_norm, const(vals[0])), cases[0], _nest_where(vals[1:], cases[1:])) if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]: maxnorm = const(3.925724783138660) squarings = math_ops.maximum( math_ops.floor( math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0) u3, v3 = _matrix_exp_pade3(matrix) u5, v5 = _matrix_exp_pade5(matrix) u7, v7 = _matrix_exp_pade7(matrix / math_ops.pow( constant_op.constant(2.0, dtype=matrix.dtype), math_ops.cast( squarings, matrix.dtype))[..., array_ops.newaxis, array_ops.newaxis]) conds = (4.258730016922831e-001, 1.880152677804762e+000) u = _nest_where(conds, (u3, u5, u7)) v = _nest_where(conds, (v3, v5, v7)) elif matrix.dtype in [dtypes.float64, dtypes.complex128]: maxnorm = const(5.371920351148152) squarings = math_ops.maximum( math_ops.floor( math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0) u3, v3 = _matrix_exp_pade3(matrix) u5, v5 = _matrix_exp_pade5(matrix) u7, v7 = _matrix_exp_pade7(matrix) u9, v9 = _matrix_exp_pade9(matrix) u13, v13 = _matrix_exp_pade13(matrix / math_ops.pow( constant_op.constant(2.0, dtype=matrix.dtype), math_ops.cast( squarings, matrix.dtype))[..., array_ops.newaxis, array_ops.newaxis]) conds = (1.495585217958292e-002, 2.539398330063230e-001, 9.504178996162932e-001, 2.097847961257068e+000) u = _nest_where(conds, (u3, u5, u7, u9, u13)) v = _nest_where(conds, (v3, v5, v7, v9, v13)) else: raise ValueError('tf.linalg.expm does not support matrices of type %s' % matrix.dtype) numer = u + v denom = -u + v result = linalg_ops.matrix_solve(denom, numer) max_squarings = math_ops.reduce_max(squarings) i = const(0.0) c = lambda i, r: math_ops.less(i, max_squarings) def b(i, r): return i + 1, array_ops.where( math_ops.less(i, squarings), math_ops.matmul(r, r), r) _, result = control_flow_ops.while_loop(c, b, [i, result]) if not matrix.shape.is_fully_defined(): return array_ops.reshape( result, array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0)) return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:])) @tf_export('linalg.tridiagonal_solve') def tridiagonal_solve(diagonals, rhs, diagonals_format='compact', transpose_rhs=False, conjugate_rhs=False, name=None, partial_pivoting=True): r"""Solves tridiagonal systems of equations. The input can be supplied in various formats: `matrix`, `sequence` and `compact`, specified by the `diagonals_format` arg. In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with two inner-most dimensions representing the square tridiagonal matrices. Elements outside of the three diagonals will be ignored. 
In `sequence` format, `diagonals` are supplied as a tuple or list of three tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either `M-1` or `M`; in the latter case, the last element of superdiagonal and the first element of subdiagonal will be ignored. In `compact` format the three diagonals are brought together into one tensor of shape `[..., 3, M]`, with last two dimensions containing superdiagonals, diagonals, and subdiagonals, in order. Similarly to `sequence` format, elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored. The `compact` format is recommended as the one with best performance. In case you need to cast a tensor into a compact format manually, use `tf.gather_nd`. An example for a tensor of shape [m, m]: ```python rhs = tf.constant([...]) matrix = tf.constant([[...]]) m = matrix.shape[0] dummy_idx = [0, 0] # An arbitrary element to use as a dummy indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx], # Superdiagonal [[i, i] for i in range(m)], # Diagonal [dummy_idx] + [[i + 1, i] for i in range(m - 1)]] # Subdiagonal diagonals=tf.gather_nd(matrix, indices) x = tf.linalg.tridiagonal_solve(diagonals, rhs) ``` Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or `[..., M, K]`. The latter allows to simultaneously solve K systems with the same left-hand sides and K different right-hand sides. If `transpose_rhs` is set to `True` the expected shape is `[..., M]` or `[..., K, M]`. The batch dimensions, denoted as `...`, must be the same in `diagonals` and `rhs`. The output is a tensor of the same shape as `rhs`: either `[..., M]` or `[..., M, K]`. The op isn't guaranteed to raise an error if the input matrix is not invertible. `tf.debugging.check_numerics` can be applied to the output to detect invertibility problems. **Note**: with large batch sizes, the computation on the GPU may be slow, if either `partial_pivoting=True` or there are multiple right-hand sides (`K > 1`). If this issue arises, consider if it's possible to disable pivoting and have `K = 1`, or, alternatively, consider using CPU. On CPU, solution is computed via Gaussian elimination with or without partial pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv Args: diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The shape depends of `diagonals_format`, see description above. Must be `float32`, `float64`, `complex64`, or `complex128`. rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as `diagonals`. diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is `compact`. transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect if the shape of rhs is [..., M]). conjugate_rhs: If `True`, `rhs` is conjugated before solving. name: A name to give this `Op` (optional). partial_pivoting: whether to perform partial pivoting. `True` by default. Partial pivoting makes the procedure more stable, but slower. Partial pivoting is unnecessary in some cases, including diagonally dominant and symmetric positive definite matrices (see e.g. theorem 9.12 in [1]). Returns: A `Tensor` of shape [..., M] or [..., M, K] containing the solutions. Raises: ValueError: An unsupported type is provided as input, or when the input tensors have incorrect shapes. [1] Nicholas J. Higham (2002). 
Accuracy and Stability of Numerical Algorithms: Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7. """ if diagonals_format == 'compact': return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs, conjugate_rhs, partial_pivoting, name) if diagonals_format == 'sequence': if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3: raise ValueError('Expected diagonals to be a sequence of length 3.') superdiag, maindiag, subdiag = diagonals if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])): raise ValueError( 'Tensors representing the three diagonals must have the same shape,' 'except for the last dimension, got {}, {}, {}'.format( subdiag.shape, maindiag.shape, superdiag.shape)) m = tensor_shape.dimension_value(maindiag.shape[-1]) def pad_if_necessary(t, name, last_dim_padding): n = tensor_shape.dimension_value(t.shape[-1]) if not n or n == m: return t if n == m - 1: paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] + [last_dim_padding]) return array_ops.pad(t, paddings) raise ValueError('Expected {} to be have length {} or {}, got {}.'.format( name, m, m - 1, n)) subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0]) superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1]) diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2) return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs, conjugate_rhs, partial_pivoting, name) if diagonals_format == 'matrix': m1 = tensor_shape.dimension_value(diagonals.shape[-1]) m2 = tensor_shape.dimension_value(diagonals.shape[-2]) if m1 and m2 and m1 != m2: raise ValueError( 'Expected last two dimensions of diagonals to be same, got {} and {}' .format(m1, m2)) m = m1 or m2 if not m: raise ValueError('The size of the matrix needs to be known for ' 'diagonals_format="matrix"') # Extract diagonals; use input[..., 0, 0] as "dummy" m-th elements of sub- # and superdiagonal. # gather_nd slices into first indices, whereas we need to slice into the # last two, so transposing back and forth is necessary. 
dummy_idx = [0, 0] indices = ([[[1, 0], [0, 0], dummy_idx]] + [[[i + 1, i], [i, i], [i - 1, i]] for i in range(1, m - 1)] + [[dummy_idx, [m - 1, m - 1], [m - 2, m - 1]]]) diagonals = array_ops.transpose( array_ops.gather_nd(array_ops.transpose(diagonals), indices)) return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs, conjugate_rhs, partial_pivoting, name) raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format)) def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs, conjugate_rhs, partial_pivoting, name): """Helper function used after the input has been cast to compact form.""" diags_rank, rhs_rank = len(diagonals.shape), len(rhs.shape) if diags_rank < 2: raise ValueError( 'Expected diagonals to have rank at least 2, got {}'.format(diags_rank)) if rhs_rank != diags_rank and rhs_rank != diags_rank - 1: raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format( diags_rank - 1, diags_rank, rhs_rank)) if diagonals.shape[-2] and diagonals.shape[-2] != 3: raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2])) if not diagonals.shape[:-2].is_compatible_with(rhs.shape[:diags_rank - 2]): raise ValueError('Batch shapes {} and {} are incompatible'.format( diagonals.shape[:-2], rhs.shape[:diags_rank - 2])) def check_num_lhs_matches_num_rhs(): if (diagonals.shape[-1] and rhs.shape[-2] and diagonals.shape[-1] != rhs.shape[-2]): raise ValueError('Expected number of left-hand sided and right-hand ' 'sides to be equal, got {} and {}'.format( diagonals.shape[-1], rhs.shape[-2])) if rhs_rank == diags_rank - 1: # Rhs provided as a vector, ignoring transpose_rhs if conjugate_rhs: rhs = math_ops.conj(rhs) rhs = array_ops.expand_dims(rhs, -1) check_num_lhs_matches_num_rhs() return array_ops.squeeze( linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name), -1) if transpose_rhs: rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs) elif conjugate_rhs: rhs = math_ops.conj(rhs) check_num_lhs_matches_num_rhs() result = linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name) return array_ops.matrix_transpose(result) if transpose_rhs else result @tf_export('linalg.tridiagonal_matmul') def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None): r"""Multiplies tridiagonal matrix by matrix. `diagonals` is representation of 3-diagonal NxN matrix, which depends on `diagonals_format`. In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with two inner-most dimensions representing the square tridiagonal matrices. Elements outside of the three diagonals will be ignored. If `sequence` format, `diagonals` is list or tuple of three tensors: `[superdiag, maindiag, subdiag]`, each having shape [..., M]. Last element of `superdiag` first element of `subdiag` are ignored. In `compact` format the three diagonals are brought together into one tensor of shape `[..., 3, M]`, with last two dimensions containing superdiagonals, diagonals, and subdiagonals, in order. Similarly to `sequence` format, elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored. The `sequence` format is recommended as the one with the best performance. `rhs` is matrix to the right of multiplication. It has shape `[..., M, N]`. 
Example: ```python superdiag = tf.constant([-1, -1, 0], dtype=tf.float64) maindiag = tf.constant([2, 2, 2], dtype=tf.float64) subdiag = tf.constant([0, -1, -1], dtype=tf.float64) diagonals = [superdiag, maindiag, subdiag] rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64) x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence') ``` Args: diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The shape depends of `diagonals_format`, see description above. Must be `float32`, `float64`, `complex64`, or `complex128`. rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`. diagonals_format: one of `sequence`, or `compact`. Default is `compact`. name: A name to give this `Op` (optional). Returns: A `Tensor` of shape [..., M, N] containing the result of multiplication. Raises: ValueError: An unsupported type is provided as input, or when the input tensors have incorrect shapes. """ if diagonals_format == 'compact': superdiag = diagonals[..., 0, :] maindiag = diagonals[..., 1, :] subdiag = diagonals[..., 2, :] elif diagonals_format == 'sequence': superdiag, maindiag, subdiag = diagonals elif diagonals_format == 'matrix': m1 = tensor_shape.dimension_value(diagonals.shape[-1]) m2 = tensor_shape.dimension_value(diagonals.shape[-2]) if not m1 or not m2: raise ValueError('The size of the matrix needs to be known for ' 'diagonals_format="matrix"') if m1 != m2: raise ValueError( 'Expected last two dimensions of diagonals to be same, got {} and {}' .format(m1, m2)) # TODO(b/131695260): use matrix_diag_part when it supports extracting # arbitrary diagonals. maindiag = array_ops.matrix_diag_part(diagonals) diagonals = array_ops.transpose(diagonals) dummy_index = [0, 0] superdiag_indices = [[i + 1, i] for i in range(0, m1 - 1)] + [dummy_index] subdiag_indices = [dummy_index] + [[i - 1, i] for i in range(1, m1)] superdiag = array_ops.transpose( array_ops.gather_nd(diagonals, superdiag_indices)) subdiag = array_ops.transpose( array_ops.gather_nd(diagonals, subdiag_indices)) else: raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format) # C++ backend requires matrices. # Converting 1-dimensional vectors to matrices with 1 row. superdiag = array_ops.expand_dims(superdiag, -2) maindiag = array_ops.expand_dims(maindiag, -2) subdiag = array_ops.expand_dims(subdiag, -2) return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name) def _maybe_validate_matrix(a, validate_args): """Checks that input is a `float` matrix.""" assertions = [] if not a.dtype.is_floating: raise TypeError('Input `a` must have `float`-like `dtype` ' '(saw {}).'.format(a.dtype.name)) if a.shape is not None and a.shape.rank is not None: if a.shape.rank < 2: raise ValueError('Input `a` must have at least 2 dimensions ' '(saw: {}).'.format(a.shape.rank)) elif validate_args: assertions.append( check_ops.assert_rank_at_least( a, rank=2, message='Input `a` must have at least 2 dimensions.')) return assertions @tf_export('linalg.matrix_rank') def matrix_rank(a, tol=None, validate_args=False, name=None): """Compute the matrix rank of one or more matrices. Arguments: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. tol: Threshold below which the singular value is counted as 'zero'. Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`). validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). 
name: Python `str` prefixed to ops created by this function. Default value: 'matrix_rank'. Returns: matrix_rank: (Batch of) `int32` scalars representing the number of non-zero singular values. """ with ops.name_scope(name or 'matrix_rank'): a = ops.convert_to_tensor(a, dtype_hint=dtypes.float32, name='a') assertions = _maybe_validate_matrix(a, validate_args) if assertions: with ops.control_dependencies(assertions): a = array_ops.identity(a) s = svd(a, compute_uv=False) if tol is None: if (a.shape[-2:]).is_fully_defined(): m = np.max(a.shape[-2:].as_list()) else: m = math_ops.reduce_max(array_ops.shape(a)[-2:]) eps = np.finfo(a.dtype.as_numpy_dtype).eps tol = ( eps * math_ops.cast(m, a.dtype) * math_ops.reduce_max(s, axis=-1, keepdims=True)) return math_ops.reduce_sum(math_ops.cast(s > tol, dtypes.int32), axis=-1) @tf_export('linalg.pinv') def pinv(a, rcond=None, validate_args=False, name=None): """Compute the Moore-Penrose pseudo-inverse of one or more matrices. Calculate the [generalized inverse of a matrix]( https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse) using its singular-value decomposition (SVD) and including all large singular values. The pseudo-inverse of a matrix `A`, is defined as: 'the matrix that 'solves' [the least-squares problem] `A @ x = b`,' i.e., if `x_hat` is a solution, then `A_pinv` is the matrix such that `x_hat = A_pinv @ b`. It can be shown that if `U @ Sigma @ V.T = A` is the singular value decomposition of `A`, then `A_pinv = V @ inv(Sigma) U^T`. [(Strang, 1980)][1] This function is analogous to [`numpy.linalg.pinv`]( https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.pinv.html). It differs only in default value of `rcond`. In `numpy.linalg.pinv`, the default `rcond` is `1e-15`. Here the default is `10. * max(num_rows, num_cols) * np.finfo(dtype).eps`. Args: a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be pseudo-inverted. rcond: `Tensor` of small singular value cutoffs. Singular values smaller (in modulus) than `rcond` * largest_singular_value (again, in modulus) are set to zero. Must broadcast against `tf.shape(a)[:-2]`. Default value: `10. * max(num_rows, num_cols) * np.finfo(a.dtype).eps`. validate_args: When `True`, additional assertions might be embedded in the graph. Default value: `False` (i.e., no graph assertions are added). name: Python `str` prefixed to ops created by this function. Default value: 'pinv'. Returns: a_pinv: (Batch of) pseudo-inverse of input `a`. Has same shape as `a` except rightmost two dimensions are transposed. Raises: TypeError: if input `a` does not have `float`-like `dtype`. ValueError: if input `a` has fewer than 2 dimensions. #### Examples ```python import tensorflow as tf import tensorflow_probability as tfp a = tf.constant([[1., 0.4, 0.5], [0.4, 0.2, 0.25], [0.5, 0.25, 0.35]]) tf.matmul(tf.linalg..pinv(a), a) # ==> array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) a = tf.constant([[1., 0.4, 0.5, 1.], [0.4, 0.2, 0.25, 2.], [0.5, 0.25, 0.35, 3.]]) tf.matmul(tf.linalg..pinv(a), a) # ==> array([[ 0.76, 0.37, 0.21, -0.02], [ 0.37, 0.43, -0.33, 0.02], [ 0.21, -0.33, 0.81, 0.01], [-0.02, 0.02, 0.01, 1. ]], dtype=float32) ``` #### References [1]: G. Strang. 'Linear Algebra and Its Applications, 2nd Ed.' Academic Press, Inc., 1980, pp. 139-142. 
""" with ops.name_scope(name or 'pinv'): a = ops.convert_to_tensor(a, name='a') assertions = _maybe_validate_matrix(a, validate_args) if assertions: with ops.control_dependencies(assertions): a = array_ops.identity(a) dtype = a.dtype.as_numpy_dtype if rcond is None: def get_dim_size(dim): dim_val = tensor_shape.dimension_value(a.shape[dim]) if dim_val is not None: return dim_val return array_ops.shape(a)[dim] num_rows = get_dim_size(-2) num_cols = get_dim_size(-1) if isinstance(num_rows, int) and isinstance(num_cols, int): max_rows_cols = float(max(num_rows, num_cols)) else: max_rows_cols = math_ops.cast( math_ops.maximum(num_rows, num_cols), dtype) rcond = 10. * max_rows_cols * np.finfo(dtype).eps rcond = ops.convert_to_tensor(rcond, dtype=dtype, name='rcond') # Calculate pseudo inverse via SVD. # Note: if a is Hermitian then u == v. (We might observe additional # performance by explicitly setting `v = u` in such cases.) [ singular_values, # Sigma left_singular_vectors, # U right_singular_vectors, # V ] = svd( a, full_matrices=False, compute_uv=True) # Saturate small singular values to inf. This has the effect of make # `1. / s = 0.` while not resulting in `NaN` gradients. cutoff = rcond * math_ops.reduce_max(singular_values, axis=-1) singular_values = array_ops.where_v2( singular_values > array_ops.expand_dims_v2(cutoff, -1), singular_values, np.array(np.inf, dtype)) # By the definition of the SVD, `a == u @ s @ v^H`, and the pseudo-inverse # is defined as `pinv(a) == v @ inv(s) @ u^H`. a_pinv = math_ops.matmul( right_singular_vectors / array_ops.expand_dims_v2(singular_values, -2), left_singular_vectors, adjoint_b=True) if a.shape is not None and a.shape.rank is not None: a_pinv.set_shape(a.shape[:-2].concatenate([a.shape[-1], a.shape[-2]])) return a_pinv
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linalg_impl.py
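A minimal usage sketch of a few of the functions defined in the file above (`expm`, `pinv`, `matrix_rank`) through their `tf.linalg` exports; the matrices are illustrative.

```python
import tensorflow as tf

a = tf.constant([[0., 1.], [-1., 0.]])   # Generator of a 2-D rotation.

# Matrix exponential via the scaling-and-squaring + Pade scheme implemented above.
rot = tf.linalg.expm(a)                  # Approximately [[cos 1, sin 1], [-sin 1, cos 1]].

# Pseudo-inverse and rank of a rank-deficient matrix.
b = tf.constant([[1., 2.], [2., 4.], [3., 6.]])  # Second column is 2x the first, so rank 1.
b_pinv = tf.linalg.pinv(b)               # Shape [2, 3].
rank = tf.linalg.matrix_rank(b)          # ==> 1
```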
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/__init__.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for testing `LinearOperator` and sub-classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import itertools import numpy as np import six from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.platform import test class OperatorShapesInfo(object): """Object encoding expected shape for a test. Encodes the expected shape of a matrix for a test. Also allows additional metadata for the test harness. """ def __init__(self, shape, **kwargs): self.shape = shape self.__dict__.update(kwargs) class CheckTapeSafeSkipOptions(object): # Skip checking this particular method. DETERMINANT = "determinant" DIAG_PART = "diag_part" LOG_ABS_DETERMINANT = "log_abs_determinant" TRACE = "trace" @six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init class LinearOperatorDerivedClassTest(test.TestCase): """Tests for derived classes. Subclasses should implement every abstractmethod, and this will enable all test methods to work. """ # Absolute/relative tolerance for tests. # Raised float32 tolerances to 1e-5 to allow cholesky tests to pass on ARM. # See nvbug 3460814. _atol = { dtypes.float16: 1e-3, dtypes.float32: 1e-5, dtypes.float64: 1e-12, dtypes.complex64: 1e-6, dtypes.complex128: 1e-12 } _rtol = { dtypes.float16: 1e-3, dtypes.float32: 1e-5, dtypes.float64: 1e-12, dtypes.complex64: 1e-6, dtypes.complex128: 1e-12 } def assertAC(self, x, y): """Derived classes can set _atol, _rtol to get different tolerance.""" dtype = dtypes.as_dtype(x.dtype) atol = self._atol[dtype] rtol = self._rtol[dtype] self.assertAllClose(x, y, atol=atol, rtol=rtol) @staticmethod def adjoint_options(): return [False, True] @staticmethod def adjoint_arg_options(): return [False, True] @staticmethod def dtypes_to_test(): # TODO(langmore) Test tf.float16 once tf.linalg.solve works in 16bit. 
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128] @staticmethod def use_placeholder_options(): return [False, True] @staticmethod def operator_shapes_infos(): """Returns list of OperatorShapesInfo, encapsulating the shape to test.""" raise NotImplementedError("operator_shapes_infos has not been implemented.") @abc.abstractmethod def operator_and_matrix( self, shapes_info, dtype, use_placeholder, ensure_self_adjoint_and_pd=False): """Build a batch matrix and an Operator that should have similar behavior. Every operator acts like a (batch) matrix. This method returns both together, and is used by tests. Args: shapes_info: `OperatorShapesInfo`, encoding shape information about the operator. dtype: Numpy dtype. Data type of returned array/operator. use_placeholder: Python bool. If True, initialize the operator with a placeholder of undefined shape and correct dtype. ensure_self_adjoint_and_pd: If `True`, construct this operator to be Hermitian Positive Definite, as well as ensuring the hints `is_positive_definite` and `is_self_adjoint` are set. This is useful for testing methods such as `cholesky`. Returns: operator: `LinearOperator` subclass instance. mat: `Tensor` representing operator. """ # Create a matrix as a numpy array with desired shape/dtype. # Create a LinearOperator that should have the same behavior as the matrix. raise NotImplementedError("Not implemented yet.") @abc.abstractmethod def make_rhs(self, operator, adjoint, with_batch=True): """Make a rhs appropriate for calling operator.solve(rhs). Args: operator: A `LinearOperator` adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the adjoint operator. with_batch: Python `bool`. If `True`, create `rhs` with the same batch shape as operator, and otherwise create a matrix without any batch shape. Returns: A `Tensor` """ raise NotImplementedError("make_rhs is not defined.") @abc.abstractmethod def make_x(self, operator, adjoint, with_batch=True): """Make an 'x' appropriate for calling operator.matmul(x). Args: operator: A `LinearOperator` adjoint: Python `bool`. If `True`, we are making an 'x' value for the adjoint operator. with_batch: Python `bool`. If `True`, create `x` with the same batch shape as operator, and otherwise create a matrix without any batch shape. Returns: A `Tensor` """ raise NotImplementedError("make_x is not defined.") @staticmethod def skip_these_tests(): """List of test names to skip.""" # Subclasses should over-ride if they want to skip some tests. # To skip "test_foo", add "foo" to this list. return [] def assertRaisesError(self, msg): """assertRaisesRegexp or OpError, depending on context.executing_eagerly.""" if context.executing_eagerly(): return self.assertRaisesRegexp(Exception, msg) return self.assertRaisesOpError(msg) def check_tape_safe(self, operator, skip_options=None): """Check gradients are not None w.r.t. operator.variables. Meant to be called from the derived class. This ensures grads are not w.r.t every variable in operator.variables. If more fine-grained testing is needed, a custom test should be written. Args: operator: LinearOperator. Exact checks done will depend on hints. skip_options: Optional list of CheckTapeSafeSkipOptions. Makes this test skip particular checks. """ skip_options = skip_options or [] if not operator.variables: raise AssertionError("`operator.variables` was empty") def _assert_not_none(iterable): for item in iterable: self.assertIsNotNone(item) # Tape tests that can be run on every operator below. 
with backprop.GradientTape() as tape: _assert_not_none(tape.gradient(operator.to_dense(), operator.variables)) with backprop.GradientTape() as tape: _assert_not_none( tape.gradient(operator.adjoint().to_dense(), operator.variables)) x = math_ops.cast( array_ops.ones(shape=operator.H.shape_tensor()[:-1]), operator.dtype) with backprop.GradientTape() as tape: _assert_not_none(tape.gradient(operator.matvec(x), operator.variables)) # Tests for square, but possibly non-singular operators below. if not operator.is_square: return for option in [ CheckTapeSafeSkipOptions.DETERMINANT, CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT, CheckTapeSafeSkipOptions.DIAG_PART, CheckTapeSafeSkipOptions.TRACE, ]: with backprop.GradientTape() as tape: if option not in skip_options: _assert_not_none( tape.gradient(getattr(operator, option)(), operator.variables)) # Tests for non-singular operators below. if operator.is_non_singular is False: # pylint: disable=g-bool-id-comparison return with backprop.GradientTape() as tape: _assert_not_none( tape.gradient(operator.inverse().to_dense(), operator.variables)) with backprop.GradientTape() as tape: _assert_not_none(tape.gradient(operator.solvevec(x), operator.variables)) # Tests for SPD operators below. if not (operator.is_self_adjoint and operator.is_positive_definite): return with backprop.GradientTape() as tape: _assert_not_none( tape.gradient(operator.cholesky().to_dense(), operator.variables)) # pylint:disable=missing-docstring def _test_to_dense(use_placeholder, shapes_info, dtype): def test_to_dense(self): with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) op_dense = operator.to_dense() if not use_placeholder: self.assertAllEqual(shapes_info.shape, op_dense.shape) op_dense_v, mat_v = sess.run([op_dense, mat]) self.assertAC(op_dense_v, mat_v) return test_to_dense def _test_det(use_placeholder, shapes_info, dtype): def test_det(self): with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) op_det = operator.determinant() if not use_placeholder: self.assertAllEqual(shapes_info.shape[:-2], op_det.shape) op_det_v, mat_det_v = sess.run( [op_det, linalg_ops.matrix_determinant(mat)]) self.assertAC(op_det_v, mat_det_v) return test_det def _test_log_abs_det(use_placeholder, shapes_info, dtype): def test_log_abs_det(self): with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) op_log_abs_det = operator.log_abs_determinant() _, mat_log_abs_det = linalg.slogdet(mat) if not use_placeholder: self.assertAllEqual( shapes_info.shape[:-2], op_log_abs_det.shape) op_log_abs_det_v, mat_log_abs_det_v = sess.run( [op_log_abs_det, mat_log_abs_det]) self.assertAC(op_log_abs_det_v, mat_log_abs_det_v) return test_log_abs_det def _test_matmul_base( self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, with_batch): # If batch dimensions are omitted, but there are # no batch dimensions for the linear operator, then # skip the test case. This is already checked with # with_batch=True. 
if not with_batch and len(shapes_info.shape) <= 2: return with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) x = self.make_x( operator, adjoint=adjoint, with_batch=with_batch) # If adjoint_arg, compute A X^H^H = A X. if adjoint_arg: op_matmul = operator.matmul( linalg.adjoint(x), adjoint=adjoint, adjoint_arg=adjoint_arg) else: op_matmul = operator.matmul(x, adjoint=adjoint) mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint) if not use_placeholder: self.assertAllEqual(op_matmul.shape, mat_matmul.shape) op_matmul_v, mat_matmul_v = sess.run( [op_matmul, mat_matmul]) self.assertAC(op_matmul_v, mat_matmul_v) def _test_matmul( use_placeholder, shapes_info, dtype, adjoint, adjoint_arg): def test_matmul(self): _test_matmul_base( self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, with_batch=True) return test_matmul def _test_matmul_with_broadcast( use_placeholder, shapes_info, dtype, adjoint, adjoint_arg): def test_matmul_with_broadcast(self): _test_matmul_base( self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, with_batch=True) return test_matmul_with_broadcast def _test_adjoint(use_placeholder, shapes_info, dtype): def test_adjoint(self): with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) op_adjoint = operator.adjoint().to_dense() op_adjoint_h = operator.H.to_dense() mat_adjoint = linalg.adjoint(mat) op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run( [op_adjoint, op_adjoint_h, mat_adjoint]) self.assertAC(mat_adjoint_v, op_adjoint_v) self.assertAC(mat_adjoint_v, op_adjoint_h_v) return test_adjoint def _test_cholesky(use_placeholder, shapes_info, dtype): def test_cholesky(self): with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder, ensure_self_adjoint_and_pd=True) op_chol = operator.cholesky().to_dense() mat_chol = linalg_ops.cholesky(mat) op_chol_v, mat_chol_v = sess.run([op_chol, mat_chol]) self.assertAC(mat_chol_v, op_chol_v) return test_cholesky def _test_solve_base( self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, with_batch): # If batch dimensions are omitted, but there are # no batch dimensions for the linear operator, then # skip the test case. This is already checked with # with_batch=True. if not with_batch and len(shapes_info.shape) <= 2: return with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) rhs = self.make_rhs( operator, adjoint=adjoint, with_batch=with_batch) # If adjoint_arg, solve A X = (rhs^H)^H = rhs. 
if adjoint_arg: op_solve = operator.solve( linalg.adjoint(rhs), adjoint=adjoint, adjoint_arg=adjoint_arg) else: op_solve = operator.solve( rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) mat_solve = linear_operator_util.matrix_solve_with_broadcast( mat, rhs, adjoint=adjoint) if not use_placeholder: self.assertAllEqual(op_solve.shape, mat_solve.shape) op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve]) self.assertAC(op_solve_v, mat_solve_v) def _test_solve( use_placeholder, shapes_info, dtype, adjoint, adjoint_arg): def test_solve(self): _test_solve_base( self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, with_batch=True) return test_solve def _test_solve_with_broadcast( use_placeholder, shapes_info, dtype, adjoint, adjoint_arg): def test_solve_with_broadcast(self): _test_solve_base( self, use_placeholder, shapes_info, dtype, adjoint, adjoint_arg, with_batch=False) return test_solve_with_broadcast def _test_inverse(use_placeholder, shapes_info, dtype): def test_inverse(self): with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) op_inverse_v, mat_inverse_v = sess.run([ operator.inverse().to_dense(), linalg.inv(mat)]) self.assertAC(op_inverse_v, mat_inverse_v) return test_inverse def _test_trace(use_placeholder, shapes_info, dtype): def test_trace(self): with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) op_trace = operator.trace() mat_trace = math_ops.trace(mat) if not use_placeholder: self.assertAllEqual(op_trace.shape, mat_trace.shape) op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace]) self.assertAC(op_trace_v, mat_trace_v) return test_trace def _test_add_to_tensor(use_placeholder, shapes_info, dtype): def test_add_to_tensor(self): with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) op_plus_2mat = operator.add_to_tensor(2 * mat) if not use_placeholder: self.assertAllEqual(shapes_info.shape, op_plus_2mat.shape) op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat]) self.assertAC(op_plus_2mat_v, 3 * mat_v) return test_add_to_tensor def _test_diag_part(use_placeholder, shapes_info, dtype): def test_diag_part(self): with self.session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat = self.operator_and_matrix( shapes_info, dtype, use_placeholder=use_placeholder) op_diag_part = operator.diag_part() mat_diag_part = array_ops.matrix_diag_part(mat) if not use_placeholder: self.assertAllEqual(mat_diag_part.shape, op_diag_part.shape) op_diag_part_, mat_diag_part_ = sess.run( [op_diag_part, mat_diag_part]) self.assertAC(op_diag_part_, mat_diag_part_) return test_diag_part # pylint:enable=missing-docstring def add_tests(test_cls): """Add tests for LinearOperator methods.""" test_name_dict = { "add_to_tensor": _test_add_to_tensor, "cholesky": _test_cholesky, "det": _test_det, "diag_part": _test_diag_part, "inverse": _test_inverse, "log_abs_det": _test_log_abs_det, "matmul": _test_matmul, "matmul_with_broadcast": _test_matmul_with_broadcast, "solve": _test_solve, "solve_with_broadcast": _test_solve_with_broadcast, "to_dense": _test_to_dense, "trace": _test_trace, } tests_with_adjoint_args = [ "matmul", "matmul_with_broadcast", 
"solve", "solve_with_broadcast", ] for name, test_template_fn in test_name_dict.items(): if name in test_cls.skip_these_tests(): continue for dtype, use_placeholder, shape_info in itertools.product( test_cls.dtypes_to_test(), test_cls.use_placeholder_options(), test_cls.operator_shapes_infos()): base_test_name = "_".join([ "test", name, "_shape={},dtype={},use_placeholder={}".format( shape_info.shape, dtype, use_placeholder)]) if name in tests_with_adjoint_args: for adjoint in test_cls.adjoint_options(): for adjoint_arg in test_cls.adjoint_arg_options(): test_name = base_test_name + ",adjoint={},adjoint_arg={}".format( adjoint, adjoint_arg) if hasattr(test_cls, test_name): raise RuntimeError("Test %s defined more than once" % test_name) setattr( test_cls, test_name, test_util.run_deprecated_v1(test_template_fn( use_placeholder, shape_info, dtype, adjoint, adjoint_arg))) else: if hasattr(test_cls, base_test_name): raise RuntimeError("Test %s defined more than once" % base_test_name) setattr( test_cls, base_test_name, test_util.run_deprecated_v1(test_template_fn( use_placeholder, shape_info, dtype))) @six.add_metaclass(abc.ABCMeta) class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest): """Base test class appropriate for square operators. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here. """ @staticmethod def operator_shapes_infos(): shapes_info = OperatorShapesInfo # non-batch operators (n, n) and batch operators. return [ shapes_info((0, 0)), shapes_info((1, 1)), shapes_info((1, 3, 3)), shapes_info((3, 4, 4)), shapes_info((2, 1, 4, 4))] def make_rhs(self, operator, adjoint, with_batch=True): # This operator is square, so rhs and x will have same shape. # adjoint value makes no difference because the operator shape doesn't # change since it is square, but be pedantic. return self.make_x(operator, adjoint=not adjoint, with_batch=with_batch) def make_x(self, operator, adjoint, with_batch=True): # Value of adjoint makes no difference because the operator is square. # Return the number of systems to solve, R, equal to 1 or 2. r = self._get_num_systems(operator) # If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of # shape [B1,...,Bb, N, R], R = 1 or 2. if operator.shape.is_fully_defined(): batch_shape = operator.batch_shape.as_list() n = operator.domain_dimension.value if with_batch: x_shape = batch_shape + [n, r] else: x_shape = [n, r] else: batch_shape = operator.batch_shape_tensor() n = operator.domain_dimension_tensor() if with_batch: x_shape = array_ops.concat((batch_shape, [n, r]), 0) else: x_shape = [n, r] return random_normal(x_shape, dtype=operator.dtype) def _get_num_systems(self, operator): """Get some number, either 1 or 2, depending on operator.""" if operator.tensor_rank is None or operator.tensor_rank % 2: return 1 else: return 2 @six.add_metaclass(abc.ABCMeta) class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest): """Base test class appropriate for generic rectangular operators. Square shapes are never tested by this class, so if you want to test your operator with a square shape, create two test classes, the other subclassing SquareLinearOperatorFullMatrixTest. Sub-classes must still define all abstractmethods from LinearOperatorDerivedClassTest that are not defined here. 
""" @staticmethod def skip_these_tests(): """List of test names to skip.""" return [ "cholesky", "inverse", "solve", "solve_with_broadcast", "det", "log_abs_det" ] @staticmethod def operator_shapes_infos(): shapes_info = OperatorShapesInfo # non-batch operators (n, n) and batch operators. return [ shapes_info((2, 1)), shapes_info((1, 2)), shapes_info((1, 3, 2)), shapes_info((3, 3, 4)), shapes_info((2, 1, 2, 4))] def make_rhs(self, operator, adjoint, with_batch=True): # TODO(langmore) Add once we're testing solve_ls. raise NotImplementedError( "make_rhs not implemented because we don't test solve") def make_x(self, operator, adjoint, with_batch=True): # Return the number of systems for the argument 'x' for .matmul(x) r = self._get_num_systems(operator) # If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of # shape [B1,...,Bb, N, R], R = 1 or 2. if operator.shape.is_fully_defined(): batch_shape = operator.batch_shape.as_list() if adjoint: n = operator.range_dimension.value else: n = operator.domain_dimension.value if with_batch: x_shape = batch_shape + [n, r] else: x_shape = [n, r] else: batch_shape = operator.batch_shape_tensor() if adjoint: n = operator.range_dimension_tensor() else: n = operator.domain_dimension_tensor() if with_batch: x_shape = array_ops.concat((batch_shape, [n, r]), 0) else: x_shape = [n, r] return random_normal(x_shape, dtype=operator.dtype) def _get_num_systems(self, operator): """Get some number, either 1 or 2, depending on operator.""" if operator.tensor_rank is None or operator.tensor_rank % 2: return 1 else: return 2 def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False): """[batch] positive definite matrix. Args: shape: `TensorShape` or Python list. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype. force_well_conditioned: Python bool. If `True`, returned matrix has eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are chi-squared random variables. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) if not tensor_util.is_tensor(shape): shape = tensor_shape.TensorShape(shape) # Matrix must be square. shape.dims[-1].assert_is_compatible_with(shape.dims[-2]) with ops.name_scope("random_positive_definite_matrix"): tril = random_tril_matrix( shape, dtype, force_well_conditioned=force_well_conditioned) return math_ops.matmul(tril, tril, adjoint_b=True) def random_tril_matrix(shape, dtype, force_well_conditioned=False, remove_upper=True): """[batch] lower triangular matrix. Args: shape: `TensorShape` or Python `list`. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype force_well_conditioned: Python `bool`. If `True`, returned matrix will have eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit normal random variables. remove_upper: Python `bool`. If `True`, zero out the strictly upper triangle. If `False`, the lower triangle of returned matrix will have desired properties, but will not have the strictly upper triangle zero'd out. Returns: `Tensor` with desired shape and dtype. """ with ops.name_scope("random_tril_matrix"): # Totally random matrix. Has no nice properties. tril = random_normal(shape, dtype=dtype) if remove_upper: tril = array_ops.matrix_band_part(tril, -1, 0) # Create a diagonal with entries having modulus in [1, 2]. 
if force_well_conditioned: maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype) diag = random_sign_uniform( shape[:-1], dtype=dtype, minval=1., maxval=maxval) tril = array_ops.matrix_set_diag(tril, diag) return tril def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) Gaussian entries. Samples are distributed like ``` N(mean, stddev^2), if dtype is real, X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. mean: `Tensor` giving mean of normal to sample from. stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_normal"): samples = random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) if dtype.is_complex: if seed is not None: seed += 1234 more_samples = random_ops.random_normal( shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed) samples = math_ops.complex(samples, more_samples) return samples def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) Uniform entries. Samples are distributed like ``` Uniform[minval, maxval], if dtype is real, X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_uniform"): samples = random_ops.random_uniform( shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) if dtype.is_complex: if seed is not None: seed += 12345 more_samples = random_ops.random_uniform( shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed) samples = math_ops.complex(samples, more_samples) return samples def random_sign_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32, seed=None): """Tensor with (possibly complex) random entries from a "sign Uniform". Letting `Z` be a random variable equal to `-1` and `1` with equal probability, Samples from this `Op` are distributed like ``` Z * X, where X ~ Uniform[minval, maxval], if dtype is real, Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex. ``` Args: shape: `TensorShape` or Python list. Shape of the returned tensor. minval: `0-D` `Tensor` giving the minimum values. maxval: `0-D` `Tensor` giving the maximum values. dtype: `TensorFlow` `dtype` or Python dtype seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. """ dtype = dtypes.as_dtype(dtype) with ops.name_scope("random_sign_uniform"): unsigned_samples = random_uniform( shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed) if seed is not None: seed += 12 signs = math_ops.sign( random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed)) return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype) def random_normal_correlated_columns(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, eps=1e-4, seed=None): """Batch matrix with (possibly complex) Gaussian entries and correlated cols. 
Returns random batch matrix `A` with specified element-wise `mean`, `stddev`, living close to an embedded hyperplane. Suppose `shape[-2:] = (M, N)`. If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries. If `M >= N`, then the colums of `A` will be made almost dependent as follows: ``` L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1) B = random normal M x N-1 matrix, mean = 0, stddev = stddev. G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane E = a random normal M x N matrix, mean = 0, stddev = eps mu = a constant M x N matrix, equal to the argument "mean" A = G + E + mu ``` Args: shape: Python list of integers. Shape of the returned tensor. Must be at least length two. mean: `Tensor` giving mean of normal to sample from. stddev: `Tensor` giving stdev of normal to sample from. dtype: `TensorFlow` `dtype` or numpy dtype eps: Distance each column is perturbed from the low-dimensional subspace. seed: Python integer seed for the RNG. Returns: `Tensor` with desired shape and dtype. Raises: ValueError: If `shape` is not at least length 2. """ dtype = dtypes.as_dtype(dtype) if len(shape) < 2: raise ValueError( "Argument shape must be at least length 2. Found: %s" % shape) # Shape is the final shape, e.g. [..., M, N] shape = list(shape) batch_shape = shape[:-2] m, n = shape[-2:] # If there is only one column, "they" are by definition correlated. if n < 2 or n < m: return random_normal( shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed) # Shape of the matrix with only n - 1 columns that we will embed in higher # dimensional space. smaller_shape = batch_shape + [m, n - 1] # Shape of the embedding matrix, mapping batch matrices # from [..., N-1, M] to [..., N, M] embedding_mat_shape = batch_shape + [n, n - 1] # This stddev for the embedding_mat ensures final result has correct stddev. stddev_mat = 1 / np.sqrt(n - 1) with ops.name_scope("random_normal_correlated_columns"): smaller_mat = random_normal( smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed) if seed is not None: seed += 1287 embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed) embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True) embedded = array_ops.matrix_transpose(embedded_t) mean_mat = array_ops.ones_like(embedded) * mean return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_test_util.py
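The `add_tests` helper in the record above generates one test method per (dtype, placeholder, shape) combination and attaches it with `setattr`, refusing to overwrite an existing name. Here is a self-contained sketch of that pattern using only the standard library (the class and helper names are illustrative, not TensorFlow classes):

```python
# Standalone sketch of the add_tests pattern: build a test method for
# every parameter combination and attach it to the TestCase via setattr.
# ToyMatmulTest and _make_test are illustrative names.
import itertools
import unittest

def _make_test(n, scale):
    def test(self):
        # Each generated method exercises one (n, scale) combination.
        mat = [[scale if i == j else 0.0 for j in range(n)] for i in range(n)]
        self.assertEqual(mat[0][0], scale)
    return test

class ToyMatmulTest(unittest.TestCase):
    pass

for n, scale in itertools.product([1, 2, 3], [1.0, 2.0]):
    name = "test_matmul_n={}_scale={}".format(n, scale)
    if hasattr(ToyMatmulTest, name):
        raise RuntimeError("Test %s defined more than once" % name)
    setattr(ToyMatmulTest, name, _make_test(n, scale))

if __name__ == "__main__":
    unittest.main()
```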
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Registration mechanisms for various n-ary operations on LinearOperators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools from tensorflow.python.framework import ops from tensorflow.python.util import tf_inspect _ADJOINTS = {} _CHOLESKY_DECOMPS = {} _MATMUL = {} _SOLVE = {} _INVERSES = {} def _registered_function(type_list, registry): """Given a list of classes, finds the most specific function registered.""" enumerated_hierarchies = [enumerate(tf_inspect.getmro(t)) for t in type_list] # Get all possible combinations of hierarchies. cls_combinations = list(itertools.product(*enumerated_hierarchies)) def hierarchy_distance(cls_combination): candidate_distance = sum(c[0] for c in cls_combination) if tuple(c[1] for c in cls_combination) in registry: return candidate_distance return 10000 registered_combination = min(cls_combinations, key=hierarchy_distance) return registry.get(tuple(r[1] for r in registered_combination), None) def _registered_adjoint(type_a): """Get the Adjoint function registered for class a.""" return _registered_function([type_a], _ADJOINTS) def _registered_cholesky(type_a): """Get the Cholesky function registered for class a.""" return _registered_function([type_a], _CHOLESKY_DECOMPS) def _registered_matmul(type_a, type_b): """Get the Matmul function registered for classes a and b.""" return _registered_function([type_a, type_b], _MATMUL) def _registered_solve(type_a, type_b): """Get the Solve function registered for classes a and b.""" return _registered_function([type_a, type_b], _SOLVE) def _registered_inverse(type_a): """Get the Cholesky function registered for class a.""" return _registered_function([type_a], _INVERSES) def adjoint(lin_op_a, name=None): """Get the adjoint associated to lin_op_a. Args: lin_op_a: The LinearOperator to take the adjoint of. name: Name to use for this operation. Returns: A LinearOperator that represents the adjoint of `lin_op_a`. Raises: NotImplementedError: If no Adjoint method is defined for the LinearOperator type of `lin_op_a`. """ adjoint_fn = _registered_adjoint(type(lin_op_a)) if adjoint_fn is None: raise ValueError("No adjoint registered for {}".format( type(lin_op_a))) with ops.name_scope(name, "Adjoint"): return adjoint_fn(lin_op_a) def cholesky(lin_op_a, name=None): """Get the Cholesky factor associated to lin_op_a. Args: lin_op_a: The LinearOperator to decompose. name: Name to use for this operation. Returns: A LinearOperator that represents the lower Cholesky factor of `lin_op_a`. Raises: NotImplementedError: If no Cholesky method is defined for the LinearOperator type of `lin_op_a`. 
""" cholesky_fn = _registered_cholesky(type(lin_op_a)) if cholesky_fn is None: raise ValueError("No cholesky decomposition registered for {}".format( type(lin_op_a))) with ops.name_scope(name, "Cholesky"): return cholesky_fn(lin_op_a) def matmul(lin_op_a, lin_op_b, name=None): """Compute lin_op_a.matmul(lin_op_b). Args: lin_op_a: The LinearOperator on the left. lin_op_b: The LinearOperator on the right. name: Name to use for this operation. Returns: A LinearOperator that represents the matmul between `lin_op_a` and `lin_op_b`. Raises: NotImplementedError: If no matmul method is defined between types of `lin_op_a` and `lin_op_b`. """ matmul_fn = _registered_matmul(type(lin_op_a), type(lin_op_b)) if matmul_fn is None: raise ValueError("No matmul registered for {}.matmul({})".format( type(lin_op_a), type(lin_op_b))) with ops.name_scope(name, "Matmul"): return matmul_fn(lin_op_a, lin_op_b) def solve(lin_op_a, lin_op_b, name=None): """Compute lin_op_a.solve(lin_op_b). Args: lin_op_a: The LinearOperator on the left. lin_op_b: The LinearOperator on the right. name: Name to use for this operation. Returns: A LinearOperator that represents the solve between `lin_op_a` and `lin_op_b`. Raises: NotImplementedError: If no solve method is defined between types of `lin_op_a` and `lin_op_b`. """ solve_fn = _registered_solve(type(lin_op_a), type(lin_op_b)) if solve_fn is None: raise ValueError("No solve registered for {}.solve({})".format( type(lin_op_a), type(lin_op_b))) with ops.name_scope(name, "Solve"): return solve_fn(lin_op_a, lin_op_b) def inverse(lin_op_a, name=None): """Get the Inverse associated to lin_op_a. Args: lin_op_a: The LinearOperator to decompose. name: Name to use for this operation. Returns: A LinearOperator that represents the inverse of `lin_op_a`. Raises: NotImplementedError: If no Inverse method is defined for the LinearOperator type of `lin_op_a`. """ inverse_fn = _registered_inverse(type(lin_op_a)) if inverse_fn is None: raise ValueError("No inverse registered for {}".format( type(lin_op_a))) with ops.name_scope(name, "Inverse"): return inverse_fn(lin_op_a) class RegisterAdjoint(object): """Decorator to register an Adjoint implementation function. Usage: @linear_operator_algebra.RegisterAdjoint(lin_op.LinearOperatorIdentity) def _adjoint_identity(lin_op_a): # Return the identity matrix. """ def __init__(self, lin_op_cls_a): """Initialize the LinearOperator registrar. Args: lin_op_cls_a: the class of the LinearOperator to decompose. """ self._key = (lin_op_cls_a,) def __call__(self, adjoint_fn): """Perform the Adjoint registration. Args: adjoint_fn: The function to use for the Adjoint. Returns: adjoint_fn Raises: TypeError: if adjoint_fn is not a callable. ValueError: if a Adjoint function has already been registered for the given argument classes. """ if not callable(adjoint_fn): raise TypeError( "adjoint_fn must be callable, received: {}".format(adjoint_fn)) if self._key in _ADJOINTS: raise ValueError("Adjoint({}) has already been registered to: {}".format( self._key[0].__name__, _ADJOINTS[self._key])) _ADJOINTS[self._key] = adjoint_fn return adjoint_fn class RegisterCholesky(object): """Decorator to register a Cholesky implementation function. Usage: @linear_operator_algebra.RegisterCholesky(lin_op.LinearOperatorIdentity) def _cholesky_identity(lin_op_a): # Return the identity matrix. """ def __init__(self, lin_op_cls_a): """Initialize the LinearOperator registrar. Args: lin_op_cls_a: the class of the LinearOperator to decompose. 
""" self._key = (lin_op_cls_a,) def __call__(self, cholesky_fn): """Perform the Cholesky registration. Args: cholesky_fn: The function to use for the Cholesky. Returns: cholesky_fn Raises: TypeError: if cholesky_fn is not a callable. ValueError: if a Cholesky function has already been registered for the given argument classes. """ if not callable(cholesky_fn): raise TypeError( "cholesky_fn must be callable, received: {}".format(cholesky_fn)) if self._key in _CHOLESKY_DECOMPS: raise ValueError("Cholesky({}) has already been registered to: {}".format( self._key[0].__name__, _CHOLESKY_DECOMPS[self._key])) _CHOLESKY_DECOMPS[self._key] = cholesky_fn return cholesky_fn class RegisterMatmul(object): """Decorator to register a Matmul implementation function. Usage: @linear_operator_algebra.RegisterMatmul( lin_op.LinearOperatorIdentity, lin_op.LinearOperatorIdentity) def _matmul_identity(a, b): # Return the identity matrix. """ def __init__(self, lin_op_cls_a, lin_op_cls_b): """Initialize the LinearOperator registrar. Args: lin_op_cls_a: the class of the LinearOperator to multiply. lin_op_cls_b: the class of the second LinearOperator to multiply. """ self._key = (lin_op_cls_a, lin_op_cls_b) def __call__(self, matmul_fn): """Perform the Matmul registration. Args: matmul_fn: The function to use for the Matmul. Returns: matmul_fn Raises: TypeError: if matmul_fn is not a callable. ValueError: if a Matmul function has already been registered for the given argument classes. """ if not callable(matmul_fn): raise TypeError( "matmul_fn must be callable, received: {}".format(matmul_fn)) if self._key in _MATMUL: raise ValueError("Matmul({}, {}) has already been registered.".format( self._key[0].__name__, self._key[1].__name__)) _MATMUL[self._key] = matmul_fn return matmul_fn class RegisterSolve(object): """Decorator to register a Solve implementation function. Usage: @linear_operator_algebra.RegisterSolve( lin_op.LinearOperatorIdentity, lin_op.LinearOperatorIdentity) def _solve_identity(a, b): # Return the identity matrix. """ def __init__(self, lin_op_cls_a, lin_op_cls_b): """Initialize the LinearOperator registrar. Args: lin_op_cls_a: the class of the LinearOperator that is computing solve. lin_op_cls_b: the class of the second LinearOperator to solve. """ self._key = (lin_op_cls_a, lin_op_cls_b) def __call__(self, solve_fn): """Perform the Solve registration. Args: solve_fn: The function to use for the Solve. Returns: solve_fn Raises: TypeError: if solve_fn is not a callable. ValueError: if a Solve function has already been registered for the given argument classes. """ if not callable(solve_fn): raise TypeError( "solve_fn must be callable, received: {}".format(solve_fn)) if self._key in _SOLVE: raise ValueError("Solve({}, {}) has already been registered.".format( self._key[0].__name__, self._key[1].__name__)) _SOLVE[self._key] = solve_fn return solve_fn class RegisterInverse(object): """Decorator to register an Inverse implementation function. Usage: @linear_operator_algebra.RegisterInverse(lin_op.LinearOperatorIdentity) def _inverse_identity(lin_op_a): # Return the identity matrix. """ def __init__(self, lin_op_cls_a): """Initialize the LinearOperator registrar. Args: lin_op_cls_a: the class of the LinearOperator to decompose. """ self._key = (lin_op_cls_a,) def __call__(self, inverse_fn): """Perform the Inverse registration. Args: inverse_fn: The function to use for the Inverse. Returns: inverse_fn Raises: TypeError: if inverse_fn is not a callable. 
ValueError: if a Inverse function has already been registered for the given argument classes. """ if not callable(inverse_fn): raise TypeError( "inverse_fn must be callable, received: {}".format(inverse_fn)) if self._key in _INVERSES: raise ValueError("Inverse({}) has already been registered to: {}".format( self._key[0].__name__, _INVERSES[self._key])) _INVERSES[self._key] = inverse_fn return inverse_fn
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_algebra.py
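The `_registered_function` helper in the record above resolves which registered implementation to use by walking each argument's method resolution order and picking the registered class combination with the smallest summed MRO distance. A small self-contained sketch of that dispatch idea follows (the registry, decorator, and class names are illustrative, not the TensorFlow internals):

```python
# Standalone sketch of MRO-distance dispatch as used by
# _registered_function above. Base/Diag and the registry are illustrative.
import itertools

_REGISTRY = {}

def register(*classes):
    def decorator(fn):
        _REGISTRY[classes] = fn
        return fn
    return decorator

def lookup(*types):
    # Enumerate each argument's MRO, try every combination, and keep the
    # registered combination with the smallest summed MRO distance.
    hierarchies = [list(enumerate(t.__mro__)) for t in types]
    best_dist, best_fn = None, None
    for combo in itertools.product(*hierarchies):
        key = tuple(cls for _, cls in combo)
        if key in _REGISTRY:
            dist = sum(idx for idx, _ in combo)
            if best_dist is None or dist < best_dist:
                best_dist, best_fn = dist, _REGISTRY[key]
    return best_fn

class Base(object):
    pass

class Diag(Base):
    pass

@register(Base, Base)
def _matmul_base(a, b):
    return "dense matmul"

@register(Diag, Diag)
def _matmul_diag(a, b):
    return "diag matmul"

print(lookup(Diag, Diag)(Diag(), Diag()))  # diag matmul (exact match wins)
print(lookup(Diag, Base)(Diag(), Base()))  # dense matmul (falls back via MRO)
```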
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Registrations for LinearOperator.inverse.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_algebra from tensorflow.python.ops.linalg import linear_operator_block_diag from tensorflow.python.ops.linalg import linear_operator_circulant from tensorflow.python.ops.linalg import linear_operator_diag from tensorflow.python.ops.linalg import linear_operator_householder from tensorflow.python.ops.linalg import linear_operator_identity from tensorflow.python.ops.linalg import linear_operator_inversion from tensorflow.python.ops.linalg import linear_operator_kronecker # By default, return LinearOperatorInversion which switched the .matmul # and .solve methods. @linear_operator_algebra.RegisterInverse(linear_operator.LinearOperator) def _inverse_linear_operator(linop): return linear_operator_inversion.LinearOperatorInversion( linop, is_non_singular=linop.is_non_singular, is_self_adjoint=linop.is_self_adjoint, is_positive_definite=linop.is_positive_definite, is_square=linop.is_square) @linear_operator_algebra.RegisterInverse( linear_operator_inversion.LinearOperatorInversion) def _inverse_inverse_linear_operator(linop_inversion): return linop_inversion.operator @linear_operator_algebra.RegisterInverse( linear_operator_diag.LinearOperatorDiag) def _inverse_diag(diag_operator): return linear_operator_diag.LinearOperatorDiag( 1. / diag_operator.diag, is_non_singular=diag_operator.is_non_singular, is_self_adjoint=diag_operator.is_self_adjoint, is_positive_definite=diag_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_identity.LinearOperatorIdentity) def _inverse_identity(identity_operator): return identity_operator @linear_operator_algebra.RegisterInverse( linear_operator_identity.LinearOperatorScaledIdentity) def _inverse_scaled_identity(identity_operator): return linear_operator_identity.LinearOperatorScaledIdentity( num_rows=identity_operator._num_rows, # pylint: disable=protected-access multiplier=1. / identity_operator.multiplier, is_non_singular=identity_operator.is_non_singular, is_self_adjoint=True, is_positive_definite=identity_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_block_diag.LinearOperatorBlockDiag) def _inverse_block_diag(block_diag_operator): # We take the inverse of each block on the diagonal. 
return linear_operator_block_diag.LinearOperatorBlockDiag( operators=[ operator.inverse() for operator in block_diag_operator.operators], is_non_singular=block_diag_operator.is_non_singular, is_self_adjoint=block_diag_operator.is_self_adjoint, is_positive_definite=block_diag_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_kronecker.LinearOperatorKronecker) def _inverse_kronecker(kronecker_operator): # Inverse decomposition of a Kronecker product is the Kronecker product # of inverse decompositions. return linear_operator_kronecker.LinearOperatorKronecker( operators=[ operator.inverse() for operator in kronecker_operator.operators], is_non_singular=kronecker_operator.is_non_singular, is_self_adjoint=kronecker_operator.is_self_adjoint, is_positive_definite=kronecker_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_circulant.LinearOperatorCirculant) def _inverse_circulant(circulant_operator): # Inverting the spectrum is sufficient to get the inverse. return linear_operator_circulant.LinearOperatorCirculant( spectrum=1. / circulant_operator.spectrum, is_non_singular=circulant_operator.is_non_singular, is_self_adjoint=circulant_operator.is_self_adjoint, is_positive_definite=circulant_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterInverse( linear_operator_householder.LinearOperatorHouseholder) def _inverse_householder(householder_operator): return householder_operator
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/inverse_registrations.py
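The registrations in the record above map each concrete `LinearOperator` subclass to a cheap structural inverse (for example, a diagonal operator is inverted by taking reciprocals of its diagonal). Since the registration modules are private TensorFlow internals, here is a hedged standalone sketch of the decorator-based registration pattern itself; every name below is illustrative:

```python
# Standalone sketch of the RegisterInverse decorator pattern used above.
# ScaledIdentity and the _INVERSES registry are illustrative, not TF APIs.
_INVERSES = {}

class RegisterInverse(object):
    def __init__(self, cls):
        self._key = (cls,)

    def __call__(self, inverse_fn):
        if self._key in _INVERSES:
            raise ValueError(
                "Inverse already registered for %s" % self._key[0].__name__)
        _INVERSES[self._key] = inverse_fn
        return inverse_fn

class ScaledIdentity(object):
    def __init__(self, multiplier):
        self.multiplier = multiplier

@RegisterInverse(ScaledIdentity)
def _inverse_scaled_identity(op):
    # Inverting a scaled identity only requires inverting the scalar.
    return ScaledIdentity(1. / op.multiplier)

op = ScaledIdentity(4.0)
inv = _INVERSES[(ScaledIdentity,)](op)
print(inv.multiplier)  # 0.25
```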
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Public API for tf.linalg namespace.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # go/tf-wildcard-import # pylint: disable=wildcard-import,unused-import from tensorflow.python.ops.linalg import adjoint_registrations as _adjoint_registrations from tensorflow.python.ops.linalg import cholesky_registrations as _cholesky_registrations from tensorflow.python.ops.linalg import inverse_registrations as _inverse_registrations from tensorflow.python.ops.linalg import linear_operator_algebra as _linear_operator_algebra from tensorflow.python.ops.linalg import matmul_registrations as _matmul_registrations from tensorflow.python.ops.linalg import solve_registrations as _solve_registrations from tensorflow.python.ops.linalg.linalg_impl import * from tensorflow.python.ops.linalg.linear_operator import * from tensorflow.python.ops.linalg.linear_operator_block_diag import * from tensorflow.python.ops.linalg.linear_operator_circulant import * from tensorflow.python.ops.linalg.linear_operator_composition import * from tensorflow.python.ops.linalg.linear_operator_diag import * from tensorflow.python.ops.linalg.linear_operator_full_matrix import * from tensorflow.python.ops.linalg.linear_operator_identity import * from tensorflow.python.ops.linalg.linear_operator_kronecker import * from tensorflow.python.ops.linalg.linear_operator_low_rank_update import * from tensorflow.python.ops.linalg.linear_operator_lower_triangular import * from tensorflow.python.ops.linalg.linear_operator_toeplitz import * from tensorflow.python.ops.linalg.linear_operator_zeros import * # pylint: enable=wildcard-import # Seal API. # pylint: disable=undefined-variable del absolute_import del division del print_function del ops del array_ops del gen_linalg_ops del linalg_ops del math_ops del special_math_ops del tf_export # pylint: enable=undefined-variable
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linalg.py
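Importing the module in the record above wires the adjoint/cholesky/inverse/matmul/solve registrations into the public `tf.linalg` namespace. A short usage sketch, assuming TensorFlow 1.15 is installed (eager execution is enabled here only so the values print directly):

```python
# Usage sketch of the public tf.linalg namespace assembled above.
# Assumes TensorFlow 1.15; eager execution makes tensor values printable.
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

op = tf.linalg.LinearOperatorDiag([1., 2., 3.], is_non_singular=True)
print(op.to_dense())             # 3x3 diagonal matrix
print(op.determinant())          # 6.0
print(op.inverse().to_dense())   # diagonal entries 1, 1/2, 1/3
```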
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` acting like a zero matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = [ "LinearOperatorZeros", ] @tf_export("linalg.LinearOperatorZeros") class LinearOperatorZeros(linear_operator.LinearOperator): """`LinearOperator` acting like a [batch] zero matrix. This operator acts like a [batch] zero matrix `A` with shape `[B1,...,Bb, N, M]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x M` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. `LinearOperatorZeros` is initialized with `num_rows`, and optionally `num_columns, `batch_shape`, and `dtype` arguments. If `num_columns` is `None`, then this operator will be initialized as a square matrix. If `batch_shape` is `None`, this operator efficiently passes through all arguments. If `batch_shape` is provided, broadcasting may occur, which will require making copies. ```python # Create a 2 x 2 zero matrix. operator = LinearOperatorZero(num_rows=2, dtype=tf.float32) operator.to_dense() ==> [[0., 0.] [0., 0.]] operator.shape ==> [2, 2] operator.determinant() ==> 0. x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor, same as x. # Create a 2-batch of 2x2 zero matrices operator = LinearOperatorZeros(num_rows=2, batch_shape=[2]) operator.to_dense() ==> [[[0., 0.] [0., 0.]], [[0., 0.] [0., 0.]]] # Here, even though the operator has a batch shape, the input is the same as # the output, so x can be passed through without a copy. The operator is able # to detect that no broadcast is necessary because both x and the operator # have statically defined shape. x = ... Shape [2, 2, 3] operator.matmul(x) ==> Shape [2, 2, 3] Tensor, same as tf.zeros_like(x) # Here the operator and x have different batch_shape, and are broadcast. # This requires a copy, since the output is different size than the input. x = ... 
Shape [1, 2, 3] operator.matmul(x) ==> Shape [2, 2, 3] Tensor, equal to tf.zeros_like([x, x]) ``` ### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [N, M], with b >= 0 x.shape = [C1,...,Cc] + [M, R], and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd] ``` #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, num_rows, num_columns=None, batch_shape=None, dtype=None, is_non_singular=False, is_self_adjoint=True, is_positive_definite=False, is_square=True, assert_proper_shapes=False, name="LinearOperatorZeros"): r"""Initialize a `LinearOperatorZeros`. The `LinearOperatorZeros` is initialized with arguments defining `dtype` and shape. This operator is able to broadcast the leading (batch) dimensions, which sometimes requires copying data. If `batch_shape` is `None`, the operator can take arguments of any batch shape without copying. See examples. Args: num_rows: Scalar non-negative integer `Tensor`. Number of rows in the corresponding zero matrix. num_columns: Scalar non-negative integer `Tensor`. Number of columns in the corresponding zero matrix. If `None`, defaults to the value of `num_rows`. batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading dimensions. If `None`, this operator has no leading dimensions. dtype: Data type of the matrix that this operator represents. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. assert_proper_shapes: Python `bool`. If `False`, only perform static checks that initialization and method arguments have proper shape. If `True`, and static checks are inconclusive, add asserts to the graph. name: A name for this `LinearOperator` Raises: ValueError: If `num_rows` is determined statically to be non-scalar, or negative. ValueError: If `num_columns` is determined statically to be non-scalar, or negative. ValueError: If `batch_shape` is determined statically to not be 1-D, or negative. ValueError: If any of the following is not `True`: `{is_self_adjoint, is_non_singular, is_positive_definite}`. 
""" dtype = dtype or dtypes.float32 self._assert_proper_shapes = assert_proper_shapes with ops.name_scope(name): dtype = dtypes.as_dtype(dtype) if not is_self_adjoint and is_square: raise ValueError("A zero operator is always self adjoint.") if is_non_singular: raise ValueError("A zero operator is always singular.") if is_positive_definite: raise ValueError("A zero operator is always not positive-definite.") super(LinearOperatorZeros, self).__init__( dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) linear_operator_util.assert_not_ref_type(num_rows, "num_rows") linear_operator_util.assert_not_ref_type(num_columns, "num_columns") linear_operator_util.assert_not_ref_type(batch_shape, "batch_shape") self._num_rows = linear_operator_util.shape_tensor( num_rows, name="num_rows") self._num_rows_static = tensor_util.constant_value(self._num_rows) if num_columns is None: num_columns = num_rows self._num_columns = linear_operator_util.shape_tensor( num_columns, name="num_columns") self._num_columns_static = tensor_util.constant_value(self._num_columns) self._check_domain_range_possibly_add_asserts() if (self._num_rows_static is not None and self._num_columns_static is not None): if is_square and self._num_rows_static != self._num_columns_static: raise ValueError( "LinearOperatorZeros initialized as is_square=True, but got " "num_rows({}) != num_columns({})".format( self._num_rows_static, self._num_columns_static)) if batch_shape is None: self._batch_shape_arg = None else: self._batch_shape_arg = linear_operator_util.shape_tensor( batch_shape, name="batch_shape_arg") self._batch_shape_static = tensor_util.constant_value( self._batch_shape_arg) self._check_batch_shape_possibly_add_asserts() def _shape(self): matrix_shape = tensor_shape.TensorShape((self._num_rows_static, self._num_columns_static)) if self._batch_shape_arg is None: return matrix_shape batch_shape = tensor_shape.TensorShape(self._batch_shape_static) return batch_shape.concatenate(matrix_shape) def _shape_tensor(self): matrix_shape = array_ops.stack((self._num_rows, self._num_columns), axis=0) if self._batch_shape_arg is None: return matrix_shape return array_ops.concat((self._batch_shape_arg, matrix_shape), 0) def _assert_non_singular(self): raise errors.InvalidArgumentError( node_def=None, op=None, message="Zero operators are always " "non-invertible.") def _assert_positive_definite(self): raise errors.InvalidArgumentError( node_def=None, op=None, message="Zero operators are always " "non-positive definite.") def _assert_self_adjoint(self): return control_flow_ops.no_op("assert_self_adjoint") def _possibly_broadcast_batch_shape(self, x): """Return 'x', possibly after broadcasting the leading dimensions.""" # If we have no batch shape, our batch shape broadcasts with everything! if self._batch_shape_arg is None: return x # Static attempt: # If we determine that no broadcast is necessary, pass x through # If we need a broadcast, add to an array of zeros. # # special_shape is the shape that, when broadcast with x's shape, will give # the correct broadcast_shape. Note that # We have already verified the second to last dimension of self.shape # matches x's shape in assert_compatible_matrix_dimensions. # Also, the final dimension of 'x' can have any shape. # Therefore, the final two dimensions of special_shape are 1's. 
special_shape = self.batch_shape.concatenate([1, 1]) bshape = array_ops.broadcast_static_shape(x.shape, special_shape) if special_shape.is_fully_defined(): # bshape.is_fully_defined iff special_shape.is_fully_defined. if bshape == x.shape: return x # Use the built in broadcasting of addition. zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype) return x + zeros # Dynamic broadcast: # Always add to an array of zeros, rather than using a "cond", since a # cond would require copying data from GPU --> CPU. special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0) zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype) return x + zeros def _matmul(self, x, adjoint=False, adjoint_arg=False): if self._assert_proper_shapes: x = linalg.adjoint(x) if adjoint_arg else x aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x) x = control_flow_ops.with_dependencies([aps], x) if self.is_square: # Note that adjoint has no effect since this matrix is self-adjoint. if adjoint_arg: output_shape = array_ops.concat([ array_ops.shape(x)[:-2], [array_ops.shape(x)[-1], array_ops.shape(x)[-2]]], axis=0) else: output_shape = array_ops.shape(x) return self._possibly_broadcast_batch_shape( array_ops.zeros(shape=output_shape, dtype=x.dtype)) x_shape = array_ops.shape(x) n = self._num_columns if adjoint else self._num_rows m = x_shape[-2] if adjoint_arg else x_shape[-1] output_shape = array_ops.concat([x_shape[:-2], [n, m]], axis=0) zeros = array_ops.zeros(shape=output_shape, dtype=x.dtype) return self._possibly_broadcast_batch_shape(zeros) def _determinant(self): if self.batch_shape.is_fully_defined(): return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype) else: return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype) def _trace(self): # Get Tensor of all zeros of same shape as self.batch_shape. if self.batch_shape.is_fully_defined(): return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype) else: return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype) def _diag_part(self): return self._zeros_diag() def add_to_tensor(self, mat, name="add_to_tensor"): """Add matrix represented by this operator to `mat`. Equiv to `I + mat`. Args: mat: `Tensor` with same `dtype` and shape broadcastable to `self`. name: A name to give this `Op`. Returns: A `Tensor` with broadcast shape and same `dtype` as `self`. """ return self._possibly_broadcast_batch_shape(mat) def _check_domain_range_possibly_add_asserts(self): """Static check of init arg `num_rows`, possibly add asserts.""" # Possibly add asserts. if self._assert_proper_shapes: self._num_rows = control_flow_ops.with_dependencies([ check_ops.assert_rank( self._num_rows, 0, message="Argument num_rows must be a 0-D Tensor."), check_ops.assert_non_negative( self._num_rows, message="Argument num_rows must be non-negative."), ], self._num_rows) self._num_columns = control_flow_ops.with_dependencies([ check_ops.assert_rank( self._num_columns, 0, message="Argument num_columns must be a 0-D Tensor."), check_ops.assert_non_negative( self._num_columns, message="Argument num_columns must be non-negative."), ], self._num_columns) # Static checks. if not self._num_rows.dtype.is_integer: raise TypeError("Argument num_rows must be integer type. Found:" " %s" % self._num_rows) if not self._num_columns.dtype.is_integer: raise TypeError("Argument num_columns must be integer type. 
Found:" " %s" % self._num_columns) num_rows_static = self._num_rows_static num_columns_static = self._num_columns_static if num_rows_static is not None: if num_rows_static.ndim != 0: raise ValueError("Argument num_rows must be a 0-D Tensor. Found:" " %s" % num_rows_static) if num_rows_static < 0: raise ValueError("Argument num_rows must be non-negative. Found:" " %s" % num_rows_static) if num_columns_static is not None: if num_columns_static.ndim != 0: raise ValueError("Argument num_columns must be a 0-D Tensor. Found:" " %s" % num_columns_static) if num_columns_static < 0: raise ValueError("Argument num_columns must be non-negative. Found:" " %s" % num_columns_static) def _check_batch_shape_possibly_add_asserts(self): """Static check of init arg `batch_shape`, possibly add asserts.""" if self._batch_shape_arg is None: return # Possibly add asserts if self._assert_proper_shapes: self._batch_shape_arg = control_flow_ops.with_dependencies([ check_ops.assert_rank( self._batch_shape_arg, 1, message="Argument batch_shape must be a 1-D Tensor."), check_ops.assert_non_negative( self._batch_shape_arg, message="Argument batch_shape must be non-negative."), ], self._batch_shape_arg) # Static checks if not self._batch_shape_arg.dtype.is_integer: raise TypeError("Argument batch_shape must be integer type. Found:" " %s" % self._batch_shape_arg) if self._batch_shape_static is None: return # Cannot do any other static checks. if self._batch_shape_static.ndim != 1: raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:" " %s" % self._batch_shape_static) if np.any(self._batch_shape_static < 0): raise ValueError("Argument batch_shape must be non-negative. Found:" "%s" % self._batch_shape_static) def _min_matrix_dim(self): """Minimum of domain/range dimension, if statically available, else None.""" domain_dim = self.domain_dimension.value range_dim = self.range_dimension.value if domain_dim is None or range_dim is None: return None return min(domain_dim, range_dim) def _min_matrix_dim_tensor(self): """Minimum of domain/range dimension, as a tensor.""" return math_ops.reduce_min(self.shape_tensor()[-2:]) def _zeros_diag(self): """Returns the diagonal of this operator as all zeros.""" if self.shape.is_fully_defined(): d_shape = self.batch_shape.concatenate([self._min_matrix_dim()]) else: d_shape = array_ops.concat( [self.batch_shape_tensor(), [self._min_matrix_dim_tensor()]], axis=0) return array_ops.zeros(shape=d_shape, dtype=self.dtype)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_zeros.py
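A short usage sketch of the operator above may help: it stores no values, every `matmul` simply materializes zeros of the broadcast output shape, and hint combinations that a zero matrix can never satisfy are rejected at construction. The sketch below is illustrative only; it assumes TensorFlow 1.15 with eager execution enabled and the public `tf.linalg.LinearOperatorZeros` export of this class.

```python
# Illustrative sketch only (assumes TF 1.15, eager execution enabled).
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Square 2 x 2 zero operator with the default hints
# (self-adjoint, singular, not positive-definite).
operator = tf.linalg.LinearOperatorZeros(num_rows=2)

x = tf.ones(shape=[2, 4])

# matmul never reads the values of x; it returns zeros of the
# broadcast output shape, here [2, 4].
print(operator.matmul(x).numpy())

# add_to_tensor is the identity on `mat`, since adding a zero matrix
# changes nothing (up to batch-shape broadcasting).
mat = tf.constant([[1., 2.], [3., 4.]])
print(operator.add_to_tensor(mat).numpy())

# Hints that contradict a zero matrix raise at construction time.
try:
    tf.linalg.LinearOperatorZeros(num_rows=2, is_non_singular=True)
except ValueError as e:
    print(e)  # "A zero operator is always singular."
```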
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Add one or more `LinearOperators` efficiently.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_diag from tensorflow.python.ops.linalg import linear_operator_full_matrix from tensorflow.python.ops.linalg import linear_operator_identity from tensorflow.python.ops.linalg import linear_operator_lower_triangular __all__ = [] def add_operators(operators, operator_name=None, addition_tiers=None, name=None): """Efficiently add one or more linear operators. Given operators `[A1, A2,...]`, this `Op` returns a possibly shorter list of operators `[B1, B2,...]` such that ```sum_k Ak.matmul(x) = sum_k Bk.matmul(x).``` The operators `Bk` result by adding some of the `Ak`, as allowed by `addition_tiers`. Example of efficient adding of diagonal operators. ```python A1 = LinearOperatorDiag(diag=[1., 1.], name="A1") A2 = LinearOperatorDiag(diag=[2., 2.], name="A2") # Use two tiers, the first contains an Adder that returns Diag. Since both # A1 and A2 are Diag, they can use this Adder. The second tier will not be # used. addition_tiers = [ [_AddAndReturnDiag()], [_AddAndReturnMatrix()]] B_list = add_operators([A1, A2], addition_tiers=addition_tiers) len(B_list) ==> 1 B_list[0].__class__.__name__ ==> 'LinearOperatorDiag' B_list[0].to_dense() ==> [[3., 0.], [0., 3.]] B_list[0].name ==> 'Add/A1__A2/' ``` Args: operators: Iterable of `LinearOperator` objects with same `dtype`, domain and range dimensions, and broadcastable batch shapes. operator_name: String name for returned `LinearOperator`. Defaults to concatenation of "Add/A__B/" that indicates the order of addition steps. addition_tiers: List tiers, like `[tier_0, tier_1, ...]`, where `tier_i` is a list of `Adder` objects. This function attempts to do all additions in tier `i` before trying tier `i + 1`. name: A name for this `Op`. Defaults to `add_operators`. Returns: Subclass of `LinearOperator`. Class and order of addition may change as new (and better) addition strategies emerge. Raises: ValueError: If `operators` argument is empty. ValueError: If shapes are incompatible. """ # Default setting if addition_tiers is None: addition_tiers = _DEFAULT_ADDITION_TIERS # Argument checking. check_ops.assert_proper_iterable(operators) operators = list(reversed(operators)) if len(operators) < 1: raise ValueError( "Argument 'operators' must contain at least one operator. 
" "Found: %s" % operators) if not all( isinstance(op, linear_operator.LinearOperator) for op in operators): raise TypeError( "Argument 'operators' must contain only LinearOperator instances. " "Found: %s" % operators) _static_check_for_same_dimensions(operators) _static_check_for_broadcastable_batch_shape(operators) graph_parents = [] for operator in operators: graph_parents.extend(operator.graph_parents) with ops.name_scope(name or "add_operators", values=graph_parents): # Additions done in one of the tiers. Try tier 0, 1,... ops_to_try_at_next_tier = list(operators) for tier in addition_tiers: ops_to_try_at_this_tier = ops_to_try_at_next_tier ops_to_try_at_next_tier = [] while ops_to_try_at_this_tier: op1 = ops_to_try_at_this_tier.pop() op2, adder = _pop_a_match_at_tier(op1, ops_to_try_at_this_tier, tier) if op2 is not None: # Will try to add the result of this again at this same tier. new_operator = adder.add(op1, op2, operator_name) ops_to_try_at_this_tier.append(new_operator) else: ops_to_try_at_next_tier.append(op1) return ops_to_try_at_next_tier def _pop_a_match_at_tier(op1, operator_list, tier): # Search from the back of list to the front in order to create nice default # order of operations. for i in range(1, len(operator_list) + 1): op2 = operator_list[-i] for adder in tier: if adder.can_add(op1, op2): return operator_list.pop(-i), adder return None, None def _infer_hints_allowing_override(op1, op2, hints): """Infer hints from op1 and op2. hints argument is an override. Args: op1: LinearOperator op2: LinearOperator hints: _Hints object holding "is_X" boolean hints to use for returned operator. If some hint is None, try to set using op1 and op2. If the hint is provided, ignore op1 and op2 hints. This allows an override of previous hints, but does not allow forbidden hints (e.g. you still cannot say a real diagonal operator is not self-adjoint. Returns: _Hints object. """ hints = hints or _Hints() # If A, B are self-adjoint, then so is A + B. if hints.is_self_adjoint is None: is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint else: is_self_adjoint = hints.is_self_adjoint # If A, B are positive definite, then so is A + B. if hints.is_positive_definite is None: is_positive_definite = op1.is_positive_definite and op2.is_positive_definite else: is_positive_definite = hints.is_positive_definite # A positive definite operator is always non-singular. if is_positive_definite and hints.is_positive_definite is None: is_non_singular = True else: is_non_singular = hints.is_non_singular return _Hints( is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite) def _static_check_for_same_dimensions(operators): """ValueError if operators determined to have different dimensions.""" if len(operators) < 2: return domain_dimensions = [ (op.name, tensor_shape.dimension_value(op.domain_dimension)) for op in operators if tensor_shape.dimension_value(op.domain_dimension) is not None] if len(set(value for name, value in domain_dimensions)) > 1: raise ValueError("Operators must have the same domain dimension. Found: %s" % domain_dimensions) range_dimensions = [ (op.name, tensor_shape.dimension_value(op.range_dimension)) for op in operators if tensor_shape.dimension_value(op.range_dimension) is not None] if len(set(value for name, value in range_dimensions)) > 1: raise ValueError("Operators must have the same range dimension. 
Found: %s" % range_dimensions) def _static_check_for_broadcastable_batch_shape(operators): """ValueError if operators determined to have non-broadcastable shapes.""" if len(operators) < 2: return # This will fail if they cannot be broadcast together. batch_shape = operators[0].batch_shape for op in operators[1:]: batch_shape = array_ops.broadcast_static_shape(batch_shape, op.batch_shape) class _Hints(object): """Holds 'is_X' flags that every LinearOperator is initialized with.""" def __init__(self, is_non_singular=None, is_positive_definite=None, is_self_adjoint=None): self.is_non_singular = is_non_singular self.is_positive_definite = is_positive_definite self.is_self_adjoint = is_self_adjoint ################################################################################ # Classes to add two linear operators. ################################################################################ @six.add_metaclass(abc.ABCMeta) class _Adder(object): """Abstract base class to add two operators. Each `Adder` acts independently, adding everything it can, paying no attention as to whether another `Adder` could have done the addition more efficiently. """ @property def name(self): return self.__class__.__name__ @abc.abstractmethod def can_add(self, op1, op2): """Returns `True` if this `Adder` can add `op1` and `op2`. Else `False`.""" pass @abc.abstractmethod def _add(self, op1, op2, operator_name, hints): # Derived classes can assume op1 and op2 have been validated, e.g. they have # the same dtype, and their domain/range dimensions match. pass def add(self, op1, op2, operator_name, hints=None): """Return new `LinearOperator` acting like `op1 + op2`. Args: op1: `LinearOperator` op2: `LinearOperator`, with `shape` and `dtype` such that adding to `op1` is allowed. operator_name: `String` name to give to returned `LinearOperator` hints: `_Hints` object. Returned `LinearOperator` will be created with these hints. Returns: `LinearOperator` """ updated_hints = _infer_hints_allowing_override(op1, op2, hints) if operator_name is None: operator_name = "Add/" + op1.name + "__" + op2.name + "/" values = op1.graph_parents + op2.graph_parents scope_name = self.name if scope_name.startswith("_"): scope_name = scope_name[1:] with ops.name_scope(scope_name, values=values): return self._add(op1, op2, operator_name, updated_hints) class _AddAndReturnScaledIdentity(_Adder): """Handles additions resulting in an Identity family member. The Identity (`LinearOperatorScaledIdentity`, `LinearOperatorIdentity`) family is closed under addition. This `Adder` respects that, and returns an Identity """ def can_add(self, op1, op2): types = {_type(op1), _type(op2)} return not types.difference(_IDENTITY_FAMILY) def _add(self, op1, op2, operator_name, hints): # Will build a LinearOperatorScaledIdentity. 
if _type(op1) == _SCALED_IDENTITY: multiplier_1 = op1.multiplier else: multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype) if _type(op2) == _SCALED_IDENTITY: multiplier_2 = op2.multiplier else: multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype) return linear_operator_identity.LinearOperatorScaledIdentity( num_rows=op1.range_dimension_tensor(), multiplier=multiplier_1 + multiplier_2, is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name) class _AddAndReturnDiag(_Adder): """Handles additions resulting in a Diag operator.""" def can_add(self, op1, op2): types = {_type(op1), _type(op2)} return not types.difference(_DIAG_LIKE) def _add(self, op1, op2, operator_name, hints): return linear_operator_diag.LinearOperatorDiag( diag=op1.diag_part() + op2.diag_part(), is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name) class _AddAndReturnTriL(_Adder): """Handles additions resulting in a TriL operator.""" def can_add(self, op1, op2): types = {_type(op1), _type(op2)} return not types.difference(_DIAG_LIKE.union({_TRIL})) def _add(self, op1, op2, operator_name, hints): if _type(op1) in _EFFICIENT_ADD_TO_TENSOR: op_add_to_tensor, op_other = op1, op2 else: op_add_to_tensor, op_other = op2, op1 return linear_operator_lower_triangular.LinearOperatorLowerTriangular( tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()), is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name) class _AddAndReturnMatrix(_Adder): """"Handles additions resulting in a `LinearOperatorFullMatrix`.""" def can_add(self, op1, op2): # pylint: disable=unused-argument return isinstance(op1, linear_operator.LinearOperator) and isinstance( op2, linear_operator.LinearOperator) def _add(self, op1, op2, operator_name, hints): if _type(op1) in _EFFICIENT_ADD_TO_TENSOR: op_add_to_tensor, op_other = op1, op2 else: op_add_to_tensor, op_other = op2, op1 return linear_operator_full_matrix.LinearOperatorFullMatrix( matrix=op_add_to_tensor.add_to_tensor(op_other.to_dense()), is_non_singular=hints.is_non_singular, is_self_adjoint=hints.is_self_adjoint, is_positive_definite=hints.is_positive_definite, name=operator_name) ################################################################################ # Constants designating types of LinearOperators ################################################################################ # Type name constants for LinearOperator classes. _IDENTITY = "identity" _SCALED_IDENTITY = "scaled_identity" _DIAG = "diag" _TRIL = "tril" _MATRIX = "matrix" # Groups of operators. _DIAG_LIKE = {_DIAG, _IDENTITY, _SCALED_IDENTITY} _IDENTITY_FAMILY = {_IDENTITY, _SCALED_IDENTITY} # operators with an efficient .add_to_tensor() method. _EFFICIENT_ADD_TO_TENSOR = _DIAG_LIKE def _type(operator): """Returns the type name constant (e.g. 
_TRIL) for operator.""" if isinstance(operator, linear_operator_diag.LinearOperatorDiag): return _DIAG if isinstance(operator, linear_operator_lower_triangular.LinearOperatorLowerTriangular): return _TRIL if isinstance(operator, linear_operator_full_matrix.LinearOperatorFullMatrix): return _MATRIX if isinstance(operator, linear_operator_identity.LinearOperatorIdentity): return _IDENTITY if isinstance(operator, linear_operator_identity.LinearOperatorScaledIdentity): return _SCALED_IDENTITY raise TypeError("Operator type unknown: %s" % operator) ################################################################################ # Addition tiers: # We attempt to use Adders in tier K before K+1. # # Organize tiers to # (i) reduce O(..) complexity of forming final operator, and # (ii) produce the "most efficient" final operator. # Dev notes: # * Results of addition at tier K will be added at tier K or higher. # * Tiers may change, and we warn the user that it may change. ################################################################################ # Note that the final tier, _AddAndReturnMatrix, will convert everything to a # dense matrix. So it is sometimes very inefficient. _DEFAULT_ADDITION_TIERS = [ [_AddAndReturnScaledIdentity()], [_AddAndReturnDiag()], [_AddAndReturnTriL()], [_AddAndReturnMatrix()], ]
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_addition.py
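Since `__all__` is empty, `add_operators` is internal rather than part of the public `tf.linalg` namespace, so a sketch has to import the module by its full path. The example below, assuming TensorFlow 1.15 with eager execution, shows the default tiers in action: the ScaledIdentity tier cannot absorb a Diag operator, but the Diag tier can, so the sum collapses to a single `LinearOperatorDiag`.

```python
# Illustrative sketch only (assumes TF 1.15, eager execution enabled).
# add_operators is not exported, hence the internal import path.
import tensorflow as tf
from tensorflow.python.ops.linalg import linear_operator_addition

tf.compat.v1.enable_eager_execution()

diag = tf.linalg.LinearOperatorDiag(diag=[1., 2.], name="D")
scaled_id = tf.linalg.LinearOperatorScaledIdentity(
    num_rows=2, multiplier=3., name="cI")

# Tier 0 (_AddAndReturnScaledIdentity) only handles the identity family,
# so it leaves this pair alone; tier 1 (_AddAndReturnDiag) adds
# Diag + ScaledIdentity by summing their diagonal parts.
result = linear_operator_addition.add_operators([diag, scaled_id])

print(len(result))                   # 1
print(type(result[0]).__name__)      # LinearOperatorDiag
print(result[0].to_dense().numpy())  # [[4. 0.]
                                     #  [0. 5.]]
```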
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Perturb a `LinearOperator` with a rank `K` update.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_diag from tensorflow.python.ops.linalg import linear_operator_identity from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import tf_export __all__ = [ "LinearOperatorLowRankUpdate", ] @tf_export("linalg.LinearOperatorLowRankUpdate") class LinearOperatorLowRankUpdate(linear_operator.LinearOperator): """Perturb a `LinearOperator` with a rank `K` update. This operator acts like a [batch] matrix `A` with shape `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `M x N` matrix. `LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where ``` L, is a LinearOperator representing [batch] M x N matrices U, is a [batch] M x K matrix. Typically K << M. D, is a [batch] K x K matrix. V, is a [batch] N x K matrix. Typically K << N. V^H is the Hermitian transpose (adjoint) of V. ``` If `M = N`, determinants and solves are done using the matrix determinant lemma and Woodbury identities, and thus require L and D to be non-singular. Solves and determinants will be attempted unless the "is_non_singular" property of L and D is False. In the event that L and D are positive-definite, and U = V, solves and determinants can be done using a Cholesky factorization. ```python # Create a 3 x 3 diagonal linear operator. diag_operator = LinearOperatorDiag( diag_update=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True, is_positive_definite=True) # Perturb with a rank 2 perturbation operator = LinearOperatorLowRankUpdate( operator=diag_operator, u=[[1., 2.], [-1., 3.], [0., 0.]], diag_update=[11., 12.], v=[[1., 2.], [-1., 3.], [10., 10.]]) operator.shape ==> [3, 3] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [3, 4] Tensor operator.matmul(x) ==> Shape [3, 4] Tensor ``` ### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [M, N], with b >= 0 x.shape = [B1,...,Bb] + [N, R], with R >= 0. 
``` ### Performance Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`, made from a rank `K` update of `base_operator` which performs `.matmul(x)` on `x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly for `solve`, `determinant`. Then, if `x.shape = [N, R]`, * `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)` and if `M = N`, * `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)` * `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)` If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular`, `self_adjoint`, `positive_definite`, `diag_update_positive` and `square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, base_operator, u, diag_update=None, v=None, is_diag_update_positive=None, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorLowRankUpdate"): """Initialize a `LinearOperatorLowRankUpdate`. This creates a `LinearOperator` of the form `A = L + U D V^H`, with `L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch] diagonal matrix. If `L` is non-singular, solves and determinants are available. Solves/determinants both involve a solve/determinant of a `K x K` system. In the event that L and D are self-adjoint positive-definite, and U = V, this can be done using a Cholesky factorization. The user should set the `is_X` matrix property hints, which will trigger the appropriate code path. Args: base_operator: Shape `[B1,...,Bb, M, N]`. u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`. This is `U` above. diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype` as `base_operator`. This is the diagonal of `D` above. Defaults to `D` being the identity operator. v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]` Defaults to `v = u`, in which case the perturbation is symmetric. If `M != N`, then `v` must be set since the perturbation is not square. is_diag_update_positive: Python `bool`. If `True`, expect `diag_update > 0`. is_non_singular: Expect that this operator is non-singular. Default is `None`, unless `is_positive_definite` is auto-set to be `True` (see below). is_self_adjoint: Expect that this operator is equal to its hermitian transpose. Default is `None`, unless `base_operator` is self-adjoint and `v = None` (meaning `u=v`), in which case this defaults to `True`. is_positive_definite: Expect that this operator is positive definite. Default is `None`, unless `base_operator` is positive-definite `v = None` (meaning `u=v`), and `is_diag_update_positive`, in which case this defaults to `True`. Note that we say an operator is positive definite when the quadratic form `x^H A x` has positive real part for all nonzero `x`. is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. 
Raises: ValueError: If `is_X` flags are set in an inconsistent way. """ dtype = base_operator.dtype if diag_update is not None: if is_diag_update_positive and dtype.is_complex: logging.warn("Note: setting is_diag_update_positive with a complex " "dtype means that diagonal is real and positive.") if diag_update is None: if is_diag_update_positive is False: raise ValueError( "Default diagonal is the identity, which is positive. However, " "user set 'is_diag_update_positive' to False.") is_diag_update_positive = True # In this case, we can use a Cholesky decomposition to help us solve/det. self._use_cholesky = ( base_operator.is_positive_definite and base_operator.is_self_adjoint and is_diag_update_positive and v is None) # Possibly auto-set some characteristic flags from None to True. # If the Flags were set (by the user) incorrectly to False, then raise. if base_operator.is_self_adjoint and v is None and not dtype.is_complex: if is_self_adjoint is False: raise ValueError( "A = L + UDU^H, with L self-adjoint and D real diagonal. Since" " UDU^H is self-adjoint, this must be a self-adjoint operator.") is_self_adjoint = True # The condition for using a cholesky is sufficient for SPD, and # we no weaker choice of these hints leads to SPD. Therefore, # the following line reads "if hints indicate SPD..." if self._use_cholesky: if ( is_positive_definite is False or is_self_adjoint is False or is_non_singular is False): raise ValueError( "Arguments imply this is self-adjoint positive-definite operator.") is_positive_definite = True is_self_adjoint = True values = base_operator.graph_parents + [u, diag_update, v] with ops.name_scope(name, values=values): # Create U and V. self._u = linear_operator_util.convert_nonref_to_tensor(u, name="u") if v is None: self._v = self._u else: self._v = linear_operator_util.convert_nonref_to_tensor(v, name="v") if diag_update is None: self._diag_update = None else: self._diag_update = linear_operator_util.convert_nonref_to_tensor( diag_update, name="diag_update") # Create base_operator L. self._base_operator = base_operator graph_parents = base_operator.graph_parents + [ self.u, self._diag_update, self.v] graph_parents = [p for p in graph_parents if p is not None] super(LinearOperatorLowRankUpdate, self).__init__( dtype=self._base_operator.dtype, graph_parents=graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) # Create the diagonal operator D. self._set_diag_operators(diag_update, is_diag_update_positive) self._is_diag_update_positive = is_diag_update_positive self._check_shapes() def _check_shapes(self): """Static check that shapes are compatible.""" # Broadcast shape also checks that u and v are compatible. 
uv_shape = array_ops.broadcast_static_shape( self.u.shape, self.v.shape) batch_shape = array_ops.broadcast_static_shape( self.base_operator.batch_shape, uv_shape[:-2]) tensor_shape.Dimension( self.base_operator.domain_dimension).assert_is_compatible_with( uv_shape[-2]) if self._diag_update is not None: tensor_shape.dimension_at_index(uv_shape, -1).assert_is_compatible_with( self._diag_update.shape[-1]) array_ops.broadcast_static_shape( batch_shape, self._diag_update.shape[:-1]) def _set_diag_operators(self, diag_update, is_diag_update_positive): """Set attributes self._diag_update and self._diag_operator.""" if diag_update is not None: self._diag_operator = linear_operator_diag.LinearOperatorDiag( self._diag_update, is_positive_definite=is_diag_update_positive) else: if tensor_shape.dimension_value(self.u.shape[-1]) is not None: r = tensor_shape.dimension_value(self.u.shape[-1]) else: r = array_ops.shape(self.u)[-1] self._diag_operator = linear_operator_identity.LinearOperatorIdentity( num_rows=r, dtype=self.dtype) @property def u(self): """If this operator is `A = L + U D V^H`, this is the `U`.""" return self._u @property def v(self): """If this operator is `A = L + U D V^H`, this is the `V`.""" return self._v @property def is_diag_update_positive(self): """If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise.""" return self._is_diag_update_positive @property def diag_update(self): """If this operator is `A = L + U D V^H`, this is the diagonal of `D`.""" return self._diag_update @property def diag_operator(self): """If this operator is `A = L + U D V^H`, this is `D`.""" return self._diag_operator @property def base_operator(self): """If this operator is `A = L + U D V^H`, this is the `L`.""" return self._base_operator def _shape(self): batch_shape = array_ops.broadcast_static_shape( self.base_operator.batch_shape, self.u.shape[:-2]) return batch_shape.concatenate(self.base_operator.shape[-2:]) def _shape_tensor(self): batch_shape = array_ops.broadcast_dynamic_shape( self.base_operator.batch_shape_tensor(), array_ops.shape(self.u)[:-2]) return array_ops.concat( [batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0) def _matmul(self, x, adjoint=False, adjoint_arg=False): u = self.u v = self.v l = self.base_operator d = self.diag_operator leading_term = l.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg) if adjoint: uh_x = math_ops.matmul(u, x, adjoint_a=True, adjoint_b=adjoint_arg) d_uh_x = d.matmul(uh_x, adjoint=adjoint) v_d_uh_x = math_ops.matmul(v, d_uh_x) return leading_term + v_d_uh_x else: vh_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=adjoint_arg) d_vh_x = d.matmul(vh_x, adjoint=adjoint) u_d_vh_x = math_ops.matmul(u, d_vh_x) return leading_term + u_d_vh_x def _determinant(self): if self.is_positive_definite: return math_ops.exp(self.log_abs_determinant()) # The matrix determinant lemma gives # https://en.wikipedia.org/wiki/Matrix_determinant_lemma # det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L) # = det(C) det(D) det(L) # where C is sometimes known as the capacitance matrix, # C := D^{-1} + V^H L^{-1} U det_c = linalg_ops.matrix_determinant(self._make_capacitance()) det_d = self.diag_operator.determinant() det_l = self.base_operator.determinant() return det_c * det_d * det_l def _log_abs_determinant(self): # Recall # det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L) # = det(C) det(D) det(L) log_abs_det_d = self.diag_operator.log_abs_determinant() log_abs_det_l = self.base_operator.log_abs_determinant() if self._use_cholesky: 
chol_cap_diag = array_ops.matrix_diag_part( linalg_ops.cholesky(self._make_capacitance())) log_abs_det_c = 2 * math_ops.reduce_sum( math_ops.log(chol_cap_diag), axis=[-1]) else: det_c = linalg_ops.matrix_determinant(self._make_capacitance()) log_abs_det_c = math_ops.log(math_ops.abs(det_c)) if self.dtype.is_complex: log_abs_det_c = math_ops.cast(log_abs_det_c, dtype=self.dtype) return log_abs_det_c + log_abs_det_d + log_abs_det_l def _solve(self, rhs, adjoint=False, adjoint_arg=False): if self.base_operator.is_non_singular is False: raise ValueError( "Solve not implemented unless this is a perturbation of a " "non-singular LinearOperator.") # The Woodbury formula gives: # https://en.wikipedia.org/wiki/Woodbury_matrix_identity # (L + UDV^H)^{-1} # = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1} # = L^{-1} - L^{-1} U C^{-1} V^H L^{-1} # where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U # Note also that, with ^{-H} being the inverse of the adjoint, # (L + UDV^H)^{-H} # = L^{-H} - L^{-H} V C^{-H} U^H L^{-H} l = self.base_operator if adjoint: v = self.u u = self.v else: v = self.v u = self.u # L^{-1} rhs linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) # V^H L^{-1} rhs vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True) # C^{-1} V^H L^{-1} rhs if self._use_cholesky: capinv_vh_linv_rhs = linear_operator_util.cholesky_solve_with_broadcast( linalg_ops.cholesky(self._make_capacitance()), vh_linv_rhs) else: capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast( self._make_capacitance(), vh_linv_rhs, adjoint=adjoint) # U C^{-1} V^H M^{-1} rhs u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs) # L^{-1} U C^{-1} V^H L^{-1} rhs linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint) # L^{-1} - L^{-1} U C^{-1} V^H L^{-1} return linv_rhs - linv_u_capinv_vh_linv_rhs def _make_capacitance(self): # C := D^{-1} + V^H L^{-1} U # which is sometimes known as the "capacitance" matrix. # L^{-1} U linv_u = self.base_operator.solve(self.u) # V^H L^{-1} U vh_linv_u = math_ops.matmul(self.v, linv_u, adjoint_a=True) # D^{-1} + V^H L^{-1} V capacitance = self._diag_operator.inverse().add_to_tensor(vh_linv_u) return capacitance
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_low_rank_update.py
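A compact sketch of the operator above, assuming TensorFlow 1.15 with eager execution and the public `tf.linalg.LinearOperatorLowRankUpdate` export: with a positive-definite diagonal base operator, `v=None`, and the default identity `D`, the Cholesky code path is selected, and `solve` goes through the Woodbury identity and a `K x K` capacitance system rather than a full `N x N` solve. Values are illustrative only.

```python
# Illustrative sketch only (assumes TF 1.15, eager execution enabled).
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# L: a 3 x 3 positive-definite diagonal operator.
base = tf.linalg.LinearOperatorDiag(
    diag=[4., 5., 6.],
    is_non_singular=True,
    is_self_adjoint=True,
    is_positive_definite=True)

# U: a rank-2 update. diag_update=None means D is the identity, and
# v=None means v = u, so A = L + U U^H is symmetric positive-definite
# and the Cholesky path is used for solve() and determinant().
u = tf.constant([[1., 0.],
                 [0., 1.],
                 [1., 1.]])
operator = tf.linalg.LinearOperatorLowRankUpdate(base_operator=base, u=u)

x = tf.ones(shape=[3, 2])
y = operator.matmul(x)   # adds only O(K*N*R) on top of the base matmul.
z = operator.solve(x)    # Woodbury: solves a 2 x 2 capacitance system.

# Cross-check against the dense matrix A = L + U U^H.
dense = operator.to_dense()
print(tf.reduce_max(tf.abs(tf.matmul(dense, x) - y)).numpy())  # ~0.0
print(tf.reduce_max(tf.abs(tf.matmul(dense, z) - x)).numpy())  # ~0.0
print(operator.log_abs_determinant().numpy())
```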
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` coming from a [[nested] block] circulant matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import util as distribution_util from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.ops.signal import fft_ops from tensorflow.python.util.tf_export import tf_export __all__ = [ "LinearOperatorCirculant", "LinearOperatorCirculant2D", "LinearOperatorCirculant3D", ] # Different FFT Ops will be used for different block depths. _FFT_OP = {1: fft_ops.fft, 2: fft_ops.fft2d, 3: fft_ops.fft3d} _IFFT_OP = {1: fft_ops.ifft, 2: fft_ops.ifft2d, 3: fft_ops.ifft3d} # TODO(langmore) Add transformations that create common spectrums, e.g. # starting with the convolution kernel # start with half a spectrum, and create a Hermitian one. # common filters. # TODO(langmore) Support rectangular Toeplitz matrices. class _BaseLinearOperatorCirculant(linear_operator.LinearOperator): """Base class for circulant operators. Not user facing. `LinearOperator` acting like a [batch] [[nested] block] circulant matrix. """ def __init__(self, spectrum, block_depth, input_output_dtype=dtypes.complex64, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=True, name="LinearOperatorCirculant"): r"""Initialize an `_BaseLinearOperatorCirculant`. Args: spectrum: Shape `[B1,...,Bb, N]` `Tensor`. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. Type can be different than `input_output_dtype` block_depth: Python integer, either 1, 2, or 3. Will be 1 for circulant, 2 for block circulant, and 3 for nested block circulant. input_output_dtype: `dtype` for input/output. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `spectrum` is real, this will always be true. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix\ #Extension_for_non_symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name to prepend to all ops created by this class. 
Raises: ValueError: If `block_depth` is not an allowed value. TypeError: If `spectrum` is not an allowed type. """ allowed_block_depths = [1, 2, 3] self._name = name if block_depth not in allowed_block_depths: raise ValueError("Expected block_depth to be in %s. Found: %s." % (allowed_block_depths, block_depth)) self._block_depth = block_depth with ops.name_scope(name, values=[spectrum]): self._spectrum = self._check_spectrum_and_return_tensor(spectrum) # Check and auto-set hints. if not self.spectrum.dtype.is_complex: if is_self_adjoint is False: raise ValueError( "A real spectrum always corresponds to a self-adjoint operator.") is_self_adjoint = True if is_square is False: raise ValueError( "A [[nested] block] circulant operator is always square.") is_square = True # If spectrum.shape = [s0, s1, s2], and block_depth = 2, # block_shape = [s1, s2] s_shape = array_ops.shape(self.spectrum) self._block_shape_tensor = s_shape[-self.block_depth:] # Add common variants of spectrum to the graph. self._spectrum_complex = _to_complex(self.spectrum) self._abs_spectrum = math_ops.abs(self.spectrum) self._conj_spectrum = math_ops.conj(self._spectrum_complex) super(_BaseLinearOperatorCirculant, self).__init__( dtype=dtypes.as_dtype(input_output_dtype), graph_parents=[self.spectrum], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_spectrum_and_return_tensor(self, spectrum): """Static check of spectrum. Then return `Tensor` version.""" spectrum = ops.convert_to_tensor(spectrum, name="spectrum") if spectrum.shape.ndims is not None: if spectrum.shape.ndims < self.block_depth: raise ValueError( "Argument spectrum must have at least %d dimensions. Found: %s" % (self.block_depth, spectrum)) return spectrum @property def block_depth(self): """Depth of recursively defined circulant blocks defining this `Operator`. With `A` the dense representation of this `Operator`, `block_depth = 1` means `A` is symmetric circulant. For example, ``` A = |w z y x| |x w z y| |y x w z| |z y x w| ``` `block_depth = 2` means `A` is block symmetric circulant with symemtric circulant blocks. For example, with `W`, `X`, `Y`, `Z` symmetric circulant, ``` A = |W Z Y X| |X W Z Y| |Y X W Z| |Z Y X W| ``` `block_depth = 3` means `A` is block symmetric circulant with block symmetric circulant blocks. Returns: Python `integer`. """ return self._block_depth def block_shape_tensor(self): """Shape of the block dimensions of `self.spectrum`.""" return self._block_shape_tensor @property def block_shape(self): return self.spectrum.shape[-self.block_depth:] @property def spectrum(self): return self._spectrum def _vectorize_then_blockify(self, matrix): """Shape batch matrix to batch vector, then blockify trailing dimensions.""" # Suppose # matrix.shape = [m0, m1, m2, m3], # and matrix is a matrix because the final two dimensions are matrix dims. # self.block_depth = 2, # self.block_shape = [b0, b1] (note b0 * b1 = m2). # We will reshape matrix to # [m3, m0, m1, b0, b1]. # Vectorize: Reshape to batch vector. # [m0, m1, m2, m3] --> [m3, m0, m1, m2] # This is called "vectorize" because we have taken the final two matrix dims # and turned this into a size m3 batch of vectors. vec = distribution_util.rotate_transpose(matrix, shift=1) # Blockify: Blockfy trailing dimensions. 
# [m3, m0, m1, m2] --> [m3, m0, m1, b0, b1] if (vec.shape.is_fully_defined() and self.block_shape.is_fully_defined()): # vec_leading_shape = [m3, m0, m1], # the parts of vec that will not be blockified. vec_leading_shape = vec.shape[:-1] final_shape = vec_leading_shape.concatenate(self.block_shape) else: vec_leading_shape = array_ops.shape(vec)[:-1] final_shape = array_ops.concat( (vec_leading_shape, self.block_shape_tensor()), 0) return array_ops.reshape(vec, final_shape) def _unblockify_then_matricize(self, vec): """Flatten the block dimensions then reshape to a batch matrix.""" # Suppose # vec.shape = [v0, v1, v2, v3], # self.block_depth = 2. # Then # leading shape = [v0, v1] # block shape = [v2, v3]. # We will reshape vec to # [v1, v2*v3, v0]. # Un-blockify: Flatten block dimensions. Reshape # [v0, v1, v2, v3] --> [v0, v1, v2*v3]. if vec.shape.is_fully_defined(): # vec_shape = [v0, v1, v2, v3] vec_shape = vec.shape.as_list() # vec_leading_shape = [v0, v1] vec_leading_shape = vec_shape[:-self.block_depth] # vec_block_shape = [v2, v3] vec_block_shape = vec_shape[-self.block_depth:] # flat_shape = [v0, v1, v2*v3] flat_shape = vec_leading_shape + [np.prod(vec_block_shape)] else: vec_shape = array_ops.shape(vec) vec_leading_shape = vec_shape[:-self.block_depth] vec_block_shape = vec_shape[-self.block_depth:] flat_shape = array_ops.concat( (vec_leading_shape, [math_ops.reduce_prod(vec_block_shape)]), 0) vec_flat = array_ops.reshape(vec, flat_shape) # Matricize: Reshape to batch matrix. # [v0, v1, v2*v3] --> [v1, v2*v3, v0], # representing a shape [v1] batch of [v2*v3, v0] matrices. matrix = distribution_util.rotate_transpose(vec_flat, shift=-1) return matrix def _fft(self, x): """FFT along the last self.block_depth dimensions of x. Args: x: `Tensor` with floating or complex `dtype`. Should be in the form returned by self._vectorize_then_blockify. Returns: `Tensor` with `dtype` `complex64`. """ x_complex = _to_complex(x) return _FFT_OP[self.block_depth](x_complex) def _ifft(self, x): """IFFT along the last self.block_depth dimensions of x. Args: x: `Tensor` with floating or complex dtype. Should be in the form returned by self._vectorize_then_blockify. Returns: `Tensor` with `dtype` `complex64`. """ x_complex = _to_complex(x) return _IFFT_OP[self.block_depth](x_complex) def convolution_kernel(self, name="convolution_kernel"): """Convolution kernel corresponding to `self.spectrum`. The `D` dimensional DFT of this kernel is the frequency domain spectrum of this operator. Args: name: A name to give this `Op`. Returns: `Tensor` with `dtype` `self.dtype`. 
""" with self._name_scope(name): h = self._ifft(self._spectrum_complex) return math_ops.cast(h, self.dtype) def _shape(self): s_shape = self._spectrum.shape # Suppose spectrum.shape = [a, b, c, d] # block_depth = 2 # Then: # batch_shape = [a, b] # N = c*d # and we want to return # [a, b, c*d, c*d] batch_shape = s_shape[:-self.block_depth] # trailing_dims = [c, d] trailing_dims = s_shape[-self.block_depth:] if trailing_dims.is_fully_defined(): n = np.prod(trailing_dims.as_list()) else: n = None n_x_n = tensor_shape.TensorShape([n, n]) return batch_shape.concatenate(n_x_n) def _shape_tensor(self): # See self.shape for explanation of steps s_shape = array_ops.shape(self._spectrum) batch_shape = s_shape[:-self.block_depth] trailing_dims = s_shape[-self.block_depth:] n = math_ops.reduce_prod(trailing_dims) n_x_n = [n, n] return array_ops.concat((batch_shape, n_x_n), 0) def assert_hermitian_spectrum(self, name="assert_hermitian_spectrum"): """Returns an `Op` that asserts this operator has Hermitian spectrum. This operator corresponds to a real-valued matrix if and only if its spectrum is Hermitian. Args: name: A name to give this `Op`. Returns: An `Op` that asserts this operator has Hermitian spectrum. """ eps = np.finfo(self.dtype.real_dtype.as_numpy_dtype).eps with self._name_scope(name): # Assume linear accumulation of error. max_err = eps * self.domain_dimension_tensor() imag_convolution_kernel = math_ops.imag(self.convolution_kernel()) return check_ops.assert_less( math_ops.abs(imag_convolution_kernel), max_err, message="Spectrum was not Hermitian") def _assert_non_singular(self): return linear_operator_util.assert_no_entries_with_modulus_zero( self.spectrum, message="Singular operator: Spectrum contained zero values.") def _assert_positive_definite(self): # This operator has the action Ax = F^H D F x, # where D is the diagonal matrix with self.spectrum on the diag. Therefore, # <x, Ax> = <Fx, DFx>, # Since F is bijective, the condition for positive definite is the same as # for a diagonal matrix, i.e. real part of spectrum is positive. message = ( "Not positive definite: Real part of spectrum was not all positive.") return check_ops.assert_positive( math_ops.real(self.spectrum), message=message) def _assert_self_adjoint(self): # Recall correspondence between symmetry and real transforms. See docstring return linear_operator_util.assert_zero_imag_part( self.spectrum, message=( "Not self-adjoint: The spectrum contained non-zero imaginary part." )) def _broadcast_batch_dims(self, x, spectrum): """Broadcast batch dims of batch matrix `x` and spectrum.""" # spectrum.shape = batch_shape + block_shape # First make spectrum a batch matrix with # spectrum.shape = batch_shape + [prod(block_shape), 1] spec_mat = array_ops.reshape( spectrum, array_ops.concat( (self.batch_shape_tensor(), [-1, 1]), axis=0)) # Second, broadcast, possibly requiring an addition of array of zeros. x, spec_mat = linear_operator_util.broadcast_matrix_batch_dims((x, spec_mat)) # Third, put the block shape back into spectrum. batch_shape = array_ops.shape(x)[:-2] spectrum = array_ops.reshape( spec_mat, array_ops.concat((batch_shape, self.block_shape_tensor()), axis=0)) return x, spectrum def _matmul(self, x, adjoint=False, adjoint_arg=False): x = linalg.adjoint(x) if adjoint_arg else x # With F the matrix of a DFT, and F^{-1}, F^H the inverse and Hermitian # transpose, one can show that F^{-1} = F^{H} is the IDFT matrix. 
Therefore # matmul(x) = F^{-1} diag(spectrum) F x, # = F^{H} diag(spectrum) F x, # so that # matmul(x, adjoint=True) = F^{H} diag(conj(spectrum)) F x. spectrum = self._conj_spectrum if adjoint else self._spectrum_complex x = math_ops.cast(x, spectrum.dtype) x, spectrum = self._broadcast_batch_dims(x, spectrum) x_vb = self._vectorize_then_blockify(x) fft_x_vb = self._fft(x_vb) block_vector_result = self._ifft(spectrum * fft_x_vb) y = self._unblockify_then_matricize(block_vector_result) return math_ops.cast(y, self.dtype) def _determinant(self): axis = [-(i + 1) for i in range(self.block_depth)] det = math_ops.reduce_prod(self.spectrum, axis=axis) return math_ops.cast(det, self.dtype) def _log_abs_determinant(self): axis = [-(i + 1) for i in range(self.block_depth)] lad = math_ops.reduce_sum(math_ops.log(self._abs_spectrum), axis=axis) return math_ops.cast(lad, self.dtype) def _solve(self, rhs, adjoint=False, adjoint_arg=False): rhs = linalg.adjoint(rhs) if adjoint_arg else rhs spectrum = self._conj_spectrum if adjoint else self._spectrum_complex rhs, spectrum = self._broadcast_batch_dims(rhs, spectrum) rhs_vb = self._vectorize_then_blockify(rhs) fft_rhs_vb = self._fft(rhs_vb) solution_vb = self._ifft(fft_rhs_vb / spectrum) x = self._unblockify_then_matricize(solution_vb) return math_ops.cast(x, self.dtype) def _diag_part(self): # Get ones in shape of diag, which is [B1,...,Bb, N] # Also get the size of the diag, "N". if self.shape.is_fully_defined(): diag_shape = self.shape[:-1] diag_size = self.domain_dimension.value else: diag_shape = self.shape_tensor()[:-1] diag_size = self.domain_dimension_tensor() ones_diag = array_ops.ones(diag_shape, dtype=self.dtype) # As proved in comments in self._trace, the value on the diag is constant, # repeated N times. This value is the trace divided by N. # The handling of self.shape = (0, 0) is tricky, and is the reason we choose # to compute trace and use that to compute diag_part, rather than computing # the value on the diagonal ("diag_value") directly. Both result in a 0/0, # but in different places, and the current method gives the right result in # the end. # Here, if self.shape = (0, 0), then self.trace() = 0., and then # diag_value = 0. / 0. = NaN. diag_value = self.trace() / math_ops.cast(diag_size, self.dtype) # If self.shape = (0, 0), then ones_diag = [] (empty tensor), and then # the following line is NaN * [] = [], as needed. return diag_value[..., array_ops.newaxis] * ones_diag def _trace(self): # The diagonal of the [[nested] block] circulant operator is the mean of # the spectrum. # Proof: For the [0,...,0] element, this follows from the IDFT formula. # Then the result follows since all diagonal elements are the same. # Therefore, the trace is the sum of the spectrum. # Get shape of diag along with the axis over which to reduce the spectrum. # We will reduce the spectrum over all block indices. if self.spectrum.shape.is_fully_defined(): spec_rank = self.spectrum.shape.ndims axis = np.arange(spec_rank - self.block_depth, spec_rank, dtype=np.int32) else: spec_rank = array_ops.rank(self.spectrum) axis = math_ops.range(spec_rank - self.block_depth, spec_rank) # Real diag part "re_d". # Suppose spectrum.shape = [B1,...,Bb, N1, N2] # self.shape = [B1,...,Bb, N, N], with N1 * N2 = N. # re_d_value.shape = [B1,...,Bb] re_d_value = math_ops.reduce_sum(math_ops.real(self.spectrum), axis=axis) if not self.dtype.is_complex: return math_ops.cast(re_d_value, self.dtype) # Imaginary part, "im_d". 
if self.is_self_adjoint: im_d_value = array_ops.zeros_like(re_d_value) else: im_d_value = math_ops.reduce_sum(math_ops.imag(self.spectrum), axis=axis) return math_ops.cast(math_ops.complex(re_d_value, im_d_value), self.dtype) @tf_export("linalg.LinearOperatorCirculant") class LinearOperatorCirculant(_BaseLinearOperatorCirculant): """`LinearOperator` acting like a circulant matrix. This operator acts like a circulant matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. #### Description in terms of circulant matrices Circulant means the entries of `A` are generated by a single vector, the convolution kernel `h`: `A_{mn} := h_{m-n mod N}`. With `h = [w, x, y, z]`, ``` A = |w z y x| |x w z y| |y x w z| |z y x w| ``` This means that the result of matrix multiplication `v = Au` has `Lth` column given circular convolution between `h` with the `Lth` column of `u`. See http://ee.stanford.edu/~gray/toeplitz.pdf #### Description in terms of the frequency spectrum There is an equivalent description in terms of the [batch] spectrum `H` and Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch dimensions. Define the discrete Fourier transform (DFT) and its inverse by ``` DFT[ h[n] ] = H[k] := sum_{n = 0}^{N - 1} h_n e^{-i 2pi k n / N} IDFT[ H[k] ] = h[n] = N^{-1} sum_{k = 0}^{N - 1} H_k e^{i 2pi k n / N} ``` From these definitions, we see that ``` H[0] = sum_{n = 0}^{N - 1} h_n H[1] = "the first positive frequency" H[N - 1] = "the first negative frequency" ``` Loosely speaking, with `*` element-wise multiplication, matrix multiplication is equal to the action of a Fourier multiplier: `A u = IDFT[ H * DFT[u] ]`. Precisely speaking, given `[N, R]` matrix `u`, let `DFT[u]` be the `[N, R]` matrix with `rth` column equal to the DFT of the `rth` column of `u`. Define the `IDFT` similarly. Matrix multiplication may be expressed columnwise: ```(A u)_r = IDFT[ H * (DFT[u])_r ]``` #### Operator properties deduced from the spectrum. Letting `U` be the `kth` Euclidean basis vector, and `U = IDFT[u]`. The above formulas show that`A U = H_k * U`. We conclude that the elements of `H` are the eigenvalues of this operator. Therefore * This operator is positive definite if and only if `Real{H} > 0`. A general property of Fourier transforms is the correspondence between Hermitian functions and real valued transforms. Suppose `H.shape = [B1,...,Bb, N]`. We say that `H` is a Hermitian spectrum if, with `%` meaning modulus division, ```H[..., n % N] = ComplexConjugate[ H[..., (-n) % N] ]``` * This operator corresponds to a real matrix if and only if `H` is Hermitian. * This operator is self-adjoint if and only if `H` is real. See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer. #### Example of a self-adjoint positive definite operator ```python # spectrum is real ==> operator is self-adjoint # spectrum is positive ==> operator is positive definite spectrum = [6., 4, 2] operator = LinearOperatorCirculant(spectrum) # IFFT[spectrum] operator.convolution_kernel() ==> [4 + 0j, 1 + 0.58j, 1 - 0.58j] operator.to_dense() ==> [[4 + 0.0j, 1 - 0.6j, 1 + 0.6j], [1 + 0.6j, 4 + 0.0j, 1 - 0.6j], [1 - 0.6j, 1 + 0.6j, 4 + 0.0j]] ``` #### Example of defining in terms of a real convolution kernel ```python # convolution_kernel is real ==> spectrum is Hermitian. 
convolution_kernel = [1., 2., 1.]] spectrum = tf.signal.fft(tf.cast(convolution_kernel, tf.complex64)) # spectrum is Hermitian ==> operator is real. # spectrum is shape [3] ==> operator is shape [3, 3] # We force the input/output type to be real, which allows this to operate # like a real matrix. operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32) operator.to_dense() ==> [[ 1, 1, 2], [ 2, 1, 1], [ 1, 2, 1]] ``` #### Example of Hermitian spectrum ```python # spectrum is shape [3] ==> operator is shape [3, 3] # spectrum is Hermitian ==> operator is real. spectrum = [1, 1j, -1j] operator = LinearOperatorCirculant(spectrum) operator.to_dense() ==> [[ 0.33 + 0j, 0.91 + 0j, -0.24 + 0j], [-0.24 + 0j, 0.33 + 0j, 0.91 + 0j], [ 0.91 + 0j, -0.24 + 0j, 0.33 + 0j] ``` #### Example of forcing real `dtype` when spectrum is Hermitian ```python # spectrum is shape [4] ==> operator is shape [4, 4] # spectrum is real ==> operator is self-adjoint # spectrum is Hermitian ==> operator is real # spectrum has positive real part ==> operator is positive-definite. spectrum = [6., 4, 2, 4] # Force the input dtype to be float32. # Cast the output to float32. This is fine because the operator will be # real due to Hermitian spectrum. operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32) operator.shape ==> [4, 4] operator.to_dense() ==> [[4, 1, 0, 1], [1, 4, 1, 0], [0, 1, 4, 1], [1, 0, 1, 4]] # convolution_kernel = tf.signal.ifft(spectrum) operator.convolution_kernel() ==> [4, 1, 0, 1] ``` #### Performance Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` is `O(R*N*Log[N])` * `operator.solve(x)` is `O(R*N*Log[N])` * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, spectrum, input_output_dtype=dtypes.complex64, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=True, name="LinearOperatorCirculant"): r"""Initialize an `LinearOperatorCirculant`. This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]` by providing `spectrum`, a `[B1,...,Bb, N]` `Tensor`. If `input_output_dtype = DTYPE`: * Arguments to methods such as `matmul` or `solve` must be `DTYPE`. * Values returned by all methods, such as `matmul` or `determinant` will be cast to `DTYPE`. Note that if the spectrum is not Hermitian, then this operator corresponds to a complex matrix with non-zero imaginary part. In this case, setting `input_output_dtype` to a real type will forcibly cast the output to be real, resulting in incorrect results! If on the other hand the spectrum is Hermitian, then this operator corresponds to a real-valued matrix, and setting `input_output_dtype` to a real type is fine. 
Args: spectrum: Shape `[B1,...,Bb, N]` `Tensor`. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. Type can be different than `input_output_dtype` input_output_dtype: `dtype` for input/output. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `spectrum` is real, this will always be true. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix\ #Extension_for_non_symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name to prepend to all ops created by this class. """ super(LinearOperatorCirculant, self).__init__( spectrum, block_depth=1, input_output_dtype=input_output_dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) @tf_export("linalg.LinearOperatorCirculant2D") class LinearOperatorCirculant2D(_BaseLinearOperatorCirculant): """`LinearOperator` acting like a block circulant matrix. This operator acts like a block circulant matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. #### Description in terms of block circulant matrices If `A` is block circulant, with block sizes `N0, N1` (`N0 * N1 = N`): `A` has a block circulant structure, composed of `N0 x N0` blocks, with each block an `N1 x N1` circulant matrix. For example, with `W`, `X`, `Y`, `Z` each circulant, ``` A = |W Z Y X| |X W Z Y| |Y X W Z| |Z Y X W| ``` Note that `A` itself will not in general be circulant. #### Description in terms of the frequency spectrum There is an equivalent description in terms of the [batch] spectrum `H` and Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch dimensions. If `H.shape = [N0, N1]`, (`N0 * N1 = N`): Loosely speaking, matrix multiplication is equal to the action of a Fourier multiplier: `A u = IDFT2[ H DFT2[u] ]`. Precisely speaking, given `[N, R]` matrix `u`, let `DFT2[u]` be the `[N0, N1, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, R]` and taking a two dimensional DFT across the first two dimensions. Let `IDFT2` be the inverse of `DFT2`. Matrix multiplication may be expressed columnwise: ```(A u)_r = IDFT2[ H * (DFT2[u])_r ]``` #### Operator properties deduced from the spectrum. * This operator is positive definite if and only if `Real{H} > 0`. A general property of Fourier transforms is the correspondence between Hermitian functions and real valued transforms. Suppose `H.shape = [B1,...,Bb, N0, N1]`, we say that `H` is a Hermitian spectrum if, with `%` indicating modulus division, ``` H[..., n0 % N0, n1 % N1] = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1 ]. ``` * This operator corresponds to a real matrix if and only if `H` is Hermitian. * This operator is self-adjoint if and only if `H` is real. See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer. 
### Example of a self-adjoint positive definite operator ```python # spectrum is real ==> operator is self-adjoint # spectrum is positive ==> operator is positive definite spectrum = [[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]] operator = LinearOperatorCirculant2D(spectrum) # IFFT[spectrum] operator.convolution_kernel() ==> [[5.0+0.0j, -0.5-.3j, -0.5+.3j], [-1.5-.9j, 0, 0], [-1.5+.9j, 0, 0]] operator.to_dense() ==> Complex self adjoint 9 x 9 matrix. ``` #### Example of defining in terms of a real convolution kernel, ```python # convolution_kernel is real ==> spectrum is Hermitian. convolution_kernel = [[1., 2., 1.], [5., -1., 1.]] spectrum = tf.signal.fft2d(tf.cast(convolution_kernel, tf.complex64)) # spectrum is shape [2, 3] ==> operator is shape [6, 6] # spectrum is Hermitian ==> operator is real. operator = LinearOperatorCirculant2D(spectrum, input_output_dtype=tf.float32) ``` #### Performance Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` is `O(R*N*Log[N])` * `operator.solve(x)` is `O(R*N*Log[N])` * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, spectrum, input_output_dtype=dtypes.complex64, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=True, name="LinearOperatorCirculant2D"): r"""Initialize an `LinearOperatorCirculant2D`. This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]` by providing `spectrum`, a `[B1,...,Bb, N0, N1]` `Tensor` with `N0*N1 = N`. If `input_output_dtype = DTYPE`: * Arguments to methods such as `matmul` or `solve` must be `DTYPE`. * Values returned by all methods, such as `matmul` or `determinant` will be cast to `DTYPE`. Note that if the spectrum is not Hermitian, then this operator corresponds to a complex matrix with non-zero imaginary part. In this case, setting `input_output_dtype` to a real type will forcibly cast the output to be real, resulting in incorrect results! If on the other hand the spectrum is Hermitian, then this operator corresponds to a real-valued matrix, and setting `input_output_dtype` to a real type is fine. Args: spectrum: Shape `[B1,...,Bb, N]` `Tensor`. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. Type can be different than `input_output_dtype` input_output_dtype: `dtype` for input/output. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `spectrum` is real, this will always be true. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. 
See: https://en.wikipedia.org/wiki/Positive-definite_matrix\ #Extension_for_non_symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name to prepend to all ops created by this class. """ super(LinearOperatorCirculant2D, self).__init__( spectrum, block_depth=2, input_output_dtype=input_output_dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) @tf_export("linalg.LinearOperatorCirculant3D") class LinearOperatorCirculant3D(_BaseLinearOperatorCirculant): """`LinearOperator` acting like a nested block circulant matrix. This operator acts like a block circulant matrix `A` with shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `N x N` matrix. This matrix `A` is not materialized, but for purposes of broadcasting this shape will be relevant. #### Description in terms of block circulant matrices If `A` is nested block circulant, with block sizes `N0, N1, N2` (`N0 * N1 * N2 = N`): `A` has a block structure, composed of `N0 x N0` blocks, with each block an `N1 x N1` block circulant matrix. For example, with `W`, `X`, `Y`, `Z` each block circulant, ``` A = |W Z Y X| |X W Z Y| |Y X W Z| |Z Y X W| ``` Note that `A` itself will not in general be circulant. #### Description in terms of the frequency spectrum There is an equivalent description in terms of the [batch] spectrum `H` and Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch dimensions. If `H.shape = [N0, N1, N2]`, (`N0 * N1 * N2 = N`): Loosely speaking, matrix multiplication is equal to the action of a Fourier multiplier: `A u = IDFT3[ H DFT3[u] ]`. Precisely speaking, given `[N, R]` matrix `u`, let `DFT3[u]` be the `[N0, N1, N2, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, N2, R]` and taking a three dimensional DFT across the first three dimensions. Let `IDFT3` be the inverse of `DFT3`. Matrix multiplication may be expressed columnwise: ```(A u)_r = IDFT3[ H * (DFT3[u])_r ]``` #### Operator properties deduced from the spectrum. * This operator is positive definite if and only if `Real{H} > 0`. A general property of Fourier transforms is the correspondence between Hermitian functions and real valued transforms. Suppose `H.shape = [B1,...,Bb, N0, N1, N2]`, we say that `H` is a Hermitian spectrum if, with `%` meaning modulus division, ``` H[..., n0 % N0, n1 % N1, n2 % N2] = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1, (-n2) % N2] ]. ``` * This operator corresponds to a real matrix if and only if `H` is Hermitian. * This operator is self-adjoint if and only if `H` is real. See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer. ### Examples See `LinearOperatorCirculant` and `LinearOperatorCirculant2D` for examples. #### Performance Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` is `O(R*N*Log[N])` * `operator.solve(x)` is `O(R*N*Log[N])` * `operator.determinant()` involves a size `N` `reduce_prod`. If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. 
These have the following meaning * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, spectrum, input_output_dtype=dtypes.complex64, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=True, name="LinearOperatorCirculant3D"): """Initialize an `LinearOperatorCirculant`. This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]` by providing `spectrum`, a `[B1,...,Bb, N0, N1, N2]` `Tensor` with `N0*N1*N2 = N`. If `input_output_dtype = DTYPE`: * Arguments to methods such as `matmul` or `solve` must be `DTYPE`. * Values returned by all methods, such as `matmul` or `determinant` will be cast to `DTYPE`. Note that if the spectrum is not Hermitian, then this operator corresponds to a complex matrix with non-zero imaginary part. In this case, setting `input_output_dtype` to a real type will forcibly cast the output to be real, resulting in incorrect results! If on the other hand the spectrum is Hermitian, then this operator corresponds to a real-valued matrix, and setting `input_output_dtype` to a real type is fine. Args: spectrum: Shape `[B1,...,Bb, N]` `Tensor`. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. Type can be different than `input_output_dtype` input_output_dtype: `dtype` for input/output. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `spectrum` is real, this will always be true. is_positive_definite: Expect that this operator is positive definite, meaning the real part of all eigenvalues is positive. We do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix #Extension_for_non_symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name to prepend to all ops created by this class. """ super(LinearOperatorCirculant3D, self).__init__( spectrum, block_depth=3, input_output_dtype=input_output_dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _to_complex(x): if x.dtype.is_complex: return x dtype = dtypes.complex64 if x.dtype == dtypes.float64: dtype = dtypes.complex128 return math_ops.cast(x, dtype)
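# A minimal usage sketch, not part of the original module (assumes eager
# execution and the public `tf.linalg` / `tf.signal` APIs; all names below are
# illustrative). It checks the identity from the class docstrings above:
# multiplying by a circulant operator equals IDFT[ spectrum * DFT[u] ].
def _circulant_matmul_sketch():
  import numpy as np
  import tensorflow as tf

  # Real convolution kernel ==> Hermitian spectrum ==> real operator.
  kernel = tf.constant([1., 2., 1.], dtype=tf.float32)
  spectrum = tf.signal.fft(tf.cast(kernel, tf.complex64))
  operator = tf.linalg.LinearOperatorCirculant(
      spectrum, input_output_dtype=tf.float32)

  u = tf.constant([[1.], [0.], [2.]], dtype=tf.float32)  # shape [N, R] = [3, 1]
  via_operator = operator.matmul(u)

  # The same product computed directly from the DFT identity.
  u_hat = tf.signal.fft(tf.cast(u[:, 0], tf.complex64))
  via_fft = tf.math.real(tf.signal.ifft(spectrum * u_hat))
  np.testing.assert_allclose(
      via_operator.numpy()[:, 0], via_fft.numpy(), rtol=1e-4, atol=1e-4)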
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_circulant.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common utilities for registering LinearOperator methods.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Note: only use this method in the commuting case. def combined_commuting_self_adjoint_hint(operator_a, operator_b): """Get combined hint for self-adjoint-ness.""" # The property is preserved under composition when the operators commute. if operator_a.is_self_adjoint and operator_b.is_self_adjoint: return True # The property is not preserved when an operator with the property is composed # with an operator without the property. # pylint:disable=g-bool-id-comparison if ((operator_a.is_self_adjoint is True and operator_b.is_self_adjoint is False) or (operator_a.is_self_adjoint is False and operator_b.is_self_adjoint is True)): return False # pylint:enable=g-bool-id-comparison # The property is not known when operators are not known to have the property # or both operators don't have the property (the property for the complement # class is not closed under composition). return None def is_square(operator_a, operator_b): """Return a hint to whether the composition is square.""" if operator_a.is_square and operator_b.is_square: return True if operator_a.is_square is False and operator_b.is_square is False: # pylint:disable=g-bool-id-comparison # Let A have shape [B, M, N], B have shape [B, N, L]. m = operator_a.range_dimension l = operator_b.domain_dimension if m is not None and l is not None: return m == l if (operator_a.is_square != operator_b.is_square) and ( operator_a.is_square is not None and operator_b.is_square is not None): return False return None # Note: Positive definiteness is only guaranteed to be preserved # when the operators commute and are symmetric. Only use this method in # commuting cases. def combined_commuting_positive_definite_hint(operator_a, operator_b): """Get combined PD hint for compositions.""" # pylint:disable=g-bool-id-comparison if (operator_a.is_positive_definite is True and operator_a.is_self_adjoint is True and operator_b.is_positive_definite is True and operator_b.is_self_adjoint is True): return True # pylint:enable=g-bool-id-comparison return None def combined_non_singular_hint(operator_a, operator_b): """Get combined hint for non-singularity.""" # If either operator is non-invertible, the composition isn't either. # pylint:disable=g-bool-id-comparison if (operator_a.is_non_singular is False or operator_b.is_non_singular is False): return False # pylint:enable=g-bool-id-comparison return operator_a.is_non_singular and operator_b.is_non_singular
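# A minimal sketch of how the combiners above behave; not part of the original
# module. `FakeOp` is a hypothetical stand-in exposing only the hint
# attributes that the combiners read.
def _hint_combination_sketch():
  import collections

  FakeOp = collections.namedtuple(
      "FakeOp",
      ["is_self_adjoint", "is_positive_definite", "is_non_singular"])

  a = FakeOp(is_self_adjoint=True, is_positive_definite=True,
             is_non_singular=True)
  b = FakeOp(is_self_adjoint=True, is_positive_definite=True,
             is_non_singular=True)
  c = FakeOp(is_self_adjoint=False, is_positive_definite=None,
             is_non_singular=None)

  # Both operators have the property ==> the (commuting) composition does too.
  assert combined_commuting_self_adjoint_hint(a, b) is True
  # One has the property and the other explicitly lacks it ==> hint is False.
  assert combined_commuting_self_adjoint_hint(a, c) is False
  # Positive definiteness requires all four hints to be True.
  assert combined_commuting_positive_definite_hint(a, b) is True
  assert combined_commuting_positive_definite_hint(a, c) is None
  # Unknown (None) hints stay unknown unless a False forces the answer.
  assert combined_non_singular_hint(a, c) is None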
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/registrations_util.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Registrations for LinearOperator.adjoint.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_adjoint from tensorflow.python.ops.linalg import linear_operator_algebra from tensorflow.python.ops.linalg import linear_operator_block_diag from tensorflow.python.ops.linalg import linear_operator_circulant from tensorflow.python.ops.linalg import linear_operator_diag from tensorflow.python.ops.linalg import linear_operator_householder from tensorflow.python.ops.linalg import linear_operator_identity from tensorflow.python.ops.linalg import linear_operator_kronecker # By default, return LinearOperatorAdjoint which switched the .matmul # and .solve methods. @linear_operator_algebra.RegisterAdjoint(linear_operator.LinearOperator) def _adjoint_linear_operator(linop): return linear_operator_adjoint.LinearOperatorAdjoint( linop, is_non_singular=linop.is_non_singular, is_self_adjoint=linop.is_self_adjoint, is_positive_definite=linop.is_positive_definite, is_square=linop.is_square) @linear_operator_algebra.RegisterAdjoint( linear_operator_adjoint.LinearOperatorAdjoint) def _adjoint_adjoint_linear_operator(linop): return linop.operator @linear_operator_algebra.RegisterAdjoint( linear_operator_identity.LinearOperatorIdentity) def _adjoint_identity(identity_operator): return identity_operator @linear_operator_algebra.RegisterAdjoint( linear_operator_identity.LinearOperatorScaledIdentity) def _adjoint_scaled_identity(identity_operator): multiplier = identity_operator.multiplier if multiplier.dtype.is_complex: multiplier = math_ops.conj(multiplier) return linear_operator_identity.LinearOperatorScaledIdentity( num_rows=identity_operator._num_rows, # pylint: disable=protected-access multiplier=multiplier, is_non_singular=identity_operator.is_non_singular, is_self_adjoint=identity_operator.is_self_adjoint, is_positive_definite=identity_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterAdjoint( linear_operator_diag.LinearOperatorDiag) def _adjoint_diag(diag_operator): diag = diag_operator.diag if diag.dtype.is_complex: diag = math_ops.conj(diag) return linear_operator_diag.LinearOperatorDiag( diag=diag, is_non_singular=diag_operator.is_non_singular, is_self_adjoint=diag_operator.is_self_adjoint, is_positive_definite=diag_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterAdjoint( linear_operator_block_diag.LinearOperatorBlockDiag) def _adjoint_block_diag(block_diag_operator): # We take the adjoint of each block on the diagonal. 
return linear_operator_block_diag.LinearOperatorBlockDiag( operators=[ operator.adjoint() for operator in block_diag_operator.operators], is_non_singular=block_diag_operator.is_non_singular, is_self_adjoint=block_diag_operator.is_self_adjoint, is_positive_definite=block_diag_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterAdjoint( linear_operator_kronecker.LinearOperatorKronecker) def _adjoint_kronecker(kronecker_operator): # Adjoint of a Kronecker product is the Kronecker product # of adjoints. return linear_operator_kronecker.LinearOperatorKronecker( operators=[ operator.adjoint() for operator in kronecker_operator.operators], is_non_singular=kronecker_operator.is_non_singular, is_self_adjoint=kronecker_operator.is_self_adjoint, is_positive_definite=kronecker_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterAdjoint( linear_operator_circulant.LinearOperatorCirculant) def _adjoint_circulant(circulant_operator): spectrum = circulant_operator.spectrum if spectrum.dtype.is_complex: spectrum = math_ops.conj(spectrum) # Conjugating the spectrum is sufficient to get the adjoint. return linear_operator_circulant.LinearOperatorCirculant( spectrum=spectrum, is_non_singular=circulant_operator.is_non_singular, is_self_adjoint=circulant_operator.is_self_adjoint, is_positive_definite=circulant_operator.is_positive_definite, is_square=True) @linear_operator_algebra.RegisterAdjoint( linear_operator_householder.LinearOperatorHouseholder) def _adjoint_householder(householder_operator): return householder_operator
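# A minimal usage sketch, not part of the original registrations (assumes
# eager execution and the public `tf.linalg` API). The registrations above
# keep `adjoint()` structure-preserving: e.g. the adjoint of a diagonal
# operator is again diagonal, with the complex-conjugated diagonal, and no
# dense matrix is ever formed.
def _adjoint_registration_sketch():
  import numpy as np
  import tensorflow as tf

  diag = tf.constant([1. + 2.j, 3. - 4.j], dtype=tf.complex64)
  operator = tf.linalg.LinearOperatorDiag(diag)
  adjoint_operator = operator.adjoint()

  assert isinstance(adjoint_operator, tf.linalg.LinearOperatorDiag)
  np.testing.assert_allclose(
      adjoint_operator.diag_part().numpy(), np.conj(diag.numpy()))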
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/adjoint_registrations.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Registrations for LinearOperator.solve.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_algebra from tensorflow.python.ops.linalg import linear_operator_circulant from tensorflow.python.ops.linalg import linear_operator_composition from tensorflow.python.ops.linalg import linear_operator_diag from tensorflow.python.ops.linalg import linear_operator_identity from tensorflow.python.ops.linalg import linear_operator_inversion from tensorflow.python.ops.linalg import linear_operator_lower_triangular from tensorflow.python.ops.linalg import registrations_util # By default, use a LinearOperatorComposition to delay the computation. @linear_operator_algebra.RegisterSolve( linear_operator.LinearOperator, linear_operator.LinearOperator) def _solve_linear_operator(linop_a, linop_b): """Generic solve of two `LinearOperator`s.""" is_square = registrations_util.is_square(linop_a, linop_b) is_non_singular = None is_self_adjoint = None is_positive_definite = None if is_square: is_non_singular = registrations_util.combined_non_singular_hint( linop_a, linop_b) elif is_square is False: # pylint:disable=g-bool-id-comparison is_non_singular = False is_self_adjoint = False is_positive_definite = False return linear_operator_composition.LinearOperatorComposition( operators=[ linear_operator_inversion.LinearOperatorInversion(linop_a), linop_b ], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, ) @linear_operator_algebra.RegisterSolve( linear_operator_inversion.LinearOperatorInversion, linear_operator.LinearOperator) def _solve_inverse_linear_operator(linop_a, linop_b): """Solve inverse of generic `LinearOperator`s.""" return linop_a.operator.matmul(linop_b) # Identity @linear_operator_algebra.RegisterSolve( linear_operator_identity.LinearOperatorIdentity, linear_operator.LinearOperator) def _solve_linear_operator_identity_left(identity, linop): del identity return linop # Diag. 
@linear_operator_algebra.RegisterSolve( linear_operator_diag.LinearOperatorDiag, linear_operator_diag.LinearOperatorDiag) def _solve_linear_operator_diag(linop_a, linop_b): return linear_operator_diag.LinearOperatorDiag( diag=linop_b.diag / linop_a.diag, is_non_singular=registrations_util.combined_non_singular_hint( linop_a, linop_b), is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_a, linop_b), is_positive_definite=( registrations_util.combined_commuting_positive_definite_hint( linop_a, linop_b)), is_square=True) @linear_operator_algebra.RegisterSolve( linear_operator_diag.LinearOperatorDiag, linear_operator_identity.LinearOperatorScaledIdentity) def _solve_linear_operator_diag_scaled_identity_right( linop_diag, linop_scaled_identity): return linear_operator_diag.LinearOperatorDiag( diag=linop_scaled_identity.multiplier / linop_diag.diag, is_non_singular=registrations_util.combined_non_singular_hint( linop_diag, linop_scaled_identity), is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_diag, linop_scaled_identity), is_positive_definite=( registrations_util.combined_commuting_positive_definite_hint( linop_diag, linop_scaled_identity)), is_square=True) @linear_operator_algebra.RegisterSolve( linear_operator_identity.LinearOperatorScaledIdentity, linear_operator_diag.LinearOperatorDiag) def _solve_linear_operator_diag_scaled_identity_left( linop_scaled_identity, linop_diag): return linear_operator_diag.LinearOperatorDiag( diag=linop_diag.diag / linop_scaled_identity.multiplier, is_non_singular=registrations_util.combined_non_singular_hint( linop_diag, linop_scaled_identity), is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_diag, linop_scaled_identity), is_positive_definite=( registrations_util.combined_commuting_positive_definite_hint( linop_diag, linop_scaled_identity)), is_square=True) @linear_operator_algebra.RegisterSolve( linear_operator_diag.LinearOperatorDiag, linear_operator_lower_triangular.LinearOperatorLowerTriangular) def _solve_linear_operator_diag_tril(linop_diag, linop_triangular): return linear_operator_lower_triangular.LinearOperatorLowerTriangular( tril=linop_triangular.to_dense() / linop_diag.diag[..., None], is_non_singular=registrations_util.combined_non_singular_hint( linop_diag, linop_triangular), # This is safe to do since the Triangular matrix is only self-adjoint # when it is a diagonal matrix, and hence commutes. is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_diag, linop_triangular), is_positive_definite=None, is_square=True) # Circulant. @linear_operator_algebra.RegisterSolve( linear_operator_circulant.LinearOperatorCirculant, linear_operator_circulant.LinearOperatorCirculant) def _solve_linear_operator_circulant_circulant(linop_a, linop_b): return linear_operator_circulant.LinearOperatorCirculant( spectrum=linop_b.spectrum / linop_a.spectrum, is_non_singular=registrations_util.combined_non_singular_hint( linop_a, linop_b), is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_a, linop_b), is_positive_definite=( registrations_util.combined_commuting_positive_definite_hint( linop_a, linop_b)), is_square=True)
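# A minimal usage sketch, not part of the original registrations (assumes
# eager execution, the public `tf.linalg` API, and that `solve` dispatches on
# a `LinearOperator` right-hand side as the registrations above suggest).
# Solving one diagonal operator against another stays diagonal: the result's
# diagonal is the elementwise quotient, as in `_solve_linear_operator_diag`.
def _solve_registration_sketch():
  import numpy as np
  import tensorflow as tf

  a = tf.linalg.LinearOperatorDiag([2., 4.], is_non_singular=True)
  b = tf.linalg.LinearOperatorDiag([6., 8.])
  solution = a.solve(b)  # Represents A^{-1} B.

  assert isinstance(solution, tf.linalg.LinearOperatorDiag)
  np.testing.assert_allclose(solution.diag_part().numpy(), [3., 2.])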
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/solve_registrations.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`LinearOperator` that wraps a [batch] matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorFullMatrix"] @tf_export("linalg.LinearOperatorFullMatrix") class LinearOperatorFullMatrix(linear_operator.LinearOperator): """`LinearOperator` that wraps a [batch] matrix. This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `M x N` matrix. ```python # Create a 2 x 2 linear operator. matrix = [[1., 2.], [3., 4.]] operator = LinearOperatorFullMatrix(matrix) operator.to_dense() ==> [[1., 2.] [3., 4.]] operator.shape ==> [2, 2] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [2, 4] Tensor operator.matmul(x) ==> Shape [2, 4] Tensor # Create a [2, 3] batch of 4 x 4 linear operators. matrix = tf.random.normal(shape=[2, 3, 4, 4]) operator = LinearOperatorFullMatrix(matrix) ``` #### Shape compatibility This operator acts on [batch] matrix with compatible shape. `x` is a batch matrix with compatible shape for `matmul` and `solve` if ``` operator.shape = [B1,...,Bb] + [M, N], with b >= 0 x.shape = [B1,...,Bb] + [N, R], with R >= 0. ``` #### Performance `LinearOperatorFullMatrix` has exactly the same performance as would be achieved by using standard `TensorFlow` matrix ops. Intelligent choices are made based on the following initialization hints. * If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a Cholesky factorization is used for the determinant and solve. In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape `[M, N]`, and `x.shape = [N, R]`. Then * `operator.matmul(x)` is `O(M * N * R)`. * If `M=N`, `operator.solve(x)` is `O(N^3 * R)`. * If `M=N`, `operator.determinant()` is `O(N^3)`. If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. 
* If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, matrix, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name="LinearOperatorFullMatrix"): r"""Initialize a `LinearOperatorFullMatrix`. Args: matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`. Allowed dtypes: `float16`, `float32`, `float64`, `complex64`, `complex128`. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: TypeError: If `diag.dtype` is not an allowed type. """ with ops.name_scope(name, values=[matrix]): self._matrix = linear_operator_util.convert_nonref_to_tensor( matrix, name="matrix") self._check_matrix(self._matrix) super(LinearOperatorFullMatrix, self).__init__( dtype=self._matrix.dtype, graph_parents=[self._matrix], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) def _check_matrix(self, matrix): """Static check of the `matrix` argument.""" allowed_dtypes = [ dtypes.float16, dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128, ] matrix = ops.convert_to_tensor(matrix, name="matrix") dtype = matrix.dtype if dtype not in allowed_dtypes: raise TypeError( "Argument matrix must have dtype in %s. Found: %s" % (allowed_dtypes, dtype)) if matrix.shape.ndims is not None and matrix.shape.ndims < 2: raise ValueError( "Argument matrix must have at least 2 dimensions. Found: %s" % matrix) def _shape(self): return self._matrix.shape def _shape_tensor(self): return array_ops.shape(self._matrix) def _matmul(self, x, adjoint=False, adjoint_arg=False): return math_ops.matmul( self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg) def _to_dense(self): return self._matrix
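# A minimal usage sketch, not part of the original module (assumes eager
# execution and the public `tf.linalg` API). With both `is_self_adjoint` and
# `is_positive_definite` set on a real matrix, `solve` and `determinant` may
# use a Cholesky factorization, as noted in the class docstring above.
def _full_matrix_sketch():
  import numpy as np
  import tensorflow as tf

  matrix = tf.constant([[4., 1.], [1., 3.]])  # Symmetric positive definite.
  operator = tf.linalg.LinearOperatorFullMatrix(
      matrix, is_self_adjoint=True, is_positive_definite=True)

  rhs = tf.constant([[1.], [2.]])
  x = operator.solve(rhs)
  # The solution satisfies `matrix @ x = rhs` up to float32 round-off.
  np.testing.assert_allclose(
      tf.matmul(matrix, x).numpy(), rhs.numpy(), rtol=1e-5, atol=1e-5)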
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_full_matrix.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Construct the Kronecker product of one or more `LinearOperators`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import common_shapes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperatorKronecker"] def _vec(x): """Stacks column of matrix to form a single column.""" return array_ops.reshape( array_ops.matrix_transpose(x), array_ops.concat( [array_ops.shape(x)[:-2], [-1]], axis=0)) def _unvec_by(y, num_col): """Unstack vector to form a matrix, with a specified amount of columns.""" return array_ops.matrix_transpose( array_ops.reshape( y, array_ops.concat( [array_ops.shape(y)[:-1], [num_col, -1]], axis=0))) def _rotate_last_dim(x, rotate_right=False): """Rotate the last dimension either left or right.""" ndims = array_ops.rank(x) if rotate_right: transpose_perm = array_ops.concat( [[ndims - 1], math_ops.range(0, ndims - 1)], axis=0) else: transpose_perm = array_ops.concat( [math_ops.range(1, ndims), [0]], axis=0) return array_ops.transpose(x, transpose_perm) @tf_export("linalg.LinearOperatorKronecker") class LinearOperatorKronecker(linear_operator.LinearOperator): """Kronecker product between two `LinearOperators`. This operator composes one or more linear operators `[op1,...,opJ]`, building a new `LinearOperator` representing the Kronecker product: `op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is associative). If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`, where the product is over all operators. ```python # Create a 4 x 4 linear operator composed of two 2 x 2 operators. operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]]) operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]]) operator = LinearOperatorKronecker([operator_1, operator_2]) operator.to_dense() ==> [[1., 2., 0., 0.], [3., 4., 0., 0.], [2., 4., 1., 2.], [6., 8., 3., 4.]] operator.shape ==> [4, 4] operator.log_abs_determinant() ==> scalar Tensor x = ... Shape [4, 2] Tensor operator.matmul(x) ==> Shape [4, 2] Tensor # Create a [2, 3] batch of 4 x 5 linear operators. matrix_45 = tf.random.normal(shape=[2, 3, 4, 5]) operator_45 = LinearOperatorFullMatrix(matrix) # Create a [2, 3] batch of 5 x 6 linear operators. 
matrix_56 = tf.random.normal(shape=[2, 3, 5, 6]) operator_56 = LinearOperatorFullMatrix(matrix_56) # Compose to create a [2, 3] batch of 20 x 30 operators. operator_large = LinearOperatorKronecker([operator_45, operator_56]) # Create a shape [2, 3, 20, 2] vector. x = tf.random.normal(shape=[2, 3, 6, 2]) operator_large.matmul(x) ==> Shape [2, 3, 30, 2] Tensor ``` #### Performance The performance of `LinearOperatorKronecker` on any operation is equal to the sum of the individual operators' operations. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, operators, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None): r"""Initialize a `LinearOperatorKronecker`. `LinearOperatorKronecker` is initialized with a list of operators `[op_1,...,op_J]`. Args: operators: Iterable of `LinearOperator` objects, each with the same `dtype` and composable shape, representing the Kronecker factors. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix\ #Extension_for_non_symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Default is the individual operators names joined with `_x_`. Raises: TypeError: If all operators do not have the same `dtype`. ValueError: If `operators` is empty. """ # Validate operators. check_ops.assert_proper_iterable(operators) operators = list(operators) if not operators: raise ValueError( "Expected a list of >=1 operators. Found: %s" % operators) self._operators = operators # Validate dtype. dtype = operators[0].dtype for operator in operators: if operator.dtype != dtype: name_type = (str((o.name, o.dtype)) for o in operators) raise TypeError( "Expected all operators to have the same dtype. Found %s" % " ".join(name_type)) # Auto-set and check hints. # A Kronecker product is invertible, if and only if all factors are # invertible. if all(operator.is_non_singular for operator in operators): if is_non_singular is False: raise ValueError( "The Kronecker product of non-singular operators is always " "non-singular.") is_non_singular = True if all(operator.is_self_adjoint for operator in operators): if is_self_adjoint is False: raise ValueError( "The Kronecker product of self-adjoint operators is always " "self-adjoint.") is_self_adjoint = True # The eigenvalues of a Kronecker product are equal to the products of eigen # values of the corresponding factors. 
if all(operator.is_positive_definite for operator in operators): if is_positive_definite is False: raise ValueError("The Kronecker product of positive-definite operators " "is always positive-definite.") is_positive_definite = True # Initialization. graph_parents = [] for operator in operators: graph_parents.extend(operator.graph_parents) if name is None: name = operators[0].name for operator in operators[1:]: name += "_x_" + operator.name with ops.name_scope(name, values=graph_parents): super(LinearOperatorKronecker, self).__init__( dtype=dtype, graph_parents=graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) @property def operators(self): return self._operators def _shape(self): # Get final matrix shape. domain_dimension = self.operators[0].domain_dimension for operator in self.operators[1:]: domain_dimension *= operator.domain_dimension range_dimension = self.operators[0].range_dimension for operator in self.operators[1:]: range_dimension *= operator.range_dimension matrix_shape = tensor_shape.TensorShape([ range_dimension, domain_dimension]) # Get broadcast batch shape. # broadcast_shape checks for compatibility. batch_shape = self.operators[0].batch_shape for operator in self.operators[1:]: batch_shape = common_shapes.broadcast_shape( batch_shape, operator.batch_shape) return batch_shape.concatenate(matrix_shape) def _shape_tensor(self): domain_dimension = self.operators[0].domain_dimension_tensor() for operator in self.operators[1:]: domain_dimension *= operator.domain_dimension_tensor() range_dimension = self.operators[0].range_dimension_tensor() for operator in self.operators[1:]: range_dimension *= operator.range_dimension_tensor() matrix_shape = [range_dimension, domain_dimension] # Get broadcast batch shape. # broadcast_shape checks for compatibility. batch_shape = self.operators[0].batch_shape_tensor() for operator in self.operators[1:]: batch_shape = array_ops.broadcast_dynamic_shape( batch_shape, operator.batch_shape_tensor()) return array_ops.concat((batch_shape, matrix_shape), 0) def _matmul(self, x, adjoint=False, adjoint_arg=False): # Here we heavily rely on Roth's column Lemma [1]: # (A x B) * vec X = vec BXA^T, # where vec stacks all the columns of the matrix under each other. In our # case, x represents a batch of vec X (i.e. we think of x as a batch of # column vectors, rather than a matrix). Each member of the batch can be # reshaped to a matrix (hence we get a batch of matrices). # We can iteratively apply this lemma by noting that if B is a Kronecker # product, then we can apply the lemma again. # [1] W. E. Roth, "On direct product matrices," # Bulletin of the American Mathematical Society, vol. 40, pp. 461-468, # 1934 # Efficiency # Naively doing the Kronecker product, by calculating the dense matrix and # applying it will can take cubic time in the size of domain_dimension # (assuming a square matrix). The other issue is that calculating the dense # matrix can be prohibitively expensive, in that it can take a large amount # of memory. # # This implementation avoids this memory blow up by only computing matmuls # with the factors. In this way, we don't have to realize the dense matrix. # In terms of complexity, if we have Kronecker Factors of size: # (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \prod n_i, and we # have as input a [N, M] matrix, the naive approach would take O(N^2 M). 
# With this approach (ignoring reshaping of tensors and transposes for now), # the time complexity can be O(M * (\sum n_i) * N). There is also the # benefit of batched multiplication (In this example, the batch size is # roughly M * N) so this can be much faster. However, not factored in are # the costs of the several transposing of tensors, which can affect cache # behavior. # Below we document the shape manipulation for adjoint=False, # adjoint_arg=False, but the general case of different adjoints is still # handled. if adjoint_arg: x = linalg.adjoint(x) # Always add a batch dimension to enable broadcasting to work. batch_shape = array_ops.concat( [array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0) x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype) # x has shape [B, R, C], where B represent some number of batch dimensions, # R represents the number of rows, and C represents the number of columns. # In order to apply Roth's column lemma, we need to operate on a batch of # column vectors, so we reshape into a batch of column vectors. We put it # at the front to ensure that broadcasting between operators to the batch # dimensions B still works. output = _rotate_last_dim(x, rotate_right=True) # Also expand the shape to be [A, C, B, R]. The first dimension will be # used to accumulate dimensions from each operator matmul. output = output[array_ops.newaxis, ...] # In this loop, A is going to refer to the value of the accumulated # dimension. A = 1 at the start, and will end up being self.range_dimension. # V will refer to the last dimension. V = R at the start, and will end up # being 1 in the end. for operator in self.operators[:-1]: # Reshape output from [A, C, B, V] to be # [A, C, B, V / op.domain_dimension, op.domain_dimension] if adjoint: operator_dimension = operator.range_dimension_tensor() else: operator_dimension = operator.domain_dimension_tensor() output = _unvec_by(output, operator_dimension) # We are computing (XA^T) = (AX^T)^T. # output has [A, C, B, V / op.domain_dimension, op.domain_dimension], # which is being converted to: # [A, C, B, V / op.domain_dimension, op.range_dimension] output = array_ops.matrix_transpose(output) output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False) output = array_ops.matrix_transpose(output) # Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension] output = _rotate_last_dim(output, rotate_right=False) output = _vec(output) output = _rotate_last_dim(output, rotate_right=True) # After the loop, we will have # A = self.range_dimension / op[-1].range_dimension # V = op[-1].domain_dimension # We convert that using matvec to get: # [A, C, B, op[-1].range_dimension] output = self.operators[-1].matvec(output, adjoint=adjoint) # Rearrange shape to be [B1, ... Bn, self.range_dimension, C] output = _rotate_last_dim(output, rotate_right=False) output = _vec(output) output = _rotate_last_dim(output, rotate_right=False) if x.shape.is_fully_defined(): column_dim = x.shape[-1] broadcast_batch_shape = common_shapes.broadcast_shape( x.shape[:-2], self.batch_shape) if adjoint: matrix_dimensions = [self.domain_dimension, column_dim] else: matrix_dimensions = [self.range_dimension, column_dim] output.set_shape(broadcast_batch_shape.concatenate( matrix_dimensions)) return output def _determinant(self): # Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m # matrix, and X2 is an n x n matrix. We can iteratively apply this property # to get the determinant of |X1 x X2 x X3 ...|. 
If T is the product of the # domain dimension of all operators, then we have: # |X1 x X2 x X3 ...| = # |X1| ** (T / m) * |X2 x X3 ... | ** m = # |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... = # |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n) # And by doing induction we have product(|X_i| ** (T / dim(X_i))). total = self.domain_dimension_tensor() determinant = 1. for operator in self.operators: determinant *= operator.determinant() ** math_ops.cast( total / operator.domain_dimension_tensor(), dtype=operator.dtype) return determinant def _log_abs_determinant(self): # This will be sum((total / dim(x_i)) * log |X_i|) total = self.domain_dimension_tensor() log_abs_det = 0. for operator in self.operators: log_abs_det += operator.log_abs_determinant() * math_ops.cast( total / operator.domain_dimension_tensor(), dtype=operator.dtype) return log_abs_det def _trace(self): # tr(A x B) = tr(A) * tr(B) trace = 1. for operator in self.operators: trace *= operator.trace() return trace def _solve(self, rhs, adjoint=False, adjoint_arg=False): # Here we follow the same use of Roth's column lemma as in `matmul`, with # the key difference that we replace all `matmul` instances with `solve`. # This follows from the property that inv(A x B) = inv(A) x inv(B). # Below we document the shape manipulation for adjoint=False, # adjoint_arg=False, but the general case of different adjoints is still # handled. if adjoint_arg: rhs = linalg.adjoint(rhs) # Always add a batch dimension to enable broadcasting to work. batch_shape = array_ops.concat( [array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0) rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype) # rhs has shape [B, R, C], where B represent some number of batch # dimensions, # R represents the number of rows, and C represents the number of columns. # In order to apply Roth's column lemma, we need to operate on a batch of # column vectors, so we reshape into a batch of column vectors. We put it # at the front to ensure that broadcasting between operators to the batch # dimensions B still works. output = _rotate_last_dim(rhs, rotate_right=True) # Also expand the shape to be [A, C, B, R]. The first dimension will be # used to accumulate dimensions from each operator matmul. output = output[array_ops.newaxis, ...] # In this loop, A is going to refer to the value of the accumulated # dimension. A = 1 at the start, and will end up being self.range_dimension. # V will refer to the last dimension. V = R at the start, and will end up # being 1 in the end. for operator in self.operators[:-1]: # Reshape output from [A, C, B, V] to be # [A, C, B, V / op.domain_dimension, op.domain_dimension] if adjoint: operator_dimension = operator.range_dimension_tensor() else: operator_dimension = operator.domain_dimension_tensor() output = _unvec_by(output, operator_dimension) # We are computing (XA^-1^T) = (A^-1 X^T)^T. 
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension], # which is being converted to: # [A, C, B, V / op.domain_dimension, op.range_dimension] output = array_ops.matrix_transpose(output) output = operator.solve(output, adjoint=adjoint, adjoint_arg=False) output = array_ops.matrix_transpose(output) # Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension] output = _rotate_last_dim(output, rotate_right=False) output = _vec(output) output = _rotate_last_dim(output, rotate_right=True) # After the loop, we will have # A = self.range_dimension / op[-1].range_dimension # V = op[-1].domain_dimension # We convert that using matvec to get: # [A, C, B, op[-1].range_dimension] output = self.operators[-1].solvevec(output, adjoint=adjoint) # Rearrange shape to be [B1, ... Bn, self.range_dimension, C] output = _rotate_last_dim(output, rotate_right=False) output = _vec(output) output = _rotate_last_dim(output, rotate_right=False) if rhs.shape.is_fully_defined(): column_dim = rhs.shape[-1] broadcast_batch_shape = common_shapes.broadcast_shape( rhs.shape[:-2], self.batch_shape) if adjoint: matrix_dimensions = [self.domain_dimension, column_dim] else: matrix_dimensions = [self.range_dimension, column_dim] output.set_shape(broadcast_batch_shape.concatenate( matrix_dimensions)) return output def _diag_part(self): diag_part = self.operators[0].diag_part() for operator in self.operators[1:]: diag_part = diag_part[..., :, array_ops.newaxis] op_diag_part = operator.diag_part()[..., array_ops.newaxis, :] diag_part *= op_diag_part diag_part = array_ops.reshape( diag_part, shape=array_ops.concat( [array_ops.shape(diag_part)[:-2], [-1]], axis=0)) if self.range_dimension > self.domain_dimension: diag_dimension = self.domain_dimension else: diag_dimension = self.range_dimension diag_part.set_shape( self.batch_shape.concatenate(diag_dimension)) return diag_part def _to_dense(self): product = self.operators[0].to_dense() for operator in self.operators[1:]: # Product has shape [B, R1, 1, C1]. product = product[ ..., :, array_ops.newaxis, :, array_ops.newaxis] # Operator has shape [B, 1, R2, 1, C2]. op_to_mul = operator.to_dense()[ ..., array_ops.newaxis, :, array_ops.newaxis, :] # This is now [B, R1, R2, C1, C2]. product *= op_to_mul # Now merge together dimensions to get [B, R1 * R2, C1 * C2]. product = array_ops.reshape( product, shape=array_ops.concat( [array_ops.shape(product)[:-4], [array_ops.shape(product)[-4] * array_ops.shape(product)[-3], array_ops.shape(product)[-2] * array_ops.shape(product)[-1]] ], axis=0)) product.set_shape(self.shape) return product def _assert_non_singular(self): if all(operator.is_square for operator in self.operators): asserts = [operator.assert_non_singular() for operator in self.operators] return control_flow_ops.group(asserts) else: raise errors.InvalidArgumentError( node_def=None, op=None, message="All Kronecker factors must be " "square for the product to be invertible.") def _assert_self_adjoint(self): if all(operator.is_square for operator in self.operators): asserts = [operator.assert_self_adjoint() for operator in self.operators] return control_flow_ops.group(asserts) else: raise errors.InvalidArgumentError( node_def=None, op=None, message="All Kronecker factors must be " "square for the product to be self adjoint.")
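# A minimal usage sketch, not part of the original module (assumes eager
# execution and the public `tf.linalg` API). The operator never forms the
# dense Kronecker product, yet its matmul and determinant agree with the
# dense `np.kron` computation, including the identity
# |X1 (x) X2| = |X1|**n * |X2|**m used in `_determinant` above.
def _kronecker_sketch():
  import numpy as np
  import tensorflow as tf

  x1 = np.array([[2., 1.], [1., 3.]], dtype=np.float32)            # 2 x 2
  x2 = np.array([[1., 0., 1.], [0., 2., 0.], [1., 0., 3.]],
                dtype=np.float32)                                  # 3 x 3
  operator = tf.linalg.LinearOperatorKronecker([
      tf.linalg.LinearOperatorFullMatrix(x1),
      tf.linalg.LinearOperatorFullMatrix(x2)])

  dense = np.kron(x1, x2)                                          # 6 x 6
  u = np.arange(12., dtype=np.float32).reshape(6, 2)
  np.testing.assert_allclose(operator.matmul(u).numpy(), dense @ u, rtol=1e-4)
  np.testing.assert_allclose(
      operator.determinant().numpy(),
      np.linalg.det(x1) ** 3 * np.linalg.det(x2) ** 2, rtol=1e-4)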
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_kronecker.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base class for linear operators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import contextlib import numpy as np import six from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.module import module from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator_algebra from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import dispatch from tensorflow.python.util.tf_export import tf_export __all__ = ["LinearOperator"] # TODO(langmore) Use matrix_solve_ls for singular or non-square matrices. @tf_export("linalg.LinearOperator") @six.add_metaclass(abc.ABCMeta) class LinearOperator(module.Module): """Base class defining a [batch of] linear operator[s]. Subclasses of `LinearOperator` provide access to common methods on a (batch) matrix, without the need to materialize the matrix. This allows: * Matrix free computations * Operators that take advantage of special structure, while providing a consistent API to users. #### Subclassing To enable a public method, subclasses should implement the leading-underscore version of the method. The argument signature should be identical except for the omission of `name="..."`. For example, to enable `matmul(x, adjoint=False, name="matmul")` a subclass should implement `_matmul(x, adjoint=False)`. #### Performance contract Subclasses should only implement the assert methods (e.g. `assert_non_singular`) if they can be done in less than `O(N^3)` time. Class docstrings should contain an explanation of computational complexity. Since this is a high-performance library, attention should be paid to detail, and explanations can include constants as well as Big-O notation. #### Shape compatibility `LinearOperator` subclasses should operate on a [batch] matrix with compatible shape. Class docstrings should define what is meant by compatible shape. Some subclasses may not support batching. Examples: `x` is a batch matrix with compatible shape for `matmul` if ``` operator.shape = [B1,...,Bb] + [M, N], b >= 0, x.shape = [B1,...,Bb] + [N, R] ``` `rhs` is a batch matrix with compatible shape for `solve` if ``` operator.shape = [B1,...,Bb] + [M, N], b >= 0, rhs.shape = [B1,...,Bb] + [M, R] ``` #### Example docstring for subclasses. This operator acts like a (batch) matrix `A` with shape `[B1,...,Bb, M, N]` for some `b >= 0`. 
The first `b` indices index a batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is an `m x n` matrix. Again, this matrix `A` may not be materialized, but for purposes of identifying and working with compatible arguments the shape is relevant. Examples: ```python some_tensor = ... shape = ???? operator = MyLinOp(some_tensor) operator.shape() ==> [2, 4, 4] operator.log_abs_determinant() ==> Shape [2] Tensor x = ... Shape [2, 4, 5] Tensor operator.matmul(x) ==> Shape [2, 4, 5] Tensor ``` #### Shape compatibility This operator acts on batch matrices with compatible shape. FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE #### Performance FILL THIS IN #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, dtype, graph_parents=None, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None): r"""Initialize the `LinearOperator`. **This is a private method for subclass use.** **Subclasses should copy-paste this `__init__` documentation.** Args: dtype: The type of the this `LinearOperator`. Arguments to `matmul` and `solve` will have to be this type. graph_parents: Python list of graph prerequisites of this `LinearOperator` Typically tensors that are passed during initialization. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `dtype` is real, this is equivalent to being symmetric. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: ValueError: If any member of graph_parents is `None` or not a `Tensor`. ValueError: If hints are set incorrectly. """ # Check and auto-set flags. if is_positive_definite: if is_non_singular is False: raise ValueError("A positive definite matrix is always non-singular.") is_non_singular = True if is_non_singular: if is_square is False: raise ValueError("A non-singular matrix is always square.") is_square = True if is_self_adjoint: if is_square is False: raise ValueError("A self-adjoint matrix is always square.") is_square = True self._is_square_set_or_implied_by_hints = is_square graph_parents = [] if graph_parents is None else graph_parents for i, t in enumerate(graph_parents): if t is None or not (linear_operator_util.is_ref(t) or tensor_util.is_tensor(t)): raise ValueError("Graph parent item %d is not a Tensor; %s." 
% (i, t)) self._dtype = dtypes.as_dtype(dtype).base_dtype if dtype else dtype self._graph_parents = graph_parents self._is_non_singular = is_non_singular self._is_self_adjoint = is_self_adjoint self._is_positive_definite = is_positive_definite self._name = name or type(self).__name__ @contextlib.contextmanager def _name_scope(self, name=None): """Helper function to standardize op scope.""" full_name = self.name if name is not None: full_name += "/" + name with ops.name_scope(full_name) as scope: yield scope @property def dtype(self): """The `DType` of `Tensor`s handled by this `LinearOperator`.""" return self._dtype @property def name(self): """Name prepended to all ops created by this `LinearOperator`.""" return self._name @property def graph_parents(self): """List of graph dependencies of this `LinearOperator`.""" return self._graph_parents @property def is_non_singular(self): return self._is_non_singular @property def is_self_adjoint(self): return self._is_self_adjoint @property def is_positive_definite(self): return self._is_positive_definite @property def is_square(self): """Return `True/False` depending on if this operator is square.""" # Static checks done after __init__. Why? Because domain/range dimension # sometimes requires lots of work done in the derived class after init. auto_square_check = self.domain_dimension == self.range_dimension if self._is_square_set_or_implied_by_hints is False and auto_square_check: raise ValueError( "User set is_square hint to False, but the operator was square.") if self._is_square_set_or_implied_by_hints is None: return auto_square_check return self._is_square_set_or_implied_by_hints @abc.abstractmethod def _shape(self): # Write this in derived class to enable all static shape methods. raise NotImplementedError("_shape is not implemented.") @property def shape(self): """`TensorShape` of this `LinearOperator`. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `TensorShape([B1,...,Bb, M, N])`, equivalent to `A.shape`. Returns: `TensorShape`, statically determined, may be undefined. """ return self._shape() @abc.abstractmethod def _shape_tensor(self): raise NotImplementedError("_shape_tensor is not implemented.") def shape_tensor(self, name="shape_tensor"): """Shape of this `LinearOperator`, determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding `[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`. Args: name: A name for this `Op`. Returns: `int32` `Tensor` """ with self._name_scope(name): # Prefer to use statically defined shape if available. if self.shape.is_fully_defined(): return linear_operator_util.shape_tensor(self.shape.as_list()) else: return self._shape_tensor() @property def batch_shape(self): """`TensorShape` of batch dimensions of this `LinearOperator`. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `TensorShape([B1,...,Bb])`, equivalent to `A.shape[:-2]` Returns: `TensorShape`, statically determined, may be undefined. """ # Derived classes get this "for free" once .shape is implemented. return self.shape[:-2] def batch_shape_tensor(self, name="batch_shape_tensor"): """Shape of batch dimensions of this operator, determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding `[B1,...,Bb]`. Args: name: A name for this `Op`. 
Returns: `int32` `Tensor` """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): # Prefer to use statically defined shape if available. if self.batch_shape.is_fully_defined(): return linear_operator_util.shape_tensor( self.batch_shape.as_list(), name="batch_shape") else: return self.shape_tensor()[:-2] @property def tensor_rank(self, name="tensor_rank"): """Rank (in the sense of tensors) of matrix corresponding to this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`. Args: name: A name for this `Op`. Returns: Python integer, or None if the tensor rank is undefined. """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): return self.shape.ndims def tensor_rank_tensor(self, name="tensor_rank_tensor"): """Rank (in the sense of tensors) of matrix corresponding to this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`. Args: name: A name for this `Op`. Returns: `int32` `Tensor`, determined at runtime. """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): # Prefer to use statically defined shape if available. if self.tensor_rank is not None: return ops.convert_to_tensor(self.tensor_rank) else: return array_ops.size(self.shape_tensor()) @property def domain_dimension(self): """Dimension (in the sense of vector spaces) of the domain of this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `N`. Returns: `Dimension` object. """ # Derived classes get this "for free" once .shape is implemented. if self.shape.rank is None: return tensor_shape.Dimension(None) else: return self.shape.dims[-1] def domain_dimension_tensor(self, name="domain_dimension_tensor"): """Dimension (in the sense of vector spaces) of the domain of this operator. Determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `N`. Args: name: A name for this `Op`. Returns: `int32` `Tensor` """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): # Prefer to use statically defined shape if available. dim_value = tensor_shape.dimension_value(self.domain_dimension) if dim_value is not None: return ops.convert_to_tensor(dim_value) else: return self.shape_tensor()[-1] @property def range_dimension(self): """Dimension (in the sense of vector spaces) of the range of this operator. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `M`. Returns: `Dimension` object. """ # Derived classes get this "for free" once .shape is implemented. if self.shape.dims: return self.shape.dims[-2] else: return tensor_shape.Dimension(None) def range_dimension_tensor(self, name="range_dimension_tensor"): """Dimension (in the sense of vector spaces) of the range of this operator. Determined at runtime. If this operator acts like the batch matrix `A` with `A.shape = [B1,...,Bb, M, N]`, then this returns `M`. Args: name: A name for this `Op`. Returns: `int32` `Tensor` """ # Derived classes get this "for free" once .shape() is implemented. with self._name_scope(name): # Prefer to use statically defined shape if available. 
dim_value = tensor_shape.dimension_value(self.range_dimension) if dim_value is not None: return ops.convert_to_tensor(dim_value) else: return self.shape_tensor()[-2] def _assert_non_singular(self): """Private default implementation of _assert_non_singular.""" logging.warn( "Using (possibly slow) default implementation of assert_non_singular." " Requires conversion to a dense matrix and O(N^3) operations.") if self._can_use_cholesky(): return self.assert_positive_definite() else: singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False) # TODO(langmore) Add .eig and .cond as methods. cond = (math_ops.reduce_max(singular_values, axis=-1) / math_ops.reduce_min(singular_values, axis=-1)) return check_ops.assert_less( cond, self._max_condition_number_to_be_non_singular(), message="Singular matrix up to precision epsilon.") def _max_condition_number_to_be_non_singular(self): """Return the maximum condition number that we consider nonsingular.""" with ops.name_scope("max_nonsingular_condition_number"): dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps eps = math_ops.cast( math_ops.reduce_max([ 100., math_ops.cast(self.range_dimension_tensor(), self.dtype), math_ops.cast(self.domain_dimension_tensor(), self.dtype) ]), self.dtype) * dtype_eps return 1. / eps def assert_non_singular(self, name="assert_non_singular"): """Returns an `Op` that asserts this operator is non singular. This operator is considered non-singular if ``` ConditionNumber < max{100, range_dimension, domain_dimension} * eps, eps := np.finfo(self.dtype.as_numpy_dtype).eps ``` Args: name: A string name to prepend to created ops. Returns: An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if the operator is singular. """ with self._name_scope(name): return self._assert_non_singular() def _assert_positive_definite(self): """Default implementation of _assert_positive_definite.""" logging.warn( "Using (possibly slow) default implementation of " "assert_positive_definite." " Requires conversion to a dense matrix and O(N^3) operations.") # If the operator is self-adjoint, then checking that # Cholesky decomposition succeeds + results in positive diag is necessary # and sufficient. if self.is_self_adjoint: return check_ops.assert_positive( array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())), message="Matrix was not positive definite.") # We have no generic check for positive definite. raise NotImplementedError("assert_positive_definite is not implemented.") def assert_positive_definite(self, name="assert_positive_definite"): """Returns an `Op` that asserts this operator is positive definite. Here, positive definite means that the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive definite. Args: name: A name to give this `Op`. Returns: An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if the operator is not positive definite. """ with self._name_scope(name): return self._assert_positive_definite() def _assert_self_adjoint(self): dense = self.to_dense() logging.warn( "Using (possibly slow) default implementation of assert_self_adjoint." " Requires conversion to a dense matrix.") return check_ops.assert_equal( dense, linalg.adjoint(dense), message="Matrix was not equal to its adjoint.") def assert_self_adjoint(self, name="assert_self_adjoint"): """Returns an `Op` that asserts this operator is self-adjoint. Here we check that this operator is *exactly* equal to its hermitian transpose. 
Args: name: A string name to prepend to created ops. Returns: An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if the operator is not self-adjoint. """ with self._name_scope(name): return self._assert_self_adjoint() def _check_input_dtype(self, arg): """Check that arg.dtype == self.dtype.""" if arg.dtype.base_dtype != self.dtype: raise TypeError( "Expected argument to have dtype %s. Found: %s in tensor %s" % (self.dtype, arg.dtype, arg)) @abc.abstractmethod def _matmul(self, x, adjoint=False, adjoint_arg=False): raise NotImplementedError("_matmul is not implemented.") def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"): """Transform [batch] matrix `x` with left multiplication: `x --> Ax`. ```python # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] operator = LinearOperator(...) operator.shape = [..., M, N] X = ... # shape [..., N, R], batch matrix, R > 0. Y = operator.matmul(X) Y.shape ==> [..., M, R] Y[..., :, r] = sum_j A[..., :, j] X[j, r] ``` Args: x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as `self`. See class docstring for definition of compatibility. adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is the hermitian transpose (transposition and complex conjugation). name: A name for this `Op`. Returns: A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype` as `self`. """ if isinstance(x, LinearOperator): left_operator = self.adjoint() if adjoint else self right_operator = x.adjoint() if adjoint_arg else x if (right_operator.range_dimension is not None and left_operator.domain_dimension is not None and right_operator.range_dimension != left_operator.domain_dimension): raise ValueError( "Operators are incompatible. Expected `x` to have dimension" " {} but got {}.".format( left_operator.domain_dimension, right_operator.range_dimension)) with self._name_scope(name): return linear_operator_algebra.matmul(left_operator, right_operator) with self._name_scope(name): x = ops.convert_to_tensor(x, name="x") self._check_input_dtype(x) self_dim = -2 if adjoint else -1 arg_dim = -1 if adjoint_arg else -2 tensor_shape.dimension_at_index( self.shape, self_dim).assert_is_compatible_with( x.shape[arg_dim]) return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg) def _matvec(self, x, adjoint=False): x_mat = array_ops.expand_dims(x, axis=-1) y_mat = self.matmul(x_mat, adjoint=adjoint) return array_ops.squeeze(y_mat, axis=-1) def matvec(self, x, adjoint=False, name="matvec"): """Transform [batch] vector `x` with left multiplication: `x --> Ax`. ```python # Make an operator acting like batch matric A. Assume A.shape = [..., M, N] operator = LinearOperator(...) X = ... # shape [..., N], batch vector Y = operator.matvec(X) Y.shape ==> [..., M] Y[..., :] = sum_j A[..., :, j] X[..., j] ``` Args: x: `Tensor` with compatible shape and same `dtype` as `self`. `x` is treated as a [batch] vector meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility. adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`. name: A name for this `Op`. Returns: A `Tensor` with shape `[..., M]` and same `dtype` as `self`. 
""" with self._name_scope(name): x = ops.convert_to_tensor(x, name="x") self._check_input_dtype(x) self_dim = -2 if adjoint else -1 tensor_shape.dimension_at_index( self.shape, self_dim).assert_is_compatible_with(x.shape[-1]) return self._matvec(x, adjoint=adjoint) def _determinant(self): logging.warn( "Using (possibly slow) default implementation of determinant." " Requires conversion to a dense matrix and O(N^3) operations.") if self._can_use_cholesky(): return math_ops.exp(self.log_abs_determinant()) return linalg_ops.matrix_determinant(self.to_dense()) def determinant(self, name="det"): """Determinant for every batch member. Args: name: A name for this `Op`. Returns: `Tensor` with shape `self.batch_shape` and same `dtype` as `self`. Raises: NotImplementedError: If `self.is_square` is `False`. """ if self.is_square is False: raise NotImplementedError( "Determinant not implemented for an operator that is expected to " "not be square.") with self._name_scope(name): return self._determinant() def _log_abs_determinant(self): logging.warn( "Using (possibly slow) default implementation of determinant." " Requires conversion to a dense matrix and O(N^3) operations.") if self._can_use_cholesky(): diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())) return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1]) _, log_abs_det = linalg.slogdet(self.to_dense()) return log_abs_det def log_abs_determinant(self, name="log_abs_det"): """Log absolute value of determinant for every batch member. Args: name: A name for this `Op`. Returns: `Tensor` with shape `self.batch_shape` and same `dtype` as `self`. Raises: NotImplementedError: If `self.is_square` is `False`. """ if self.is_square is False: raise NotImplementedError( "Determinant not implemented for an operator that is expected to " "not be square.") with self._name_scope(name): return self._log_abs_determinant() def _solve(self, rhs, adjoint=False, adjoint_arg=False): """Default implementation of _solve.""" if self.is_square is False: raise NotImplementedError( "Solve is not yet implemented for non-square operators.") logging.warn( "Using (possibly slow) default implementation of solve." " Requires conversion to a dense matrix and O(N^3) operations.") rhs = linalg.adjoint(rhs) if adjoint_arg else rhs if self._can_use_cholesky(): return linear_operator_util.cholesky_solve_with_broadcast( linalg_ops.cholesky(self.to_dense()), rhs) return linear_operator_util.matrix_solve_with_broadcast( self.to_dense(), rhs, adjoint=adjoint) def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"): """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`. The returned `Tensor` will be close to an exact solution if `A` is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: ```python # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] operator = LinearOperator(...) operator.shape = [..., M, N] # Solve R > 0 linear systems for every member of the batch. RHS = ... # shape [..., M, R] X = operator.solve(RHS) # X[..., :, r] is the solution to the r'th linear system # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r] operator.matmul(X) ==> RHS ``` Args: rhs: `Tensor` with same `dtype` as this operator and compatible shape. `rhs` is treated like a [batch] matrix meaning for every set of leading dimensions, the last two dimensions defines a matrix. See class docstring for definition of compatibility. adjoint: Python `bool`. 
If `True`, solve the system involving the adjoint of this `LinearOperator`: `A^H X = rhs`. adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H` is the hermitian transpose (transposition and complex conjugation). name: A name scope to use for ops added by this method. Returns: `Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`. Raises: NotImplementedError: If `self.is_non_singular` or `is_square` is False. """ if self.is_non_singular is False: raise NotImplementedError( "Exact solve not implemented for an operator that is expected to " "be singular.") if self.is_square is False: raise NotImplementedError( "Exact solve not implemented for an operator that is expected to " "not be square.") if isinstance(rhs, LinearOperator): left_operator = self.adjoint() if adjoint else self right_operator = rhs.adjoint() if adjoint_arg else rhs if (right_operator.range_dimension is not None and left_operator.domain_dimension is not None and right_operator.range_dimension != left_operator.domain_dimension): raise ValueError( "Operators are incompatible. Expected `rhs` to have dimension" " {} but got {}.".format( left_operator.domain_dimension, right_operator.range_dimension)) with self._name_scope(name): return linear_operator_algebra.solve(left_operator, right_operator) with self._name_scope(name): rhs = ops.convert_to_tensor(rhs, name="rhs") self._check_input_dtype(rhs) self_dim = -1 if adjoint else -2 arg_dim = -1 if adjoint_arg else -2 tensor_shape.dimension_at_index( self.shape, self_dim).assert_is_compatible_with( rhs.shape[arg_dim]) return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg) def _solvevec(self, rhs, adjoint=False): """Default implementation of _solvevec.""" rhs_mat = array_ops.expand_dims(rhs, axis=-1) solution_mat = self.solve(rhs_mat, adjoint=adjoint) return array_ops.squeeze(solution_mat, axis=-1) def solvevec(self, rhs, adjoint=False, name="solve"): """Solve single equation with best effort: `A X = rhs`. The returned `Tensor` will be close to an exact solution if `A` is well conditioned. Otherwise closeness will vary. See class docstring for details. Examples: ```python # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N] operator = LinearOperator(...) operator.shape = [..., M, N] # Solve one linear system for every member of the batch. RHS = ... # shape [..., M] X = operator.solvevec(RHS) # X is the solution to the linear system # sum_j A[..., :, j] X[..., j] = RHS[..., :] operator.matvec(X) ==> RHS ``` Args: rhs: `Tensor` with same `dtype` as this operator. `rhs` is treated like a [batch] vector meaning for every set of leading dimensions, the last dimension defines a vector. See class docstring for definition of compatibility regarding batch dimensions. adjoint: Python `bool`. If `True`, solve the system involving the adjoint of this `LinearOperator`: `A^H X = rhs`. name: A name scope to use for ops added by this method. Returns: `Tensor` with shape `[...,N]` and same `dtype` as `rhs`. Raises: NotImplementedError: If `self.is_non_singular` or `is_square` is False. """ with self._name_scope(name): rhs = ops.convert_to_tensor(rhs, name="rhs") self._check_input_dtype(rhs) self_dim = -1 if adjoint else -2 tensor_shape.dimension_at_index( self.shape, self_dim).assert_is_compatible_with(rhs.shape[-1]) return self._solvevec(rhs, adjoint=adjoint) def adjoint(self, name="adjoint"): """Returns the adjoint of the current `LinearOperator`. Given `A` representing this `LinearOperator`, return `A*`. 
Note that calling `self.adjoint()` and `self.H` are equivalent. Args: name: A name for this `Op`. Returns: `LinearOperator` which represents the adjoint of this `LinearOperator`. """ if self.is_self_adjoint is True: # pylint: disable=g-bool-id-comparison return self with self._name_scope(name): return linear_operator_algebra.adjoint(self) # self.H is equivalent to self.adjoint(). H = property(adjoint, None) def inverse(self, name="inverse"): """Returns the Inverse of this `LinearOperator`. Given `A` representing this `LinearOperator`, return a `LinearOperator` representing `A^-1`. Args: name: A name scope to use for ops added by this method. Returns: `LinearOperator` representing inverse of this matrix. Raises: ValueError: When the `LinearOperator` is not hinted to be `non_singular`. """ if self.is_square is False: # pylint: disable=g-bool-id-comparison raise ValueError("Cannot take the Inverse: This operator represents " "a non square matrix.") if self.is_non_singular is False: # pylint: disable=g-bool-id-comparison raise ValueError("Cannot take the Inverse: This operator represents " "a singular matrix.") with self._name_scope(name): return linear_operator_algebra.inverse(self) def cholesky(self, name="cholesky"): """Returns a Cholesky factor as a `LinearOperator`. Given `A` representing this `LinearOperator`, if `A` is positive definite self-adjoint, return `L`, where `A = L L^T`, i.e. the cholesky decomposition. Args: name: A name for this `Op`. Returns: `LinearOperator` which represents the lower triangular matrix in the Cholesky decomposition. Raises: ValueError: When the `LinearOperator` is not hinted to be positive definite and self adjoint. """ if not self._can_use_cholesky(): raise ValueError("Cannot take the Cholesky decomposition: " "Not a positive definite self adjoint matrix.") with self._name_scope(name): return linear_operator_algebra.cholesky(self) def _to_dense(self): """Generic and often inefficient implementation. Override often.""" if self.batch_shape.is_fully_defined(): batch_shape = self.batch_shape else: batch_shape = self.batch_shape_tensor() dim_value = tensor_shape.dimension_value(self.domain_dimension) if dim_value is not None: n = dim_value else: n = self.domain_dimension_tensor() eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype) return self.matmul(eye) def to_dense(self, name="to_dense"): """Return a dense (batch) matrix representing this operator.""" with self._name_scope(name): return self._to_dense() def _diag_part(self): """Generic and often inefficient implementation. Override often.""" return array_ops.matrix_diag_part(self.to_dense()) def diag_part(self, name="diag_part"): """Efficiently get the [batch] diagonal part of this operator. If this operator has shape `[B1,...,Bb, M, N]`, this returns a `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`. ``` my_operator = LinearOperatorDiag([1., 2.]) # Efficiently get the diagonal my_operator.diag_part() ==> [1., 2.] # Equivalent, but inefficient method tf.linalg.diag_part(my_operator.to_dense()) ==> [1., 2.] ``` Args: name: A name for this `Op`. Returns: diag_part: A `Tensor` of same `dtype` as self. """ with self._name_scope(name): return self._diag_part() def _trace(self): return math_ops.reduce_sum(self.diag_part(), axis=-1) def trace(self, name="trace"): """Trace of the linear operator, equal to sum of `self.diag_part()`. If the operator is square, this is also the sum of the eigenvalues. 
Args: name: A name for this `Op`. Returns: Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`. """ with self._name_scope(name): return self._trace() def _add_to_tensor(self, x): # Override if a more efficient implementation is available. return self.to_dense() + x def add_to_tensor(self, x, name="add_to_tensor"): """Add matrix represented by this operator to `x`. Equivalent to `A + x`. Args: x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`. name: A name to give this `Op`. Returns: A `Tensor` with broadcast shape and same `dtype` as `self`. """ with self._name_scope(name): x = ops.convert_to_tensor(x, name="x") self._check_input_dtype(x) return self._add_to_tensor(x) def _can_use_cholesky(self): return self.is_self_adjoint and self.is_positive_definite # Overrides for tf.linalg functions. This allows a LinearOperator to be used in # place of a Tensor. # For instance tf.trace(linop) and linop.trace() both work. @dispatch.dispatch_for_types(linalg.adjoint, LinearOperator) def _adjoint(matrix, name=None): return matrix.adjoint(name) @dispatch.dispatch_for_types(linalg.cholesky, LinearOperator) def _cholesky(input, name=None): # pylint:disable=redefined-builtin return input.cholesky(name) # The signature has to match with the one in python/op/array_ops.py, # so we have k and padding_value even though we don't use them here. @dispatch.dispatch_for_types(linalg.diag_part, LinearOperator) def _diag_part(input, name="diag_part", k=0, padding_value=0): # pylint:disable=redefined-builtin, unused-argument return input.diag_part(name) @dispatch.dispatch_for_types(linalg.det, LinearOperator) def _det(input, name=None): # pylint:disable=redefined-builtin return input.determinant(name) @dispatch.dispatch_for_types(linalg.inv, LinearOperator) def _inverse(input, adjoint=False, name=None): # pylint:disable=redefined-builtin inv = input.inverse(name) if adjoint: inv = inv.adjoint() return inv @dispatch.dispatch_for_types(linalg.logdet, LinearOperator) def _logdet(matrix, name=None): if matrix.is_positive_definite and matrix.is_self_adjoint: return matrix.log_abs_determinant(name) raise ValueError("Expected matrix to be self-adjoint positive definite.") @dispatch.dispatch_for_types(math_ops.matmul, LinearOperator) def _matmul( # pylint:disable=missing-docstring a, b, transpose_a=False, transpose_b=False, adjoint_a=False, adjoint_b=False, a_is_sparse=False, b_is_sparse=False, name=None): if transpose_a or transpose_b: raise ValueError("Transposing not supported at this time.") if a_is_sparse or b_is_sparse: raise ValueError("Sparse methods not supported at this time.") if not isinstance(a, LinearOperator): # We use the identity (B^HA^H)^H = AB adjoint_matmul = b.matmul( a, adjoint=(not adjoint_b), adjoint_arg=(not adjoint_a), name=name) return linalg.adjoint(adjoint_matmul) return a.matmul( b, adjoint=adjoint_a, adjoint_arg=adjoint_b, name=name) @dispatch.dispatch_for_types(linalg.solve, LinearOperator) def _solve( matrix, rhs, adjoint=False, name=None): if not isinstance(matrix, LinearOperator): raise ValueError("Passing in `matrix` as a Tensor and `rhs` as a " "LinearOperator is not supported.") return matrix.solve(rhs, adjoint=adjoint, name=name) @dispatch.dispatch_for_types(linalg.trace, LinearOperator) def _trace(x, name=None): return x.trace(name)
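# A minimal sketch of how the dispatch overrides above behave (assuming eager
# execution and `import tensorflow as tf`):
#
#   diag_op = tf.linalg.LinearOperatorDiag([1., 2.])
#   tf.linalg.trace(diag_op)      # dispatches to diag_op.trace()        ==> 3.
#   tf.linalg.det(diag_op)        # dispatches to diag_op.determinant()  ==> 2.
#   tf.linalg.diag_part(diag_op)  # dispatches to diag_op.diag_part()    ==> [1., 2.]
#   tf.linalg.inv(diag_op)        # returns a LinearOperator, not a dense Tensor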
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Internal utilities for `LinearOperator` classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.module import module from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables as variables_module from tensorflow.python.ops.linalg import linalg_impl as linalg ################################################################################ # To make more friendly for TF2. ################################################################################ def convert_nonref_to_tensor(value, dtype=None, dtype_hint=None, name=None): """Converts the given `value` to a `Tensor` if input is nonreference type. This function converts Python objects of various types to `Tensor` objects except if the input has nonreference semantics. Reference semantics are characterized by `is_ref` and is any object which is a `tf.Variable` or instance of `tf.Module`. This function accepts any input which `tf.convert_to_tensor` would also. Note: This function diverges from default Numpy behavior for `float` and `string` types when `None` is present in a Python list or scalar. Rather than silently converting `None` values, an error will be thrown. Args: value: An object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. dtype_hint: Optional element type for the returned tensor, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. name: Optional name to use if a new `Tensor` is created. Returns: tensor: A `Tensor` based on `value`. Raises: TypeError: If no conversion function is registered for `value` to `dtype`. RuntimeError: If a registered conversion function returns an invalid value. ValueError: If the `value` is a tensor not of given `dtype` in graph mode. #### Examples: ```python x = tf.Variable(0.) y = convert_nonref_to_tensor(x) x is y # ==> True x = tf.constant(0.) y = convert_nonref_to_tensor(x) x is y # ==> True x = np.array(0.) y = convert_nonref_to_tensor(x) x is y # ==> False tf.is_tensor(y) # ==> True x = tfp.util.DeferredTensor(lambda x: x, 13.37) y = convert_nonref_to_tensor(x) x is y # ==> True tf.is_tensor(y) # ==> False tf.equal(y, 13.37) # ==> True ``` """ # We explicitly do not use a tf.name_scope to avoid graph clutter. 
if value is None: return None if is_ref(value): if dtype is None: return value dtype_base = base_dtype(dtype) value_dtype_base = base_dtype(value.dtype) if dtype_base != value_dtype_base: raise TypeError('Mutable type must be of dtype "{}" but is "{}".'.format( dtype_name(dtype_base), dtype_name(value_dtype_base))) return value return ops.convert_to_tensor( value, dtype=dtype, dtype_hint=dtype_hint, name=name) def base_dtype(dtype): """Returns a non-reference `dtype` based on this `dtype`.""" dtype = dtypes.as_dtype(dtype) if hasattr(dtype, "base_dtype"): return dtype.base_dtype return dtype def dtype_name(dtype): """Returns the string name for this `dtype`.""" dtype = dtypes.as_dtype(dtype) if hasattr(dtype, "name"): return dtype.name if hasattr(dtype, "__name__"): return dtype.__name__ return str(dtype) def is_ref(x): """Evaluates if the object has reference semantics. An object is deemed "reference" if it is a `tf.Variable` instance or is derived from a `tf.Module` with `dtype` and `shape` properties. Args: x: Any object. Returns: is_ref: Python `bool` indicating whether the input has reference semantics, i.e., is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties. """ return ( # Note: we check that tf.Variable is a class because we might be using a # different backend other than TF. isinstance(x, variables_module.Variable) or (isinstance(x, module.Module) and hasattr(x, "dtype") and hasattr(x, "shape"))) def assert_not_ref_type(x, arg_name): if is_ref(x): raise TypeError( "Argument %s cannot be reference type. Found: %s" % (arg_name, type(x))) ################################################################################ # Asserts. ################################################################################ def assert_no_entries_with_modulus_zero( x, message=None, name="assert_no_entries_with_modulus_zero"): """Returns `Op` that asserts Tensor `x` has no entries with modulus zero. Args: x: Numeric `Tensor`, real, integer, or complex. message: A string message to prepend to failure message. name: A name to give this `Op`. Returns: An `Op` that asserts `x` has no entries with modulus zero. """ with ops.name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") dtype = x.dtype.base_dtype should_be_nonzero = math_ops.abs(x) zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype) return check_ops.assert_less(zero, should_be_nonzero, message=message) def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"): """Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts. Args: x: Numeric `Tensor`, real, integer, or complex. message: A string message to prepend to failure message. name: A name to give this `Op`. Returns: An `Op` that asserts `x` has no entries with a non-zero imaginary part. """ with ops.name_scope(name, values=[x]): x = ops.convert_to_tensor(x, name="x") dtype = x.dtype.base_dtype if dtype.is_floating: return control_flow_ops.no_op() zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype) return check_ops.assert_equal(zero, math_ops.imag(x), message=message) def assert_compatible_matrix_dimensions(operator, x): """Assert that an argument to solve/matmul has proper domain dimension. If `operator.shape[-2:] = [M, N]`, and `x.shape[-2:] = [Q, R]`, then `operator.matmul(x)` is defined only if `N = Q`. This `Op` returns an `Assert` that "fires" if this is not the case. Static checks are already done by the base class `LinearOperator`. Args: operator: `LinearOperator`. x: `Tensor`. Returns: `Assert` `Op`. 
""" # Static checks are done in the base class. Only tensor asserts here. assert_same_dd = check_ops.assert_equal( array_ops.shape(x)[-2], operator.domain_dimension_tensor(), # This error message made to look similar to error raised by static check # in the base class. message=("Dimensions are not compatible. " "shape[-2] of argument to be the same as this operator")) return assert_same_dd def assert_is_batch_matrix(tensor): """Static assert that `tensor` has rank `2` or higher.""" sh = tensor.shape if sh.ndims is not None and sh.ndims < 2: raise ValueError( "Expected [batch] matrix to have at least two dimensions. Found: " "%s" % tensor) def shape_tensor(shape, name=None): """Convert Tensor using default type, unless empty list or tuple.""" # Works just like random_ops._ShapeTensor. if isinstance(shape, (tuple, list)) and not shape: dtype = dtypes.int32 else: dtype = None return ops.convert_to_tensor(shape, dtype=dtype, name=name) ################################################################################ # Broadcasting versions of common linear algebra functions. # TODO(b/77519145) Do this more efficiently in some special cases. ################################################################################ def broadcast_matrix_batch_dims(batch_matrices, name=None): """Broadcast leading dimensions of zero or more [batch] matrices. Example broadcasting one batch dim of two simple matrices. ```python x = [[1, 2], [3, 4]] # Shape [2, 2], no batch dims y = [[[1]]] # Shape [1, 1, 1], 1 batch dim of shape [1] x_bc, y_bc = broadcast_matrix_batch_dims([x, y]) x_bc ==> [[[1, 2], [3, 4]]] # Shape [1, 2, 2], 1 batch dim of shape [1]. y_bc ==> same as y ``` Example broadcasting many batch dims ```python x = tf.random.normal(shape=(2, 3, 1, 4, 4)) y = tf.random.normal(shape=(1, 3, 2, 5, 5)) x_bc, y_bc = broadcast_matrix_batch_dims([x, y]) x_bc.shape ==> (2, 3, 2, 4, 4) y_bc.shape ==> (2, 3, 2, 5, 5) ``` Args: batch_matrices: Iterable of `Tensor`s, each having two or more dimensions. name: A string name to prepend to created ops. Returns: bcast_matrices: List of `Tensor`s, with `bcast_matricies[i]` containing the values from `batch_matrices[i]`, with possibly broadcast batch dims. Raises: ValueError: If any input `Tensor` is statically determined to have less than two dimensions. """ with ops.name_scope( name or "broadcast_matrix_batch_dims", values=batch_matrices): check_ops.assert_proper_iterable(batch_matrices) batch_matrices = list(batch_matrices) for i, mat in enumerate(batch_matrices): batch_matrices[i] = ops.convert_to_tensor(mat) assert_is_batch_matrix(batch_matrices[i]) if len(batch_matrices) < 2: return batch_matrices # Try static broadcasting. # bcast_batch_shape is the broadcast batch shape of ALL matrices. # E.g. if batch_matrices = [x, y], with # x.shape = [2, j, k] (batch shape = [2]) # y.shape = [3, 1, l, m] (batch shape = [3, 1]) # ==> bcast_batch_shape = [3, 2] bcast_batch_shape = batch_matrices[0].shape[:-2] for mat in batch_matrices[1:]: bcast_batch_shape = array_ops.broadcast_static_shape( bcast_batch_shape, mat.shape[:-2]) if bcast_batch_shape.is_fully_defined(): for i, mat in enumerate(batch_matrices): if mat.shape[:-2] != bcast_batch_shape: bcast_shape = array_ops.concat( [bcast_batch_shape.as_list(), array_ops.shape(mat)[-2:]], axis=0) batch_matrices[i] = array_ops.broadcast_to(mat, bcast_shape) return batch_matrices # Since static didn't work, do dynamic, which always copies data. 
bcast_batch_shape = array_ops.shape(batch_matrices[0])[:-2] for mat in batch_matrices[1:]: bcast_batch_shape = array_ops.broadcast_dynamic_shape( bcast_batch_shape, array_ops.shape(mat)[:-2]) for i, mat in enumerate(batch_matrices): batch_matrices[i] = array_ops.broadcast_to( mat, array_ops.concat( [bcast_batch_shape, array_ops.shape(mat)[-2:]], axis=0)) return batch_matrices def cholesky_solve_with_broadcast(chol, rhs, name=None): """Solve systems of linear equations.""" with ops.name_scope(name, "CholeskySolveWithBroadcast", [chol, rhs]): chol, rhs = broadcast_matrix_batch_dims([chol, rhs]) return linalg_ops.cholesky_solve(chol, rhs) def matrix_solve_with_broadcast(matrix, rhs, adjoint=False, name=None): """Solve systems of linear equations.""" with ops.name_scope(name, "MatrixSolveWithBroadcast", [matrix, rhs]): matrix = ops.convert_to_tensor(matrix, name="matrix") rhs = ops.convert_to_tensor(rhs, name="rhs", dtype=matrix.dtype) # If either matrix/rhs has extra dims, we can reshape to get rid of them. matrix, rhs, reshape_inv, still_need_to_transpose = _reshape_for_efficiency( matrix, rhs, adjoint_a=adjoint) # This will broadcast by brute force if we still need to. matrix, rhs = broadcast_matrix_batch_dims([matrix, rhs]) solution = linalg_ops.matrix_solve( matrix, rhs, adjoint=adjoint and still_need_to_transpose) return reshape_inv(solution) def matrix_triangular_solve_with_broadcast(matrix, rhs, lower=True, adjoint=False, name=None): """Solves triangular systems of linear equations with by backsubstitution. Works identically to `tf.linalg.triangular_solve`, but broadcasts batch dims of `matrix` and `rhs` (by replicating) if they are determined statically to be different, or if static shapes are not fully defined. Thus, this may result in an inefficient replication of data. Args: matrix: A Tensor. Must be one of the following types: `float64`, `float32`, `complex64`, `complex128`. Shape is `[..., M, M]`. rhs: A `Tensor`. Must have the same `dtype` as `matrix`. Shape is `[..., M, K]`. lower: An optional `bool`. Defaults to `True`. Indicates whether the innermost matrices in `matrix` are lower or upper triangular. adjoint: An optional `bool`. Defaults to `False`. Indicates whether to solve with matrix or its (block-wise) adjoint. name: A name for the operation (optional). Returns: `Tensor` with same `dtype` as `matrix` and shape `[..., M, K]`. """ with ops.name_scope(name, "MatrixTriangularSolve", [matrix, rhs]): matrix = ops.convert_to_tensor(matrix, name="matrix") rhs = ops.convert_to_tensor(rhs, name="rhs", dtype=matrix.dtype) # If either matrix/rhs has extra dims, we can reshape to get rid of them. matrix, rhs, reshape_inv, still_need_to_transpose = _reshape_for_efficiency( matrix, rhs, adjoint_a=adjoint) # lower indicates whether the matrix is lower triangular. If we have # manually taken adjoint inside _reshape_for_efficiency, it is now upper tri if not still_need_to_transpose and adjoint: lower = not lower # This will broadcast by brute force if we still need to. matrix, rhs = broadcast_matrix_batch_dims([matrix, rhs]) solution = linalg_ops.matrix_triangular_solve( matrix, rhs, lower=lower, adjoint=adjoint and still_need_to_transpose) return reshape_inv(solution) def _reshape_for_efficiency(a, b, transpose_a=False, transpose_b=False, adjoint_a=False, adjoint_b=False): """Maybe reshape a, b, and return an inverse map. For matmul/solve.""" def identity(x): return x # At this point, we have not taken transpose/adjoint of a/b. 
still_need_to_transpose = True if a.shape.ndims is None or b.shape.ndims is None: return a, b, identity, still_need_to_transpose # This could be handled in the future, but seems less common. if a.shape.ndims >= b.shape.ndims: return a, b, identity, still_need_to_transpose # From now on, we might modify b, but will not modify a. # Suppose: # a.shape = C + [m, n] # b.shape = S + C + [n, r] b_extra_ndims = b.shape.ndims - a.shape.ndims # b_extra_sh = S, b_main_sh = C + [n, r] b_extra_sh = array_ops.shape(b)[:b_extra_ndims] b_main_sh = array_ops.shape(b)[b_extra_ndims:] # No reason to flip unless the extra dims of b are big enough. Why? # Assume adjoint/transpose = False. Then... # By not flipping, we have to replicate a to shape # b_extra_sh + a.shape, # which could use extra memory. But in all cases, the final output has shape # b_extra_sh + a.shape[:-1] + [b.shape[-1]] # So we only end up creating a larger object if the end dim of b is smaller # than the end dim of a. This often happens, e.g. if b was a vector that was # expanded to a matrix (by appending a singleton). # Since adjoint/transpose may not be False, we must make adjustments here. # The dim of b that holds the multiple equations. a_domain_sz_ = a.shape[-2 if adjoint_a or transpose_a else -1] b_eq_sz_ = b.shape[-2 if adjoint_b or transpose_b else -1] b_extra_sz_ = ( np.prod(b.shape[:b_extra_ndims].as_list()) if b.shape[:b_extra_ndims].is_fully_defined() else None) if (a_domain_sz_ is not None and b_eq_sz_ is not None and b_extra_sz_ is not None): if b_extra_sz_ < 2 or a_domain_sz_ <= b_eq_sz_: return a, b, identity, still_need_to_transpose # At this point, we're flipping for sure! # Any transposes/adjoints will happen here explicitly, rather than in calling # code. Why? To avoid having to write separate complex code for each case. if adjoint_a: a = linalg.adjoint(a) elif transpose_a: a = linalg.transpose(a) if adjoint_b: b = linalg.adjoint(b) elif transpose_b: b = linalg.transpose(b) still_need_to_transpose = False # Recompute shapes, since the transpose/adjoint may have changed them. b_extra_sh = array_ops.shape(b)[:b_extra_ndims] b_main_sh = array_ops.shape(b)[b_extra_ndims:] # Permutation to put the extra dims at the end. perm = ( np.concatenate( (np.arange(b_extra_ndims, b.shape.ndims), np.arange(0, b_extra_ndims)), 0)) b_extra_on_end = array_ops.transpose(b, perm=perm) # Now squash this end into one long dim. b_squashed_end = array_ops.reshape( b_extra_on_end, array_ops.concat((b_main_sh[:-1], [-1]), 0)) def reshape_inv(y): # Expand the extra dims hanging off the end, "b_extra_sh". # Note we use y_sh[:-1] + [b_main_sh[-1]] rather than b_main_sh, because y # could have different batch dims than a and b, because of broadcasting. y_extra_shape = array_ops.concat( (array_ops.shape(y)[:-1], [b_main_sh[-1]], b_extra_sh), 0) y_extra_on_end = array_ops.reshape(y, y_extra_shape) inverse_perm = np.argsort(perm) return array_ops.transpose(y_extra_on_end, perm=inverse_perm) return a, b_squashed_end, reshape_inv, still_need_to_transpose ################################################################################ # Helpers for hints. ################################################################################ def use_operator_or_provided_hint_unless_contradicting( operator, hint_attr_name, provided_hint_value, message): """Get combined hint in the case where operator.hint should equal hint. Args: operator: LinearOperator that a meta-operator was initialized with. hint_attr_name: String name for the attribute. 
provided_hint_value: Bool or None. Value passed by user in initialization. message: Error message to print if hints contradict. Returns: True, False, or None. Raises: ValueError: If hints contradict. """ op_hint = getattr(operator, hint_attr_name) # pylint: disable=g-bool-id-comparison if op_hint is False and provided_hint_value: raise ValueError(message) if op_hint and provided_hint_value is False: raise ValueError(message) if op_hint or provided_hint_value: return True if op_hint is False or provided_hint_value is False: return False # pylint: enable=g-bool-id-comparison return None
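# A minimal sketch of the batch-broadcasting helper above (assuming eager
# execution and `import tensorflow as tf`; shapes are illustrative):
#
#   x = tf.random.normal(shape=(2, 1, 4, 4))   # batch shape [2, 1]
#   y = tf.random.normal(shape=(1, 3, 5, 5))   # batch shape [1, 3]
#   x_bc, y_bc = broadcast_matrix_batch_dims([x, y])
#   x_bc.shape  # ==> (2, 3, 4, 4)
#   y_bc.shape  # ==> (2, 3, 5, 5)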
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_util.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Registrations for LinearOperator.cholesky.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_algebra from tensorflow.python.ops.linalg import linear_operator_block_diag from tensorflow.python.ops.linalg import linear_operator_diag from tensorflow.python.ops.linalg import linear_operator_identity from tensorflow.python.ops.linalg import linear_operator_kronecker from tensorflow.python.ops.linalg import linear_operator_lower_triangular # By default, compute the Cholesky of the dense matrix, and return a # LowerTriangular operator. Methods below specialize this registration. @linear_operator_algebra.RegisterCholesky(linear_operator.LinearOperator) def _cholesky_linear_operator(linop): return linear_operator_lower_triangular.LinearOperatorLowerTriangular( linalg_ops.cholesky(linop.to_dense()), is_non_singular=True, is_self_adjoint=False, is_square=True) @linear_operator_algebra.RegisterCholesky( linear_operator_diag.LinearOperatorDiag) def _cholesky_diag(diag_operator): return linear_operator_diag.LinearOperatorDiag( math_ops.sqrt(diag_operator.diag), is_non_singular=True, is_self_adjoint=True, is_positive_definite=True, is_square=True) @linear_operator_algebra.RegisterCholesky( linear_operator_identity.LinearOperatorIdentity) def _cholesky_identity(identity_operator): return linear_operator_identity.LinearOperatorIdentity( num_rows=identity_operator._num_rows, # pylint: disable=protected-access batch_shape=identity_operator.batch_shape, dtype=identity_operator.dtype, is_non_singular=True, is_self_adjoint=True, is_positive_definite=True, is_square=True) @linear_operator_algebra.RegisterCholesky( linear_operator_identity.LinearOperatorScaledIdentity) def _cholesky_scaled_identity(identity_operator): return linear_operator_identity.LinearOperatorScaledIdentity( num_rows=identity_operator._num_rows, # pylint: disable=protected-access multiplier=math_ops.sqrt(identity_operator.multiplier), is_non_singular=True, is_self_adjoint=True, is_positive_definite=True, is_square=True) @linear_operator_algebra.RegisterCholesky( linear_operator_block_diag.LinearOperatorBlockDiag) def _cholesky_block_diag(block_diag_operator): # We take the cholesky of each block on the diagonal. 
return linear_operator_block_diag.LinearOperatorBlockDiag( operators=[ operator.cholesky() for operator in block_diag_operator.operators], is_non_singular=True, is_self_adjoint=False, is_square=True) @linear_operator_algebra.RegisterCholesky( linear_operator_kronecker.LinearOperatorKronecker) def _cholesky_kronecker(kronecker_operator): # Cholesky decomposition of a Kronecker product is the Kronecker product # of cholesky decompositions. return linear_operator_kronecker.LinearOperatorKronecker( operators=[ operator.cholesky() for operator in kronecker_operator.operators], is_non_singular=True, is_self_adjoint=False, is_square=True)
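# A minimal sketch of what the diag registration above produces (assuming
# eager execution and `import tensorflow as tf`):
#
#   diag_op = tf.linalg.LinearOperatorDiag(
#       [4., 9.], is_self_adjoint=True, is_positive_definite=True)
#   chol = diag_op.cholesky()
#   # `chol` is another LinearOperatorDiag (diag [2., 3.]); no dense Cholesky
#   # factorization is performed.
#   chol.to_dense()  # ==> [[2., 0.], [0., 3.]]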
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/cholesky_registrations.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Registrations for LinearOperator.matmul.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_algebra from tensorflow.python.ops.linalg import linear_operator_circulant from tensorflow.python.ops.linalg import linear_operator_composition from tensorflow.python.ops.linalg import linear_operator_diag from tensorflow.python.ops.linalg import linear_operator_identity from tensorflow.python.ops.linalg import linear_operator_lower_triangular from tensorflow.python.ops.linalg import linear_operator_zeros from tensorflow.python.ops.linalg import registrations_util # By default, use a LinearOperatorComposition to delay the computation. @linear_operator_algebra.RegisterMatmul( linear_operator.LinearOperator, linear_operator.LinearOperator) def _matmul_linear_operator(linop_a, linop_b): """Generic matmul of two `LinearOperator`s.""" is_square = registrations_util.is_square(linop_a, linop_b) is_non_singular = None is_self_adjoint = None is_positive_definite = None if is_square: is_non_singular = registrations_util.combined_non_singular_hint( linop_a, linop_b) elif is_square is False: # pylint:disable=g-bool-id-comparison is_non_singular = False is_self_adjoint = False is_positive_definite = False return linear_operator_composition.LinearOperatorComposition( operators=[linop_a, linop_b], is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, ) # Identity @linear_operator_algebra.RegisterMatmul( linear_operator_identity.LinearOperatorIdentity, linear_operator.LinearOperator) def _matmul_linear_operator_identity_left(identity, linop): del identity return linop @linear_operator_algebra.RegisterMatmul( linear_operator.LinearOperator, linear_operator_identity.LinearOperatorIdentity) def _matmul_linear_operator_identity_right(linop, identity): del identity return linop # Zeros @linear_operator_algebra.RegisterMatmul( linear_operator.LinearOperator, linear_operator_zeros.LinearOperatorZeros) def _matmul_linear_operator_zeros_right(linop, zeros): if not zeros.is_square or not linop.is_square: raise ValueError("Matmul with non-square `LinearOperator`s or non-square " "`LinearOperatorZeros` not supported at this time.") return zeros @linear_operator_algebra.RegisterMatmul( linear_operator_zeros.LinearOperatorZeros, linear_operator.LinearOperator) def _matmul_linear_operator_zeros_left(zeros, linop): if not zeros.is_square or not linop.is_square: raise ValueError("Matmul with non-square `LinearOperator`s or non-square " "`LinearOperatorZeros` not supported at this time.") return zeros # Diag. 
@linear_operator_algebra.RegisterMatmul( linear_operator_diag.LinearOperatorDiag, linear_operator_diag.LinearOperatorDiag) def _matmul_linear_operator_diag(linop_a, linop_b): return linear_operator_diag.LinearOperatorDiag( diag=linop_a.diag * linop_b.diag, is_non_singular=registrations_util.combined_non_singular_hint( linop_a, linop_b), is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_a, linop_b), is_positive_definite=( registrations_util.combined_commuting_positive_definite_hint( linop_a, linop_b)), is_square=True) @linear_operator_algebra.RegisterMatmul( linear_operator_diag.LinearOperatorDiag, linear_operator_identity.LinearOperatorScaledIdentity) def _matmul_linear_operator_diag_scaled_identity_right( linop_diag, linop_scaled_identity): return linear_operator_diag.LinearOperatorDiag( diag=linop_diag.diag * linop_scaled_identity.multiplier, is_non_singular=registrations_util.combined_non_singular_hint( linop_diag, linop_scaled_identity), is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_diag, linop_scaled_identity), is_positive_definite=( registrations_util.combined_commuting_positive_definite_hint( linop_diag, linop_scaled_identity)), is_square=True) @linear_operator_algebra.RegisterMatmul( linear_operator_identity.LinearOperatorScaledIdentity, linear_operator_diag.LinearOperatorDiag) def _matmul_linear_operator_diag_scaled_identity_left( linop_scaled_identity, linop_diag): return linear_operator_diag.LinearOperatorDiag( diag=linop_diag.diag * linop_scaled_identity.multiplier, is_non_singular=registrations_util.combined_non_singular_hint( linop_diag, linop_scaled_identity), is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_diag, linop_scaled_identity), is_positive_definite=( registrations_util.combined_commuting_positive_definite_hint( linop_diag, linop_scaled_identity)), is_square=True) @linear_operator_algebra.RegisterMatmul( linear_operator_diag.LinearOperatorDiag, linear_operator_lower_triangular.LinearOperatorLowerTriangular) def _matmul_linear_operator_diag_tril(linop_diag, linop_triangular): return linear_operator_lower_triangular.LinearOperatorLowerTriangular( tril=linop_diag.diag[..., None] * linop_triangular.to_dense(), is_non_singular=registrations_util.combined_non_singular_hint( linop_diag, linop_triangular), # This is safe to do since the Triangular matrix is only self-adjoint # when it is a diagonal matrix, and hence commutes. is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_diag, linop_triangular), is_positive_definite=None, is_square=True) @linear_operator_algebra.RegisterMatmul( linear_operator_lower_triangular.LinearOperatorLowerTriangular, linear_operator_diag.LinearOperatorDiag) def _matmul_linear_operator_tril_diag(linop_triangular, linop_diag): return linear_operator_lower_triangular.LinearOperatorLowerTriangular( tril=linop_triangular.to_dense() * linop_diag.diag, is_non_singular=registrations_util.combined_non_singular_hint( linop_diag, linop_triangular), # This is safe to do since the Triangular matrix is only self-adjoint # when it is a diagonal matrix, and hence commutes. is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_diag, linop_triangular), is_positive_definite=None, is_square=True) # Circulant. 
@linear_operator_algebra.RegisterMatmul( linear_operator_circulant.LinearOperatorCirculant, linear_operator_circulant.LinearOperatorCirculant) def _matmul_linear_operator_circulant_circulant(linop_a, linop_b): return linear_operator_circulant.LinearOperatorCirculant( spectrum=linop_a.spectrum * linop_b.spectrum, is_non_singular=registrations_util.combined_non_singular_hint( linop_a, linop_b), is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint( linop_a, linop_b), is_positive_definite=( registrations_util.combined_commuting_positive_definite_hint( linop_a, linop_b)), is_square=True)
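# A minimal sketch of what the diag/diag registration above produces
# (assuming eager execution and `import tensorflow as tf`):
#
#   a = tf.linalg.LinearOperatorDiag([1., 2.])
#   b = tf.linalg.LinearOperatorDiag([3., 4.])
#   c = a.matmul(b)
#   # `c` is a LinearOperatorDiag with diag [3., 8.]; no dense matmul occurs.
#   c.to_dense()  # ==> [[3., 0.], [0., 8.]]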
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/matmul_registrations.py
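A short usage sketch (not part of the file above) of what the diag registrations buy: multiplying two diagonal operators stays structured instead of densifying. It assumes the TF 1.15 behaviour where `LinearOperator.matmul` dispatches to these registrations when its argument is itself a `LinearOperator`.

import tensorflow as tf

# Two diagonal operators; the registered handler multiplies the diagonals
# elementwise and combines the non-singular / self-adjoint / PD hints.
op_a = tf.linalg.LinearOperatorDiag([1., 2., 3.], is_non_singular=True)
op_b = tf.linalg.LinearOperatorDiag([4., 5., 6.], is_non_singular=True)

op_ab = op_a.matmul(op_b)
# Expected: another LinearOperatorDiag whose diag_part() is [4., 10., 18.].
print(type(op_ab).__name__, op_ab.diag_part())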
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Takes the adjoint of a `LinearOperator`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg_impl as linalg from tensorflow.python.ops.linalg import linear_operator from tensorflow.python.ops.linalg import linear_operator_util from tensorflow.python.util.tf_export import tf_export __all__ = [] @tf_export("linalg.LinearOperatorAdjoint") class LinearOperatorAdjoint(linear_operator.LinearOperator): """`LinearOperator` representing the adjoint of another operator. This operator represents the adjoint of another operator. ```python # Create a 2 x 2 linear operator. operator = LinearOperatorFullMatrix([[1 - i., 3.], [0., 1. + i]]) operator_adjoint = LinearOperatorAdjoint(operator) operator_adjoint.to_dense() ==> [[1. + i, 0.] [3., 1 - i]] operator_adjoint.shape ==> [2, 2] operator_adjoint.log_abs_determinant() ==> - log(2) x = ... Shape [2, 4] Tensor operator_adjoint.matmul(x) ==> Shape [2, 4] Tensor, equal to operator.matmul(x, adjoint=True) ``` #### Performance The performance of `LinearOperatorAdjoint` depends on the underlying operators performance. #### Matrix property hints This `LinearOperator` is initialized with boolean flags of the form `is_X`, for `X = non_singular, self_adjoint, positive_definite, square`. These have the following meaning: * If `is_X == True`, callers should expect the operator to have the property `X`. This is a promise that should be fulfilled, but is *not* a runtime assert. For example, finite floating point precision may result in these promises being violated. * If `is_X == False`, callers should expect the operator to not have `X`. * If `is_X == None` (the default), callers should have no expectation either way. """ def __init__(self, operator, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None): r"""Initialize a `LinearOperatorAdjoint`. `LinearOperatorAdjoint` is initialized with an operator `A`. The `solve` and `matmul` methods effectively flip the `adjoint` argument. E.g. ``` A = MyLinearOperator(...) B = LinearOperatorAdjoint(A) x = [....] # a vector assert A.matvec(x, adjoint=True) == B.matvec(x, adjoint=False) ``` Args: operator: `LinearOperator` object. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. 
See: https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Default is `operator.name + "_adjoint"`. Raises: ValueError: If `operator.is_non_singular` is False. """ self._operator = operator # The congruency of is_non_singular and is_self_adjoint was checked in the # base operator. combine_hint = ( linear_operator_util.use_operator_or_provided_hint_unless_contradicting) is_square = combine_hint( operator, "is_square", is_square, "An operator is square if and only if its adjoint is square.") is_non_singular = combine_hint( operator, "is_non_singular", is_non_singular, "An operator is non-singular if and only if its adjoint is " "non-singular.") is_self_adjoint = combine_hint( operator, "is_self_adjoint", is_self_adjoint, "An operator is self-adjoint if and only if its adjoint is " "self-adjoint.") is_positive_definite = combine_hint( operator, "is_positive_definite", is_positive_definite, "An operator is positive-definite if and only if its adjoint is " "positive-definite.") # Initialization. if name is None: name = operator.name + "_adjoint" with ops.name_scope(name, values=operator.graph_parents): super(LinearOperatorAdjoint, self).__init__( dtype=operator.dtype, graph_parents=operator.graph_parents, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name) @property def operator(self): """The operator before taking the adjoint.""" return self._operator def _assert_non_singular(self): return self.operator.assert_non_singular() def _assert_positive_definite(self): return self.operator.assert_positive_definite() def _assert_self_adjoint(self): return self.operator.assert_self_adjoint() def _shape(self): # Rotate last dimension shape = self.operator.shape return shape[:-2].concatenate([shape[-1], shape[-2]]) def _shape_tensor(self): # Rotate last dimension shape = self.operator.shape_tensor() return array_ops.concat([ shape[:-2], [shape[-1], shape[-2]]], axis=-1) def _matmul(self, x, adjoint=False, adjoint_arg=False): return self.operator.matmul( x, adjoint=(not adjoint), adjoint_arg=adjoint_arg) def _matvec(self, x, adjoint=False): return self.operator.matvec(x, adjoint=(not adjoint)) def _determinant(self): if self.is_self_adjoint: return self.operator.determinant() return math_ops.conj(self.operator.determinant()) def _log_abs_determinant(self): return self.operator.log_abs_determinant() def _trace(self): if self.is_self_adjoint: return self.operator.trace() return math_ops.conj(self.operator.trace()) def _solve(self, rhs, adjoint=False, adjoint_arg=False): return self.operator.solve( rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg) def _solvevec(self, rhs, adjoint=False): return self.operator.solvevec(rhs, adjoint=(not adjoint)) def _to_dense(self): if self.is_self_adjoint: return self.operator.to_dense() return linalg.adjoint(self.operator.to_dense()) def _add_to_tensor(self, x): return self.to_dense() + x
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/linalg/linear_operator_adjoint.py
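A minimal example of `tf.linalg.LinearOperatorAdjoint` in the same spirit as the docstring above, using a real-valued matrix so the adjoint is simply the transpose.

import tensorflow as tf

operator = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
adjoint = tf.linalg.LinearOperatorAdjoint(operator)

x = tf.constant([[1., 0.], [0., 1.]])
# matmul on the adjoint flips the `adjoint` flag on the wrapped operator,
# so this is the same as operator.matmul(x, adjoint=True), i.e. A^T @ x.
y = adjoint.matmul(x)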
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_factory_ops.constant_value.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor_value from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedConstantValueOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters( #========================================================================= # 0-dimensional tensors. dict(pylist='x', expected_shape=()), #========================================================================= # 1-dimensional tensors. dict(pylist=[1, 2, 3], expected_shape=(3,)), #========================================================================= # 2-dimensional tensors. dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)), dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)), #========================================================================= # 3-dimensional tensors. dict( pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], expected_shape=(3, None, None)), dict( pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], ragged_rank=1, expected_shape=(3, None, 2)), dict( pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], inner_shape=(2,), expected_shape=(3, None, 2)), dict( pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], ragged_rank=1, inner_shape=(2,), expected_shape=(3, None, 2)), # 3-dimensional tensors with numpy arrays dict( pylist=[[[1, 2], np.array([3, np.array(4)])], np.array([]), [[5, 6], [7, 8], [9, 0]]], expected_shape=(3, None, None)), dict( pylist=[[[1, 2], np.array([3, np.array(4)])], np.array([]), [[5, 6], [7, 8], [9, 0]]], ragged_rank=1, expected_shape=(3, None, 2)), dict( pylist=[[[1, 2], np.array([3, np.array(4)])], np.array([]), [[5, 6], [7, 8], [9, 0]]], inner_shape=(2,), expected_shape=(3, None, 2)), dict( pylist=[[[1, 2], np.array([3, np.array(4)])], np.array([]), [[5, 6], [7, 8], [9, 0]]], ragged_rank=1, inner_shape=(2,), expected_shape=(3, None, 2)), #========================================================================= # 4-dimensional tensors. 
dict( pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]], expected_shape=(2, None, None, None)), dict( pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]], ragged_rank=1, expected_shape=(2, None, 2, 2)), dict( pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]], inner_shape=(2,), expected_shape=(2, None, None, 2)), dict( pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]], inner_shape=(2, 2), expected_shape=(2, None, 2, 2)), # 4-dimensional tensors with numpy arrays dict( pylist=np.array([[[np.array([1, 2]), [3, 4]], [[5, 6], [7, 8]]], np.array([[[2, 4], [6, 8]], [[1, 5], [7, 9]]])]), expected_shape=(2, None, None, None)), #========================================================================= # Empty tensors (no scalar values) w/ default ragged_rank and inner_shape dict(pylist=[], expected_shape=(0,)), dict(pylist=[[], [], np.array([])], expected_shape=(3, None)), dict( pylist=[[[], []], [], [[], [[]]]], expected_shape=(3, None, None, None)), dict( pylist=np.array([np.array([[], []]), np.array([]), [[], [[]]]]), expected_shape=(3, None, None, None)), #========================================================================= # Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape dict(pylist=[], ragged_rank=1, expected_shape=(0, None)), dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)), dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)), dict( pylist=[], ragged_rank=1, inner_shape=(100, 20), expected_shape=(0, None, 100, 20)), dict( pylist=[], ragged_rank=2, inner_shape=(100, 20), expected_shape=(0, None, None, 100, 20)), dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)), dict(pylist=[], inner_shape=(0,), expected_shape=(0,)), dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)), dict( pylist=np.array([]), ragged_rank=1, inner_shape=(100, 20), expected_shape=(0, None, 100, 20)), #========================================================================= # default/inferred dtypes. # # Note: numpy has different default/inferred types than tensorflow. # Since we are using values, not tensors, we get the default numpy types # here. 
dict(pylist=[], expected_dtype=np.float64), dict(pylist=[[[], [[[]], []]]], expected_dtype=np.float64), dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=np.int64), dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=np.float64), dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=np.float64), dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=np.dtype('S1')), dict(pylist=[[True]], expected_dtype=np.bool), dict( pylist=[np.array([1, 2]), np.array([3.]), [4, 5, 6]], expected_dtype=np.float64), #========================================================================= # explicit dtypes dict(pylist=[], dtype=np.float32), dict(pylist=[], dtype=np.dtype('S1')), dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int64), dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int32), dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.float32), dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float16), dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float32), dict( pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']], dtype=np.dtype('S1')), dict(pylist=[], dtype=dtypes.float32, expected_dtype=np.float32), dict(pylist=[], dtype=dtypes.int32, expected_dtype=np.int32), ) def testRaggedValues(self, pylist, dtype=None, ragged_rank=None, inner_shape=None, expected_shape=None, expected_dtype=None): """Tests that `ragged_value(pylist).to_list() == pylist`.""" rt = ragged_factory_ops.constant_value( pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape) # Normalize the pylist, i.e., convert all np.arrays to list. # E.g., [np.array((1,2))] --> [[1,2]] pylist = _normalize_pylist(pylist) # If dtype was explicitly specified, check it. if expected_dtype is not None: self.assertEqual(rt.dtype, expected_dtype) elif dtype is not None: self.assertEqual(rt.dtype, dtype) # If ragged_rank was explicitly specified, check it. if ragged_rank is not None: if isinstance(rt, ragged_tensor_value.RaggedTensorValue): self.assertEqual(rt.ragged_rank, ragged_rank) else: self.assertEqual(0, ragged_rank) # If inner_shape was explicitly specified, check it. 
if inner_shape is not None: if isinstance(rt, ragged_tensor_value.RaggedTensorValue): self.assertEqual(rt.flat_values.shape[1:], inner_shape) else: self.assertEqual(rt.shape, inner_shape) if expected_shape is not None: self.assertEqual(tuple(rt.shape), expected_shape) if rt.shape: if isinstance(rt, ragged_tensor_value.RaggedTensorValue): self.assertEqual(rt.to_list(), pylist) else: self.assertEqual(rt.tolist(), pylist) if expected_shape is not None: self.assertEqual(rt.shape, expected_shape) else: self.assertEqual(rt, pylist) if expected_shape is not None: self.assertEqual((), expected_shape) @parameterized.parameters( dict( pylist=12, ragged_rank=1, exception=ValueError, message='Invalid pylist=12: incompatible with ragged_rank=1'), dict( pylist=np.array(12), ragged_rank=1, exception=ValueError, message='Invalid pylist=array\\(12\\): incompatible with ' 'ragged_rank=1'), dict( pylist=12, inner_shape=(1,), exception=ValueError, message='Invalid pylist=12: incompatible with ' 'dim\\(inner_shape\\)=1'), dict( pylist=[[[1], [2]]], ragged_rank=-1, exception=ValueError, message='Invalid ragged_rank=-1: must be nonnegative'), dict( pylist=[[1, [2]]], exception=ValueError, message='all scalar values must have the same nesting depth'), dict( pylist=[[[1]], [[[2]]]], exception=ValueError, message='all scalar values must have the same nesting depth'), dict( pylist=[[1], [[]]], exception=ValueError, message='Invalid pylist=.*: empty list nesting is greater ' 'than scalar value nesting'), dict( pylist=[1, 2, 3], ragged_rank=1, exception=ValueError, message='pylist has scalar values depth 1, but ragged_rank=1 ' 'requires scalar value depth greater than 1'), dict( pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], ragged_rank=2, exception=ValueError, message='pylist has scalar values depth 2, but ragged_rank=2 ' 'requires scalar value depth greater than 2'), dict( pylist=[1, 2, 3], inner_shape=(1, 1), exception=ValueError, message='cannot reshape array'), dict( pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], inner_shape=(2, 2), ragged_rank=1, exception=ValueError, message='Invalid pylist=.*: incompatible with ragged_rank=1 and ' 'dim\\(inner_shape\\)=2'), dict( pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]], ragged_rank=1, exception=ValueError, message='inner values have inconsistent shape'), dict( pylist=[[[], [[]]]], ragged_rank=1, exception=ValueError, message='inner values have inconsistent shape'), ) def testRaggedValuesError(self, pylist, dtype=None, ragged_rank=None, inner_shape=None, exception=None, message=None): """Tests that `constant_value()` raises an expected exception.""" self.assertRaisesRegexp( exception, message, ragged_factory_ops.constant_value, pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape) def _normalize_pylist(item): """Convert all (possibly nested) np.arrays contained in item to list.""" # convert np.arrays in current level to list if np.ndim(item) == 0: return item level = (x.tolist() if isinstance(x, np.ndarray) else x for x in item) return [_normalize_pylist(el) if np.ndim(el) != 0 else el for el in level] if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_constant_value_op_test.py
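For orientation, a small sketch of the API under test: `constant_value` builds a numpy-backed `RaggedTensorValue` (no session needed), and `ragged_rank` / `inner_shape` control how deeply the nested lists are treated as ragged. The commented outputs reflect the expectations encoded in the tests above.

from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]])
print(tuple(rt.shape))   # (3, None)
print(rt.to_list())      # [[1, 2, 3], [4], [5, 6]]

# With ragged_rank=1 and inner_shape=(2,), the innermost axis is dense.
rt2 = ragged_factory_ops.constant_value(
    [[[1, 2], [3, 4]], [], [[5, 6]]], ragged_rank=1, inner_shape=(2,))
print(tuple(rt2.shape))  # (3, None, 2)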
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for RaggedTensor.from_tensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testDocStringExamples(self): # The examples from RaggedTensor.from_tensor.__doc__. dt = constant_op.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]]) self.assertAllEqual( RaggedTensor.from_tensor(dt), [[5, 7, 0], [0, 3, 0], [6, 0, 0]]) self.assertAllEqual( RaggedTensor.from_tensor(dt, lengths=[1, 0, 3]), [[5], [], [6, 0, 0]]) self.assertAllEqual( RaggedTensor.from_tensor(dt, padding=0), [[5, 7], [0, 3], [6]]) dt_3d = constant_op.constant([[[5, 0], [7, 0], [0, 0]], [[0, 0], [3, 0], [0, 0]], [[6, 0], [0, 0], [0, 0]]]) self.assertAllEqual( RaggedTensor.from_tensor(dt_3d, lengths=([2, 0, 3], [1, 1, 2, 0, 1])), [[[5], [7]], [], [[6, 0], [], [0]]]) @parameterized.parameters( # 2D test cases, no length or padding. 
{ 'tensor': [[]], 'expected': [[]], }, { 'tensor': [[1]], 'expected': [[1]], }, { 'tensor': [[1, 2]], 'expected': [[1, 2]], }, { 'tensor': [[1], [2], [3]], 'expected': [[1], [2], [3]], }, { 'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'expected': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], }, # 3D test cases, no length or padding { 'tensor': [[[]]], 'expected': [[[]]], }, { 'tensor': [[[]]], 'expected': [[[]]], 'ragged_rank': 1, }, { 'tensor': [[[1]]], 'expected': [[[1]]], }, { 'tensor': [[[1, 2]]], 'expected': [[[1, 2]]], }, { 'tensor': [[[1, 2], [3, 4]]], 'expected': [[[1, 2], [3, 4]]], }, { 'tensor': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]], 'expected': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]], }, { 'tensor': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]], 'expected': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]], }, # 2D test cases, with length { 'tensor': [[1]], 'lengths': [1], 'expected': [[1]] }, { 'tensor': [[1]], 'lengths': [0], 'expected': [[]] }, { 'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'lengths': [0, 1, 2], 'expected': [[], [4], [7, 8]] }, { 'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'lengths': [0, 0, 0], 'expected': [[], [], []] }, { 'tensor': [[1, 2], [3, 4]], 'lengths': [2, 2], 'expected': [[1, 2], [3, 4]] }, { 'tensor': [[1, 2], [3, 4]], 'lengths': [7, 8], # lengths > ncols: truncated to ncols 'expected': [[1, 2], [3, 4]] }, { 'tensor': [[1, 2], [3, 4]], 'lengths': [-2, -1], # lengths < 0: treated as zero 'expected': [[], []] }, # 3D test cases, with length { 'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 'lengths': [0, 0], 'expected': [[], []] }, { 'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 'lengths': [1, 2], 'expected': [[[1, 2]], [[5, 6], [7, 8]]] }, { 'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 'lengths': [2, 2], 'expected': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] }, # 2D test cases, with padding { 'tensor': [[1]], 'padding': 0, 'expected': [[1]] }, { 'tensor': [[0]], 'padding': 0, 'expected': [[]] }, { 'tensor': [[0, 1]], 'padding': 0, 'expected': [[0, 1]] }, { 'tensor': [[1, 0]], 'padding': 0, 'expected': [[1]] }, { 'tensor': [[1, 0, 1, 0, 0, 1, 0, 0]], 'padding': 0, 'expected': [[1, 0, 1, 0, 0, 1]] }, { 'tensor': [[3, 7, 0, 0], [2, 0, 0, 0], [5, 0, 0, 0]], 'padding': 0, 'expected': [[3, 7], [2], [5]] }, # 3D test cases, with padding { 'tensor': [[[1]]], 'padding': [0], 'expected': [[[1]]] }, { 'tensor': [[[0]]], 'padding': [0], 'expected': [[]] }, { 'tensor': [[[0, 0], [1, 2]], [[3, 4], [0, 0]]], 'padding': [0, 0], 'expected': [[[0, 0], [1, 2]], [[3, 4]]] }, # 4D test cases, with padding { 'tensor': [ [[[1, 2], [3, 4]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]], [[0, 0], [0, 0]]], [[[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]] ], 'padding': [[0, 0], [0, 0]], 'expected': [ [[[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]], [] ] }, # 3D test cases, with ragged_rank=2. 
{ 'tensor': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]], 'ragged_rank': 2, 'expected': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]] }, { 'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 'ragged_rank': 2, 'lengths': [2, 0, 2, 1], 'expected': [[[1, 2], []], [[5, 6], [7]]] }, { 'tensor': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]], 'ragged_rank': 2, 'padding': 0, 'expected': [[[1], [2, 3]], [[], [4]]] }, # 4D test cases, with ragged_rank>1 { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'ragged_rank': 2, 'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]] }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'ragged_rank': 3, 'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]] }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'ragged_rank': 2, 'padding': [0, 0], 'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8]]]] }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'lengths': ([2, 2], [1, 2, 2, 1]), 'expected': [[[[1, 0]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8]]]], 'ragged_rank': 2, 'use_ragged_rank': False }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'lengths': [[2, 2], [1, 2, 2, 1]], 'expected': [[[[1, 0]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8]]]], 'ragged_rank': 2, 'use_ragged_rank': False }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'ragged_rank': 3, 'padding': 0, 'expected': [[[[1], [2, 3]], [[], [4]]], [[[5, 6], [7]], [[0, 8], []]]] }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'lengths': ([2, 2], [2, 2, 2, 2], [1, 2, 0, 1, 2, 1, 2, 0]), 'expected': [[[[1], [2, 3]], [[], [4]]], [[[5, 6], [7]], [[0, 8], []]]], 'ragged_rank': 3, 'use_ragged_rank': False }, { 'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]], [[[5, 6], [7, 0]], [[0, 8], [0, 0]]]], 'lengths': [[2, 2], [2, 2, 2, 2], [1, 2, 0, 1, 2, 1, 2, 0]], 'expected': [[[[1], [2, 3]], [[], [4]]], [[[5, 6], [7]], [[0, 8], []]]], 'ragged_rank': 3, 'use_ragged_rank': False }, ) # pyformat: disable def testRaggedFromTensor(self, tensor, expected, lengths=None, padding=None, ragged_rank=1, use_ragged_rank=True): dt = constant_op.constant(tensor) if use_ragged_rank: rt = RaggedTensor.from_tensor(dt, lengths, padding, ragged_rank) else: rt = RaggedTensor.from_tensor(dt, lengths, padding) self.assertEqual(type(rt), RaggedTensor) self.assertEqual(rt.ragged_rank, ragged_rank) self.assertTrue( dt.shape.is_compatible_with(rt.shape), '%s is incompatible with %s' % (dt.shape, rt.shape)) self.assertAllEqual(rt, expected) def testHighDimensions(self): # Use distinct prime numbers for all dimension shapes in this test, so # we can see any errors that are caused by mixing up dimension sizes. 
dt = array_ops.reshape( math_ops.range(3 * 5 * 7 * 11 * 13 * 17), [3, 5, 7, 11, 13, 17]) for ragged_rank in range(1, 4): rt = RaggedTensor.from_tensor(dt, ragged_rank=ragged_rank) self.assertEqual(type(rt), RaggedTensor) self.assertEqual(rt.ragged_rank, ragged_rank) self.assertTrue( dt.shape.is_compatible_with(rt.shape), '%s is incompatible with %s' % (dt.shape, rt.shape)) self.assertAllEqual(rt, self.evaluate(dt).tolist()) @parameterized.parameters( # With no padding or lengths { 'dt_shape': [0, 0], 'expected': [] }, { 'dt_shape': [0, 3], 'expected': [] }, { 'dt_shape': [3, 0], 'expected': [[], [], []] }, { 'dt_shape': [0, 2, 3], 'expected': [] }, { 'dt_shape': [2, 0, 3], 'expected': [[], []] }, { 'dt_shape': [2, 3, 0], 'expected': [[[], [], []], [[], [], []]] }, { 'dt_shape': [2, 3, 0, 1], 'expected': [[[], [], []], [[], [], []]] }, { 'dt_shape': [2, 3, 1, 0], 'expected': [[[[]], [[]], [[]]], [[[]], [[]], [[]]]] }, # With padding { 'dt_shape': [0, 0], 'padding': 0, 'expected': [] }, { 'dt_shape': [0, 3], 'padding': 0, 'expected': [] }, { 'dt_shape': [3, 0], 'padding': 0, 'expected': [[], [], []] }, { 'dt_shape': [0, 2, 3], 'padding': [0, 0, 0], 'expected': [] }, { 'dt_shape': [2, 0, 3], 'padding': [0, 0, 0], 'expected': [[], []] }, { 'dt_shape': [2, 3, 0], 'padding': [], 'expected': [[], []] }, # With lengths { 'dt_shape': [0, 0], 'lengths': [], 'expected': [] }, { 'dt_shape': [0, 3], 'lengths': [], 'expected': [] }, { 'dt_shape': [3, 0], 'lengths': [0, 0, 0], 'expected': [[], [], []] }, { 'dt_shape': [3, 0], 'lengths': [2, 3, 4], # lengths > ncols: truncated to ncols 'expected': [[], [], []] }, { 'dt_shape': [0, 2, 3], 'lengths': [], 'expected': [] }, { 'dt_shape': [2, 0, 3], 'lengths': [0, 0], 'expected': [[], []] }, { 'dt_shape': [2, 3, 0], 'lengths': [0, 0], 'expected': [[], []] }, ) def testEmpty(self, dt_shape, expected, lengths=None, padding=None): dt = array_ops.zeros(dt_shape) rt = RaggedTensor.from_tensor(dt, lengths, padding) self.assertEqual(type(rt), RaggedTensor) self.assertEqual(rt.ragged_rank, 1) self.assertTrue(dt.shape.is_compatible_with(rt.shape)) self.assertAllEqual(rt, expected) @parameterized.parameters( { 'tensor': [[1]], 'lengths': [0], 'padding': 0, 'error': (ValueError, 'Specify lengths or padding, but not both') }, { 'tensor': [[1]], 'lengths': [0.5], 'error': (TypeError, 'lengths must be an integer tensor') }, { 'tensor': [[1, 2, 3]], 'lengths': [[1], [1]], 'error': (ValueError, r'Shape \(1, 3\) must have rank at least 3') }, { 'tensor': [[1]], 'padding': 'a', 'error': (TypeError, '.*') }, { 'tensor': [[1]], 'padding': [1], 'error': (ValueError, r'Shapes \(1,\) and \(\) are incompatible') }, { 'tensor': [[[1]]], 'padding': 1, 'error': (ValueError, r'Shapes \(\) and \(1,\) are incompatible') }, { 'tensor': [[1]], 'ragged_rank': 'bad', 'error': (TypeError, r'ragged_rank expected int, got \'bad\'') }, { 'tensor': [[1]], 'ragged_rank': 0, 'error': (ValueError, r'ragged_rank must be greater than 0; got 0') }, { 'tensor': [[1]], 'ragged_rank': -1, 'error': (ValueError, r'ragged_rank must be greater than 0; got -1') }, ) def testErrors(self, tensor, lengths=None, padding=None, ragged_rank=1, error=None): dt = constant_op.constant(tensor) self.assertRaisesRegexp(error[0], error[1], RaggedTensor.from_tensor, dt, lengths, padding, ragged_rank) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py
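The three docstring examples exercised by `testDocStringExamples` above, written out as a standalone sketch (expected values shown as comments).

import tensorflow as tf

dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])

rt1 = tf.RaggedTensor.from_tensor(dt)
# [[5, 7, 0], [0, 3, 0], [6, 0, 0]]  -- nothing trimmed

rt2 = tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])
# [[5], [], [6, 0, 0]]  -- per-row lengths trim each row explicitly

rt3 = tf.RaggedTensor.from_tensor(dt, padding=0)
# [[5, 7], [0, 3], [6]]  -- trailing padding values are dropped per row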
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Batch gather operations for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_util #=============================================================================== # ragged.batch_gather #=============================================================================== def batch_gather(params, indices, name=None): """Gathers slices from `params` according to `indices` with batch dims. This operation is similar to `gather`, but it assumes that the leading `N` dimensions of `indices` and `params` are batch dimensions, and performs a gather within each batch. In particular, when using this operation with `N` batch dimensions `B1...BN`: * `indices` has shape `[B1...BN, I]` * `params` has shape `[B1...BN, P1...PM]`. * `result` has shape `[B1...BN, I, P2...PM]`. * `result[b1...bN, i, p2...pM] = params[b1...bN, indices[b1...bN, i], p2...pM]` Args: params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`, `M>0`). indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`). name: A name for the operation (optional). Returns: A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`. `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`. #### Example: ```python >>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) >>> indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]]) >>> tf.compat.v1.batch_gather(params, indices) [['b', 'c', 'a'], [], [], ['e', 'e']] ``` """ if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)): return array_ops.batch_gather(params, indices, name) with ops.name_scope(name, 'RaggedBatchGather', [params, indices]): params = ragged_tensor.convert_to_tensor_or_ragged_tensor( params, name='params') indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( indices, name='indices') params, indices = ragged_tensor.match_row_splits_dtypes(params, indices) indices_ndims = indices.shape.ndims if indices_ndims is None: raise ValueError( 'batch_gather does not allow indices with unknown shape.') if indices_ndims == 0: raise ValueError('indices.rank must be at least 1.') if ragged_tensor.is_ragged(indices): # If the outermost ragged dimension is a batch dimension, recurse. 
if indices_ndims > 2: if not ragged_tensor.is_ragged(params): raise ValueError('batch shape from indices does ' 'not match params shape') checks = [check_ops.assert_equal(params.row_splits, indices.row_splits)] with ops.control_dependencies(checks): return ragged_tensor.RaggedTensor.from_row_splits( batch_gather(params.values, indices.values), indices.row_splits, validate=False) # Otherwise, indices is a 2D ragged tensor with 1 ragged dimension. else: # Ensure that `params` is ragged and has at least 2 dimensions. if not ragged_tensor.is_ragged(params): if params.shape.ndims is not None and params.shape.ndims < 2: raise ValueError('batch shape from indices does ' 'not match params shape') params = ragged_tensor.RaggedTensor.from_tensor( params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype) # Adjust indices from within-batch to global (in params.values), and # then use ragged.gather to gather them. num_indices = indices.row_lengths() params_starts = params.row_starts() adjustments = ragged_util.repeat(params_starts, num_indices, axis=0) adjusted_index_values = ( math_ops.cast(indices.values, adjustments.dtype) + adjustments) return ragged_tensor.RaggedTensor.from_row_splits( ragged_gather_ops.gather(params.values, adjusted_index_values), indices.row_splits, validate=False) else: # params is a RaggedTensor and indices is a Tensor. if indices_ndims == 1: return ragged_gather_ops.gather(params, indices) elif indices_ndims == 2: # Adjust indices from batch-local to global (in params.values) adjustments = array_ops.expand_dims(params.row_starts(), 1) adjusted_indices = ( math_ops.cast(indices, adjustments.dtype) + adjustments) return ragged_gather_ops.gather(params.values, adjusted_indices) else: raise ValueError('batch shape from indices does not match params shape')
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_batch_gather_ops.py
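A runnable restatement of the `batch_gather` docstring example above (expected values shown as comments; string values come back as bytes).

import tensorflow as tf
from tensorflow.python.ops.ragged import ragged_batch_gather_ops

params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]])

# result[b, i] = params[b, indices[b, i]] within each batch row.
result = ragged_batch_gather_ops.batch_gather(params, indices)
# [[b'b', b'c', b'a'], [], [], [b'e', b'e']]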
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration parameters for RaggedTensors."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def auto_cast_partition_dtype():
  """Whether incompatible row-partitioning dtypes should be auto-converted.

  If true, then operations that combine RaggedTensors but have different
  row-partitioning tensor dtypes will be automatically cast to a compatible
  dtype (`tf.int64`).  If false, then such operations will result in an error.

  Returns:
    `bool`
  """
  return False
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_config.py
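A sketch of the behaviour this flag governs. Since `auto_cast_partition_dtype()` returns `False`, combining RaggedTensors whose row-partitioning tensors use different dtypes is expected to raise rather than silently cast; the exact error text below is an assumption, not taken from the file.

import tensorflow as tf
from tensorflow.python.ops.ragged import ragged_concat_ops

a = tf.ragged.constant([[1, 2], [3]], row_splits_dtype=tf.int32)
b = tf.ragged.constant([[4], [5, 6]], row_splits_dtype=tf.int64)

try:
  ragged_concat_ops.concat([a, b], axis=0)
except ValueError as e:
  # Expected: an error asking for explicit conversion of row_splits dtypes.
  print(e)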
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_tensor.convert_to_tensor_or_ragged.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedConvertToTensorOrRaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase): #============================================================================= # Tests where the 'value' param is a RaggedTensor #============================================================================= @parameterized.parameters([ dict(pylist=[[1, 2], [3]]), dict(pylist=[[1, 2], [3]], preferred_dtype=dtypes.float32), dict(pylist=[[1, 2], [3]], preferred_dtype=dtypes.string), # Note: Conversion of a single np.array is tested below. These tests # check nestings consisting of multiple or irregularily-shaped np.arrays. 
dict( pylist=[np.array([1, 2]), np.array([3])], preferred_dtype=dtypes.string), dict(pylist=np.array([[1, 2], [3]]), preferred_dtype=dtypes.float32), dict(pylist=np.array([[1, 2], [3]]), preferred_dtype=dtypes.string), dict( pylist=[np.array([[1], np.array([2])]), [np.array([3])]], preferred_dtype=dtypes.float32), dict(pylist=[np.array(1)], preferred_dtype=dtypes.string), ]) def testConvertRaggedTensor(self, pylist, dtype=None, preferred_dtype=None): rt = ragged_factory_ops.constant(pylist) converted = ragged_tensor.convert_to_tensor_or_ragged_tensor( rt, dtype, preferred_dtype) self.assertIs(converted, rt) @parameterized.parameters([ dict( pylist=[[1, 2], [3, 4]], dtype=dtypes.float32, message=('Tensor conversion requested dtype float32 for ' 'RaggedTensor with dtype int32')), dict( pylist=np.array([[1, 2], [3, 4]]), dtype=dtypes.float32, message=('Tensor conversion requested dtype float32 for ' 'RaggedTensor with dtype int32')), dict( pylist=[[1, 2], [3, 4]], dtype=dtypes.string, message=('Tensor conversion requested dtype string for ' 'RaggedTensor with dtype .*')), ]) def testConvertRaggedTensorError(self, pylist, message, dtype=None, preferred_dtype=None): rt = ragged_factory_ops.constant(pylist) with self.assertRaisesRegexp(ValueError, message): ragged_tensor.convert_to_tensor_or_ragged_tensor(rt, dtype, preferred_dtype) #============================================================================= # Tests where the 'value' param is a RaggedTensorValue #============================================================================= @parameterized.parameters( [ dict( value=ragged_factory_ops.constant_value([[1, 2], [3]], dtype=np.int32), expected_dtype=dtypes.int32), dict( value=ragged_factory_ops.constant_value([[b'a', b'b'], [b'c']]), expected_dtype=dtypes.string), dict( value=ragged_factory_ops.constant_value([[1, 2], [3]], dtype=np.int32), dtype=dtypes.float32, expected_dtype=dtypes.float32), dict( value=ragged_factory_ops.constant_value([[1, 2], [3]], dtype=np.int32), preferred_dtype=dtypes.float32, expected_dtype=dtypes.float32), dict( value=ragged_factory_ops.constant_value([[1, 2], [3]], dtype=np.int32), preferred_dtype=dtypes.string, expected_dtype=dtypes.int32), ]) def testConvertRaggedTensorValue(self, value, dtype=None, preferred_dtype=None, expected_dtype=None): if expected_dtype is None: expected_dtype = value.dtype if dtype is None else dtype converted = ragged_tensor.convert_to_tensor_or_ragged_tensor( value, dtype, preferred_dtype) self.assertEqual(value.ragged_rank, converted.ragged_rank) self.assertEqual(dtypes.as_dtype(expected_dtype), converted.dtype) self.assertAllEqual(value, converted) @parameterized.parameters([ dict( value=ragged_factory_ops.constant_value([['a', 'b'], ['c']], dtype=str), dtype=dtypes.int32, message=r"invalid literal for int\(\) with base 10: 'a'"), ]) def testConvertRaggedTensorValueError(self, value, message, dtype=None, preferred_dtype=None): with self.assertRaisesRegexp(ValueError, message): ragged_tensor.convert_to_tensor_or_ragged_tensor(value, dtype, preferred_dtype) #============================================================================= # Tests where the 'value' param is a Tensor #============================================================================= @parameterized.parameters([ dict(pylist=[[1, 2], [3, 4]]), dict(pylist=[[1, 2], [3, 4]], preferred_dtype=dtypes.float32), dict(pylist=[[1, 2], [3, 4]], preferred_dtype=dtypes.string), ]) def testConvertTensor(self, pylist, dtype=None, preferred_dtype=None): tensor = 
constant_op.constant(pylist) converted = ragged_tensor.convert_to_tensor_or_ragged_tensor( tensor, dtype, preferred_dtype) self.assertIs(tensor, converted) @parameterized.parameters([ dict( pylist=[[1, 2], [3, 4]], dtype=dtypes.float32, message=('Tensor conversion requested dtype float32 for ' 'Tensor with dtype int32')), dict( pylist=[[1, 2], [3, 4]], dtype=dtypes.string, message=('Tensor conversion requested dtype string for ' 'Tensor with dtype int32')), ]) def testConvertTensorError(self, pylist, message, dtype=None, preferred_dtype=None): tensor = constant_op.constant(pylist) with self.assertRaisesRegexp(ValueError, message): ragged_tensor.convert_to_tensor_or_ragged_tensor(tensor, dtype, preferred_dtype) #============================================================================= # Tests where the 'value' param is a np.array #============================================================================= @parameterized.parameters([ dict( value=np.array([[1, 2], [3, 4]], dtype=np.int32), expected_dtype=dtypes.int32), dict( value=np.array([[b'a', b'b'], [b'c', b'd']]), expected_dtype=dtypes.string), dict( value=np.array([[1, 2], [3, 4]], dtype=np.int32), dtype=dtypes.float32, expected_dtype=dtypes.float32), dict( value=np.array([[1, 2], [3, 4]], dtype=np.int32), preferred_dtype=dtypes.float32, expected_dtype=dtypes.float32), dict( value=np.array([[1, 2], [3, 4]], dtype=np.int32), preferred_dtype=dtypes.string, expected_dtype=dtypes.int32), ]) def testConvertNumpyArray(self, value, dtype=None, preferred_dtype=None, expected_dtype=None): if expected_dtype is None: expected_dtype = value.dtype if dtype is None else dtype converted = ragged_tensor.convert_to_tensor_or_ragged_tensor( value, dtype, preferred_dtype) self.assertEqual(dtypes.as_dtype(expected_dtype), converted.dtype) self.assertAllEqual(value, converted) @parameterized.parameters([ dict( value=np.array([['a', 'b'], ['c', 'd']], dtype=str), dtype=dtypes.int32, message=r"invalid literal for int\(\) with base 10: 'a'"), ]) def testConvertNumpyArrayError(self, value, message, dtype=None, preferred_dtype=None): with self.assertRaisesRegexp(ValueError, message): ragged_tensor.convert_to_tensor_or_ragged_tensor(value, dtype, preferred_dtype) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/convert_to_tensor_or_ragged_tensor_op_test.py
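A short sketch of the conversion behaviour checked above: RaggedTensor inputs pass through unchanged, while numpy inputs become dense Tensors, with `preferred_dtype` honoured only when it is compatible with the inferred dtype.

import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor

rt = ragged_factory_ops.constant([[1, 2], [3]])
converted = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt)
assert converted is rt   # RaggedTensors are returned as-is

t = ragged_tensor.convert_to_tensor_or_ragged_tensor(
    np.array([[1, 2], [3, 4]], dtype=np.int32),
    preferred_dtype=dtypes.float32)
# t is a dense Tensor; per the tests above its dtype is float32 here,
# because the preferred dtype is compatible with the int32 input.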
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the segment_id_ops.segment_ids_to_row_splits() op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import segment_id_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedSplitsToSegmentIdsOpTest(test_util.TensorFlowTestCase): def testDocStringExample(self): segment_ids = [0, 0, 0, 2, 2, 3, 4, 4, 4] expected = [0, 3, 3, 5, 6, 9] splits = segment_id_ops.segment_ids_to_row_splits(segment_ids) self.assertAllEqual(splits, expected) def testEmptySegmentIds(self): # Note: the splits for an empty ragged tensor contains a single zero. segment_ids = segment_id_ops.segment_ids_to_row_splits([]) self.assertAllEqual(segment_ids, [0]) def testErrors(self): self.assertRaisesRegexp(TypeError, r'segment_ids must be an integer tensor.*', segment_id_ops.segment_ids_to_row_splits, constant_op.constant([0.5])) self.assertRaisesRegexp(ValueError, r'Shape \(\) must have rank 1', segment_id_ops.segment_ids_to_row_splits, 0) self.assertRaisesRegexp(ValueError, r'Shape \(1, 1\) must have rank 1', segment_id_ops.segment_ids_to_row_splits, [[0]]) def testNumSegments(self): segment_ids = [0, 0, 0, 2, 2, 3, 4, 4, 4] num_segments = 7 expected = [0, 3, 3, 5, 6, 9, 9, 9] splits = segment_id_ops.segment_ids_to_row_splits(segment_ids, num_segments) self.assertAllEqual(splits, expected) def testUnsortedSegmentIds(self): # Segment ids are not required to be sorted. segment_ids = [0, 4, 3, 2, 4, 4, 2, 0, 0] splits1 = segment_id_ops.segment_ids_to_row_splits(segment_ids) expected1 = [0, 3, 3, 5, 6, 9] splits2 = segment_id_ops.segment_ids_to_row_splits(segment_ids, 7) expected2 = [0, 3, 3, 5, 6, 9, 9, 9] self.assertAllEqual(splits1, expected1) self.assertAllEqual(splits2, expected2) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_segment_ids_to_row_splits_op_test.py
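The doc examples from the tests above, as a standalone sketch (expected splits shown as comments).

from tensorflow.python.ops.ragged import segment_id_ops

# Row i of the corresponding RaggedTensor holds the values whose segment id
# is i, so these ids describe rows of lengths [3, 0, 2, 1, 3].
splits = segment_id_ops.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4])
# [0, 3, 3, 5, 6, 9]

# num_segments extends the splits to cover trailing empty rows.
splits7 = segment_id_ops.segment_ids_to_row_splits(
    [0, 0, 0, 2, 2, 3, 4, 4, 4], num_segments=7)
# [0, 3, 3, 5, 6, 9, 9, 9]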
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Concat and stack operations for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.util.tf_export import tf_export def concat(values, axis, name=None): """Concatenates potentially ragged tensors along one dimension. Given a list of tensors with the same rank `K` (`K >= axis`), returns a rank-`K` `RaggedTensor` `result` such that `result[i0...iaxis]` is the concatenation of `[rt[i0...iaxis] for rt in values]`. Args: values: A list of potentially ragged tensors. May not be empty. All `values` must have the same rank and the same dtype; but unlike `tf.concat`, they can have arbitrary shapes. axis: A python integer, indicating the dimension along which to concatenate. (Note: Unlike `tf.concat`, the `axis` parameter must be statically known.) Negative values are supported only if the rank of at least one `values` value is statically known. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` with rank `K`. `result.ragged_rank=max(axis, max(rt.ragged_rank for rt in values]))`. Raises: ValueError: If `values` is empty, if `axis` is out of bounds or if the input tensors have different ranks. #### Example: ```python >>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]]) >>> t2 = tf.ragged.constant([[6], [7, 8, 9]]) >>> ragged.concat([t1, t2], axis=0) [[1, 2], [3, 4, 5], [6], [7, 8, 9]] >>> ragged.concat([t1, t2], axis=1) [[1, 2, 6], [3, 4, 5, 7, 8, 9]] ``` """ if not isinstance(values, (list, tuple)): values = [values] with ops.name_scope(name, 'RaggedConcat', values): return _ragged_stack_concat_helper(values, axis, stack_values=False) @tf_export('ragged.stack') def stack(values, axis=0, name=None): """Stacks a list of rank-`R` tensors into one rank-`(R+1)` `RaggedTensor`. Given a list of tensors or ragged tensors with the same rank `R` (`R >= axis`), returns a rank-`R+1` `RaggedTensor` `result` such that `result[i0...iaxis]` is `[value[i0...iaxis] for value in values]`. #### Example: ```python >>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]]) >>> t2 = tf.ragged.constant([[6], [7, 8, 9]]) >>> tf.ragged.stack([t1, t2], axis=0) [[[1, 2], [3, 4, 5]], [[6], [7, 9, 0]]] >>> tf.ragged.stack([t1, t2], axis=1) [[[1, 2], [6]], [[3, 4, 5], [7, 8, 9]]] ``` Args: values: A list of `tf.Tensor` or `tf.RaggedTensor`. May not be empty. 
All `values` must have the same rank and the same dtype; but unlike `tf.stack`, they can have arbitrary dimension sizes. axis: A python integer, indicating the dimension along which to stack. (Note: Unlike `tf.stack`, the `axis` parameter must be statically known.) Negative values are supported only if the rank of at least one `values` value is statically known. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` with rank `R+1`. `result.ragged_rank=1+max(axis, max(rt.ragged_rank for rt in values]))`. Raises: ValueError: If `values` is empty, if `axis` is out of bounds or if the input tensors have different ranks. """ if not isinstance(values, (list, tuple)): values = [values] with ops.name_scope(name, 'RaggedConcat', values): return _ragged_stack_concat_helper(values, axis, stack_values=True) def _ragged_stack_concat_helper(rt_inputs, axis, stack_values): """Helper function to concatenate or stack ragged tensors. Args: rt_inputs: A list of RaggedTensors or Tensors to combine. axis: The axis along which to concatenate or stack. stack_values: A boolean -- if true, then stack values; otherwise, concatenate them. Returns: A RaggedTensor. Raises: ValueError: If rt_inputs is empty, or if axis is out of range. """ # Validate parameters. if not rt_inputs: raise ValueError('rt_inputs may not be empty.') # Convert input tensors. rt_inputs = [ ragged_tensor.convert_to_tensor_or_ragged_tensor( rt_input, name='rt_input') for rt_input in rt_inputs ] row_splits_dtype, rt_inputs = ragged_tensor.match_row_splits_dtypes( *rt_inputs, return_dtype=True) rt_inputs = list(rt_inputs) # Special case: if there's only one input, then return it as-is. if len(rt_inputs) == 1: if stack_values: return ragged_array_ops.expand_dims(rt_inputs[0], axis=axis) else: return rt_inputs[0] # Check the rank (number of dimensions) of the input tensors. ndims = None for rt in rt_inputs: if ndims is None: ndims = rt.shape.ndims else: rt.shape.assert_has_rank(ndims) out_ndims = ndims if (ndims is None or not stack_values) else ndims + 1 axis = ragged_util.get_positive_axis(axis, out_ndims) # If all the inputs are Tensors, and we're combining the final dimension, # then we can delegate to the tf.stack/tf.concat operation, and return a # Tensor. if all(not ragged_tensor.is_ragged(rt) for rt in rt_inputs): if ndims is not None and (axis == out_ndims - 1 or axis == ndims - 1): if stack_values: return array_ops.stack(rt_inputs, axis) else: return array_ops.concat(rt_inputs, axis) # Convert any Tensor inputs to RaggedTensors. This makes it # possible to concatenate Tensors and RaggedTensors together. for i in range(len(rt_inputs)): if not ragged_tensor.is_ragged(rt_inputs[i]): rt_inputs[i] = ragged_tensor.RaggedTensor.from_tensor( rt_inputs[i], ragged_rank=1, row_splits_dtype=row_splits_dtype) # Convert the input tensors to all have the same ragged_rank. ragged_rank = max(max(rt.ragged_rank for rt in rt_inputs), 1) rt_inputs = [_increase_ragged_rank_to(rt, ragged_rank, row_splits_dtype) for rt in rt_inputs] if axis == 0: return _ragged_stack_concat_axis_0(rt_inputs, stack_values) elif axis == 1: return _ragged_stack_concat_axis_1(rt_inputs, stack_values) else: # axis > 1: recurse. 
values = [rt.values for rt in rt_inputs] splits = [[rt_input.row_splits] for rt_input in rt_inputs] with ops.control_dependencies(ragged_util.assert_splits_match(splits)): return ragged_tensor.RaggedTensor.from_row_splits( _ragged_stack_concat_helper(values, axis - 1, stack_values), splits[0][0], validate=False) def _ragged_stack_concat_axis_0(rt_inputs, stack_values): """Helper function to concatenate or stack ragged tensors along axis 0. Args: rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. stack_values: Boolean. If true, then stack values; otherwise, concatenate them. Returns: A RaggedTensor. """ # Concatenate the inner values together. flat_values = [rt.flat_values for rt in rt_inputs] concatenated_flat_values = array_ops.concat(flat_values, axis=0) # Concatenate the splits together for each ragged dimension (adjusting # split offsets as necessary). nested_splits = [rt.nested_row_splits for rt in rt_inputs] ragged_rank = rt_inputs[0].ragged_rank concatenated_nested_splits = [ _concat_ragged_splits([ns[dim] for ns in nested_splits]) for dim in range(ragged_rank) ] # If we are performing a stack operation, then add another splits. if stack_values: stack_lengths = array_ops.stack([rt.nrows() for rt in rt_inputs]) stack_splits = ragged_util.lengths_to_splits(stack_lengths) concatenated_nested_splits.insert(0, stack_splits) return ragged_tensor.RaggedTensor.from_nested_row_splits( concatenated_flat_values, concatenated_nested_splits, validate=False) def _ragged_stack_concat_axis_1(rt_inputs, stack_values): """Helper function to concatenate or stack ragged tensors along axis 1. Args: rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank. stack_values: Boolean. If true, then stack values; otherwise, concatenate them. Returns: A RaggedTensor. """ num_inputs = len(rt_inputs) rt_nrows = rt_inputs[0].nrows() nrows_msg = 'Input tensors have incompatible shapes.' nrows_checks = [ check_ops.assert_equal(rt.nrows(), rt_nrows, message=nrows_msg) for rt in rt_inputs[1:] ] with ops.control_dependencies(nrows_checks): # Concatentate the inputs together to put them in a single ragged tensor. concatenated_rt = _ragged_stack_concat_axis_0(rt_inputs, stack_values=False) # Use ragged.gather to permute the rows of concatenated_rt. In particular, # permuted_rt = [rt_inputs[0][0], ..., rt_inputs[N][0], # rt_inputs[0][1], ..., rt_inputs[N][1], # ..., # rt_inputs[0][M], ..., rt_input[N][M]] # where `N=num_inputs-1` and `M=rt_nrows-1`. row_indices = math_ops.range(rt_nrows * num_inputs) row_index_matrix = array_ops.reshape(row_indices, [num_inputs, -1]) transposed_row_index_matrix = array_ops.transpose(row_index_matrix) row_permutation = array_ops.reshape(transposed_row_index_matrix, [-1]) permuted_rt = ragged_gather_ops.gather(concatenated_rt, row_permutation) if stack_values: # Add a new splits tensor to group together the values. stack_splits = math_ops.range(0, rt_nrows * num_inputs + 1, num_inputs) _copy_row_shape(rt_inputs, stack_splits) return ragged_tensor.RaggedTensor.from_row_splits( permuted_rt, stack_splits, validate=False) else: # Merge together adjacent rows by dropping the row-split indices that # separate them. 
concat_splits = permuted_rt.row_splits[::num_inputs] _copy_row_shape(rt_inputs, concat_splits) return ragged_tensor.RaggedTensor.from_row_splits( permuted_rt.values, concat_splits, validate=False) def _copy_row_shape(rt_inputs, splits): """Sets splits.shape to [rt[shape[0]+1] for each rt in rt_inputs.""" for rt in rt_inputs: if rt.shape[0] is not None: splits.set_shape(tensor_shape.TensorShape(rt.shape[0] + 1)) def _increase_ragged_rank_to(rt_input, ragged_rank, row_splits_dtype): """Adds ragged dimensions to `rt_input` so it has the desired ragged rank.""" if ragged_rank > 0: if not ragged_tensor.is_ragged(rt_input): rt_input = ragged_tensor.RaggedTensor.from_tensor( rt_input, row_splits_dtype=row_splits_dtype) if rt_input.ragged_rank < ragged_rank: rt_input = rt_input.with_values( _increase_ragged_rank_to(rt_input.values, ragged_rank - 1, row_splits_dtype)) return rt_input def _concat_ragged_splits(splits_list): """Concatenates a list of RaggedTensor splits to form a single splits.""" pieces = [splits_list[0]] splits_offset = splits_list[0][-1] for splits in splits_list[1:]: pieces.append(splits[1:] + splits_offset) splits_offset += splits[-1] return array_ops.concat(pieces, axis=0)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_concat_ops.py
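A minimal usage sketch, not part of the original file, showing what the splits-offset logic in `_ragged_stack_concat_axis_0` / `_concat_ragged_splits` produces. It assumes a TensorFlow build where `tf.ragged.constant` is exported and `tf.concat` dispatches to the ragged implementation, and that eager execution is enabled so the values print directly.

```python
import tensorflow as tf

# Assumes eager execution (TF 2.x); in TF 1.x, evaluate inside a Session.
a = tf.ragged.constant([[1, 2, 3], [4]])         # row_splits = [0, 3, 4]
b = tf.ragged.constant([[5, 6], [], [7, 8, 9]])  # row_splits = [0, 2, 2, 5]

# tf.concat dispatches to the ragged implementation for RaggedTensor inputs.
c = tf.concat([a, b], axis=0)
print(c)             # [[1, 2, 3], [4], [5, 6], [], [7, 8, 9]]

# The result's row_splits are built the way _concat_ragged_splits does it:
# keep the first splits vector, then append each later one (minus its
# leading 0) shifted by the number of values seen so far (here, 4).
print(c.row_splits)  # [0, 3, 4, 6, 6, 9]
```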
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged.to_sparse op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase): def testDocStringExample(self): rt = ragged_factory_ops.constant([[1, 2, 3], [4], [], [5, 6]]) st = self.evaluate(rt.to_sparse()) self.assertAllEqual(st.indices, [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]]) self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6]) self.assertAllEqual(st.dense_shape, [4, 3]) def test2DRaggedTensorWithOneRaggedDimension(self): rt = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']]) st = self.evaluate(rt.to_sparse()) self.assertAllEqual( st.indices, [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 0], [4, 0]]) self.assertAllEqual(st.values, b'a b c d e f g'.split()) self.assertAllEqual(st.dense_shape, [5, 3]) def test3DRaggedTensorWithOneRaggedDimension(self): rt = ragged_factory_ops.constant( [[[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]], [[11, 12]], [], [[13, 14]] ], ragged_rank=1) st = self.evaluate(rt.to_sparse()) self.assertAllEqual(st.indices, [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 2, 0], [1, 2, 1], [2, 0, 0], [2, 0, 1], [4, 0, 0], [4, 0, 1]]) self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]) self.assertAllEqual(st.dense_shape, [5, 3, 2]) def test4DRaggedTensorWithOneRaggedDimension(self): rt = ragged_factory_ops.constant( [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [], [[[9, 10], [11, 12]]]], ragged_rank=1) st = self.evaluate(rt.to_sparse()) self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) self.assertAllEqual( st.indices, [ [0, 0, 0, 0], # index for value=1 [0, 0, 0, 1], # index for value=2 [0, 0, 1, 0], # index for value=3 [0, 0, 1, 1], # index for value=4 [0, 1, 0, 0], # index for value=5 [0, 1, 0, 1], # index for value=6 [0, 1, 1, 0], # index for value=7 [0, 1, 1, 1], # index for value=8 [2, 0, 0, 0], # index for value=9 [2, 0, 0, 1], # index for value=10 [2, 0, 1, 0], # index for value=11 [2, 0, 1, 1], # index for value=12 ]) self.assertAllEqual(st.dense_shape, [3, 2, 2, 2]) def test4DRaggedTensorWithTwoRaggedDimensions(self): rt = ragged_factory_ops.constant( [[[[1, 
2], [3, 4]], [[5, 6], [7, 8], [9, 10]]], [[[11, 12]], [], [[13, 14]]], []], ragged_rank=2) st = self.evaluate(rt.to_sparse()) self.assertAllEqual( st.indices, [ [0, 0, 0, 0], # index for value=1 [0, 0, 0, 1], # index for value=2 [0, 0, 1, 0], # index for value=3 [0, 0, 1, 1], # index for value=4 [0, 1, 0, 0], # index for value=5 [0, 1, 0, 1], # index for value=6 [0, 1, 1, 0], # index for value=7 [0, 1, 1, 1], # index for value=8 [0, 1, 2, 0], # index for value=9 [0, 1, 2, 1], # index for value=10 [1, 0, 0, 0], # index for value=11 [1, 0, 0, 1], # index for value=12 [1, 2, 0, 0], # index for value=13 [1, 2, 0, 1], # index for value=14 ]) self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]) self.assertAllEqual(st.dense_shape, [3, 3, 3, 2]) def testShape(self): rt = ragged_factory_ops.constant([[1, 2], [3, 4, 5], [6], [], [7]]) st = rt.to_sparse() self.assertEqual(st.indices.shape.as_list(), [7, 2]) self.assertEqual(st.values.shape.as_list(), [7]) self.assertEqual(st.dense_shape.shape.as_list(), [2]) rt = ragged_factory_ops.constant([[[1, 2]], [], [[3, 4]], []], ragged_rank=1) st = rt.to_sparse() self.assertEqual(st.indices.shape.as_list(), [4, 3]) self.assertEqual(st.values.shape.as_list(), [4]) self.assertEqual(st.dense_shape.shape.as_list(), [3]) rt = ragged_factory_ops.constant([[[1], [2, 3, 4, 5, 6, 7]], [[]]]) st = rt.to_sparse() self.assertEqual(st.indices.shape.as_list(), [7, 3]) self.assertEqual(st.values.shape.as_list(), [7]) self.assertEqual(st.dense_shape.shape.as_list(), [3]) def testKernelErrors(self): # An empty vector, defined using a placeholder to ensure that we can't # determine that it's invalid at graph-construction time. empty_vector = array_ops.placeholder_with_default( array_ops.zeros([0], dtypes.int64), shape=None) bad_rt1 = ragged_tensor.RaggedTensor.from_row_splits( row_splits=[2, 3], values=[1, 2, 3], validate=False) bad_split0 = r'First value of ragged splits must be 0.*' with self.assertRaisesRegexp(errors.InvalidArgumentError, bad_split0): self.evaluate(bad_rt1.to_sparse()) bad_rt2 = ragged_tensor.RaggedTensor.from_row_splits( row_splits=[0, 5], values=empty_vector, validate=False) bad_rt3 = ragged_tensor.RaggedTensor.from_row_splits( row_splits=[0, 1], values=ragged_tensor.RaggedTensor.from_row_splits( row_splits=[0, 5], values=empty_vector, validate=False), validate=False) split_mismatch1_error = r'Final value of ragged splits must match.*' for rt in [bad_rt2, bad_rt3]: with self.assertRaisesRegexp(errors.InvalidArgumentError, split_mismatch1_error): self.evaluate(rt.to_sparse()) bad_rt4 = ragged_tensor.RaggedTensor.from_row_splits( row_splits=[0, 5], values=ragged_tensor.RaggedTensor.from_row_splits( row_splits=[0], values=empty_vector, validate=False), validate=False) split_mismatch2_error = r'Final value of ragged splits must match.*' with self.assertRaisesRegexp(errors.InvalidArgumentError, split_mismatch2_error): self.evaluate(bad_rt4.to_sparse()) bad_rt5 = ragged_tensor.RaggedTensor.from_row_splits( row_splits=empty_vector, values=[], validate=False) empty_splits_error = (r'ragged splits may not be empty.*') with self.assertRaisesRegexp(errors.InvalidArgumentError, empty_splits_error): self.evaluate(bad_rt5.to_sparse()) def testGradient(self): if context.executing_eagerly(): return # rt1.shape == rt2.shape == [2, (D2), (D3), 2]. 
rt1 = ragged_factory_ops.constant( [[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0]]]], ragged_rank=2) rt2 = ragged_factory_ops.constant( [[[[9.0, 8.0], [7.0, 6.0]], [[5.0, 4.0]]]], ragged_rank=2) rt = ragged_functional_ops.map_flat_values(math_ops.add, rt1, rt2 * 2.0) st = rt.to_sparse() g1, g2 = gradients_impl.gradients(st.values, [rt1.flat_values, rt2.flat_values]) self.assertAllEqual(g1, [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]) self.assertAllEqual(g2, [[2.0, 2.0], [2.0, 2.0], [2.0, 2.0]]) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py
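A short sketch, not in the original test file, of the conversion exercised above; it mirrors the values from `testDocStringExample` and assumes eager execution.

```python
import tensorflow as tf

# Assumes eager execution (TF 2.x); in TF 1.x, evaluate inside a Session.
rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]])
st = rt.to_sparse()            # SparseTensor holding the same values

print(st.indices.numpy())      # [[0 0] [0 1] [0 2] [1 0] [3 0] [3 1]]
print(st.values.numpy())       # [1 2 3 4 5 6]
print(st.dense_shape.numpy())  # [4 3]  (4 rows, longest row has 3 values)

# The conversion is invertible; empty rows survive the round trip.
rt_again = tf.RaggedTensor.from_sparse(st)
```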
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_array_ops.boolean_mask.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedBooleanMaskOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): # Define short constants for true & false, so the data & mask can be lined # up in the examples below. This makes it easier to read the examples, to # see which values should be kept vs. masked. T = True F = False @parameterized.parameters([ #========================================================================= # Docstring examples #========================================================================= dict( descr='Docstring example 1', data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], mask=[[T, F, T], [F, F, F], [T, F, F]], expected=ragged_factory_ops.constant_value([[1, 3], [], [7]])), dict( descr='Docstring example 2', data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]]), mask=ragged_factory_ops.constant_value([[F, F, T], [F], [T, T]]), expected=ragged_factory_ops.constant_value([[3], [], [5, 6]])), dict( descr='Docstring example 3', data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]]), mask=[True, False, True], expected=ragged_factory_ops.constant_value([[1, 2, 3], [5, 6]])), #========================================================================= # Uniform data and uniform mask. 
#========================================================================= dict( descr='data.shape=[7]; mask.shape=[7]', data=[1, 2, 3, 4, 5, 6, 7], mask=[T, F, T, T, F, F, F], expected=[1, 3, 4]), dict( descr='data.shape=[5, 3]; mask.shape=[5]', data=[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]], mask=[True, False, True, True, False], expected=[[1, 2, 3], [7, 8, 9], [10, 11, 12]]), dict( descr='data.shape=[5, 3]; mask.shape=[5, 3]', data=[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2], [3, 4, 5]], mask=[[F, F, F], [T, F, T], [T, T, T], [F, F, F], [T, T, F]], expected=ragged_factory_ops.constant_value( [[], [4, 6], [7, 8, 9], [], [3, 4]])), dict( descr='data.shape=[3, 2, 2]; mask.shape=[3]', data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]], mask=[F, F, T], expected=[[[2, 4], [6, 8]]]), dict( descr='data.shape=[3, 2, 2]; mask.shape=[3]', data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]], mask=[F, F, T], expected=[[[2, 4], [6, 8]]]), dict( descr='data.shape=[3, 2, 2]; mask.shape=[3, 2]', data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]], mask=[[T, F], [T, T], [F, F]], expected=ragged_factory_ops.constant_value( [[[1, 2]], [[5, 6], [7, 8]], []], ragged_rank=1)), dict( descr='data.shape=[3, 2, 2]; mask.shape=[3, 2, 2]', data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]], mask=[[[T, T], [F, T]], [[F, F], [F, F]], [[T, F], [T, T]]], expected=ragged_factory_ops.constant_value( [[[1, 2], [4]], [[], []], [[2], [6, 8]]])), dict( descr='data.shape=mask.shape=[2, 2, 2, 2]', data=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 3], [5, 7]]]], mask=[[[[T, T], [F, F]], [[T, F], [F, F]]], [[[F, F], [F, F]], [[T, T], [T, F]]]], expected=ragged_factory_ops.constant_value( [[[[1, 2], []], [[5], []]], [[[], []], [[1, 3], [5]]]])), #========================================================================= # Ragged data and ragged mask. 
#========================================================================= dict( descr='data.shape=[5, (D2)]; mask.shape=[5, (D2)]', data=ragged_factory_ops.constant_value( [[1, 2], [3, 4, 5, 6], [7, 8, 9], [], [1, 2, 3]]), mask=ragged_factory_ops.constant_value( [[F, F], [F, T, F, T], [F, F, F], [], [T, F, T]]), expected=ragged_factory_ops.constant_value( [[], [4, 6], [], [], [1, 3]])), dict( descr='data.shape=[3, (D2), (D3)]; mask.shape=[3, (D2)]', data=ragged_factory_ops.constant_value( [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]]), mask=ragged_factory_ops.constant_value([[T, F], [T, T], [F, F]]), expected=ragged_factory_ops.constant_value( [[[1, 2]], [[5, 6], [7, 8]], []])), dict( descr='data.shape=[3, (D2), D3]; mask.shape=[3, (D2)]', data=ragged_factory_ops.constant_value( [[[1, 2], [3, 4]], [[5, 6], [7, 8], [2, 4]], [[6, 8]]], ragged_rank=1), mask=ragged_factory_ops.constant_value([[T, F], [T, T, F], [F]]), expected=ragged_factory_ops.constant_value( [[[1, 2]], [[5, 6], [7, 8]], []], ragged_rank=1)), dict( descr='data.shape=[3, (D2), (D3)]; mask.shape=[3, (D2), (D3)]', data=ragged_factory_ops.constant_value( [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4]]]), mask=ragged_factory_ops.constant_value( [[[T, T], [F, T]], [[F, F], [F, F]], [[T, F]]]), expected=ragged_factory_ops.constant_value( [[[1, 2], [4]], [[], []], [[2]]])), dict( descr=('data.shape=[3, (D2), (D3), (D4)]; ' 'mask.shape=[3, (D2), (D3), (D4)]'), data=ragged_factory_ops.constant_value( [[[[1, 2], [3, 4]], [[5, 6]]], [[[2, 4], [6, 8]]]]), mask=ragged_factory_ops.constant_value( [[[[T, T], [F, F]], [[T, F]]], [[[F, F], [T, T]]]]), expected=ragged_factory_ops.constant_value( [[[[1, 2], []], [[5]]], [[[], [6, 8]]]])), #========================================================================= # Ragged mask and uniform data #========================================================================= dict( descr='data.shape=[2, 3]; mask.shape=[2, (3)]', data=[[1, 2, 3], [4, 5, 6]], mask=ragged_factory_ops.constant_value([[T, F, F], [F, T, T]]), expected=ragged_factory_ops.constant_value([[1], [5, 6]])), dict( descr='data.shape=[2, 3, 2]; mask.shape=[2, (3)]', data=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 0], [2, 4]]], mask=ragged_factory_ops.constant_value([[T, F, F], [F, T, T]]), expected=ragged_factory_ops.constant_value( [[[1, 2]], [[9, 0], [2, 4]]], ragged_rank=1)), dict( descr='data.shape=[2, 3, 2]; mask.shape=[2, (3), 2]', data=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 0], [2, 4]]], mask=ragged_factory_ops.constant_value( [[[T, F], [F, F], [T, T]], [[T, F], [F, T], [F, F]]], ragged_rank=1), expected=ragged_factory_ops.constant_value( [[[1], [], [5, 6]], [[7], [0], []]])), #========================================================================= # Ragged data and uniform mask. 
#========================================================================= dict( descr='data.shape=[4, (D2)]; mask.shape=[4]', data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [], [5, 6]]), mask=[T, F, T, F], expected=ragged_factory_ops.constant_value([[1, 2, 3], []])), dict( descr='data.shape=[4, (D2), (D3)]; mask.shape=[4]', data=ragged_factory_ops.constant_value( [[[1, 2, 3]], [[4], []], [[5, 6]], []]), mask=[T, F, T, T], expected=ragged_factory_ops.constant_value( [[[1, 2, 3]], [[5, 6]], []])), dict( descr='data.shape=[4, (D2), 2]; mask.shape=[4]', data=ragged_factory_ops.constant_value( [[[1, 2], [3, 4]], [], [[5, 6]], [[7, 8], [9, 0], [1, 2]]], ragged_rank=1), mask=[T, F, F, T], expected=ragged_factory_ops.constant_value( [[[1, 2], [3, 4]], [[7, 8], [9, 0], [1, 2]]], ragged_rank=1)), dict( descr='data.shape=[4, (D2), 2]; mask.shape=[4]', data=ragged_factory_ops.constant_value( [[[1, 2], [3, 4]], [], [[5, 6]], [[7, 8], [9, 0], [1, 2]]], ragged_rank=1), mask=[T, F, F, T], expected=ragged_factory_ops.constant_value( [[[1, 2], [3, 4]], [[7, 8], [9, 0], [1, 2]]], ragged_rank=1)), dict( descr='data.shape=[1, (2)]; mask.shape=[1, 2]', data=ragged_factory_ops.constant_value([[1, 2]]), mask=[[T, F]], expected=ragged_factory_ops.constant_value([[1]])), dict( descr='data.shape=[2, (2), (D3)]; mask.shape=[2, 2]', data=ragged_factory_ops.constant_value( [[[1], [2, 3]], [[], [4, 5, 6]]]), mask=[[T, F], [T, T]], expected=ragged_factory_ops.constant_value([[[1]], [[], [4, 5, 6]]])), dict( descr='data.shape=[2, (2), 3]; mask.shape=[2, 2]', data=ragged_factory_ops.constant_value( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]], ragged_rank=1), mask=[[T, F], [T, T]], expected=ragged_factory_ops.constant_value( [[[1, 2, 3]], [[7, 8, 9], [2, 4, 6]]], ragged_rank=1)), dict( descr='data.shape=[2, (2), 3]; mask.shape=[2, 2, 3]', data=ragged_factory_ops.constant_value( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]], ragged_rank=1), mask=[[[T, F, F], [T, F, T]], [[T, F, T], [F, F, F]]], expected=ragged_factory_ops.constant_value( [[[1], [4, 6]], [[7, 9], []]])), ]) # pyformat: disable def testBooleanMask(self, descr, data, mask, expected): actual = ragged_array_ops.boolean_mask(data, mask) self.assertAllEqual(actual, expected) def testErrors(self): if not context.executing_eagerly(): self.assertRaisesRegexp(ValueError, r'mask\.shape\.ndims must be known statically', ragged_array_ops.boolean_mask, [[1, 2]], array_ops.placeholder(dtypes.bool)) self.assertRaises(TypeError, ragged_array_ops.boolean_mask, [[1, 2]], [[0, 1]]) self.assertRaisesRegexp( ValueError, 'Tensor conversion requested dtype bool for ' 'RaggedTensor with dtype int32', ragged_array_ops.boolean_mask, ragged_factory_ops.constant([[1, 2]]), ragged_factory_ops.constant([[0, 0]])) self.assertRaisesRegexp( ValueError, r'Shapes \(1, 2\) and \(1, 3\) are incompatible', ragged_array_ops.boolean_mask, [[1, 2]], [[True, False, True]]) self.assertRaisesRegexp(errors.InvalidArgumentError, r'Inputs must have identical ragged splits', ragged_array_ops.boolean_mask, ragged_factory_ops.constant([[1, 2]]), ragged_factory_ops.constant([[True, False, True]])) self.assertRaisesRegexp(ValueError, 'mask cannot be scalar', ragged_array_ops.boolean_mask, [[1, 2]], True) self.assertRaisesRegexp(ValueError, 'mask cannot be scalar', ragged_array_ops.boolean_mask, ragged_factory_ops.constant([[1, 2]]), True) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_boolean_mask_op_test.py
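A small sketch, not part of the original file, of the two masking modes covered by the parameterized cases above. It assumes `tf.ragged.boolean_mask` is the exported name for `ragged_array_ops.boolean_mask` and that eager execution is enabled; the values come from the docstring examples in the tests.

```python
import tensorflow as tf

# Assumes eager execution and the exported tf.ragged.boolean_mask symbol.
data = tf.ragged.constant([[1, 2, 3], [4], [5, 6]])

# Element-wise mask with the same ragged row structure as `data`:
# masked-out values are dropped, so rows can shrink or become empty.
mask = tf.ragged.constant([[False, False, True], [False], [True, True]])
print(tf.ragged.boolean_mask(data, mask))                  # [[3], [], [5, 6]]

# A rank-1 mask selects whole rows instead of individual values.
print(tf.ragged.boolean_mask(data, [True, False, True]))   # [[1, 2, 3], [5, 6]]
```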
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operator Squeeze for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor def squeeze(input, axis=None, name=None): # pylint: disable=redefined-builtin """Ragged compatible squeeze. If `input` is a `tf.Tensor`, then this calls `tf.squeeze`. If `input` is a `tf.RaggedTensor`, then this operation takes `O(N)` time, where `N` is the number of elements in the squeezed dimensions. Args: input: A potentially ragged tensor. The input to squeeze. axis: An optional list of ints. Defaults to `None`. If the `input` is ragged, it only squeezes the dimensions listed. It fails if `input` is ragged and axis is []. If `input` is not ragged it calls tf.squeeze. Note that it is an error to squeeze a dimension that is not 1. It must be in the range of [-rank(input), rank(input)). name: A name for the operation (optional). Returns: A potentially ragged tensor. Contains the same data as input, but has one or more dimensions of size 1 removed. """ with ops.name_scope(name, 'RaggedSqueeze', [input]): input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input) if isinstance(input, ops.Tensor): return array_ops.squeeze(input, axis, name) if axis is None: raise ValueError('Ragged.squeeze must have an axis argument.') if isinstance(axis, int): axis = [axis] elif ((not isinstance(axis, (list, tuple))) or (not all(isinstance(d, int) for d in axis))): raise TypeError('Axis must be a list or tuple of integers.') dense_dims = [] ragged_dims = [] # Normalize all the dims in axis to be positive axis = [ragged_util.get_positive_axis(d, input.shape.ndims) for d in axis] for dim in axis: if dim > input.ragged_rank: dense_dims.append(dim - input.ragged_rank) else: ragged_dims.append(dim) # Make sure the specified ragged dimensions are squeezable. assertion_list = [] scalar_tensor_one = constant_op.constant(1, dtype=input.row_splits.dtype) for i, r in enumerate(input.nested_row_lengths()): if i + 1 in ragged_dims: assertion_list.append( control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(r, scalar_tensor_one)), ['the given axis (axis = %d) is not squeezable!' 
% (i + 1)])) if 0 in ragged_dims: scalar_tensor_two = constant_op.constant(2, dtype=dtypes.int32) assertion_list.append( control_flow_ops.Assert( math_ops.equal( array_ops.size(input.row_splits), scalar_tensor_two), ['the given axis (axis = 0) is not squeezable!'])) # At this point, the ragged dimensions are known to be squeezable. squeezed_rt = control_flow_ops.with_dependencies(assertion_list, input.flat_values) if dense_dims: # Gives error if the dense dimension is not squeezable. squeezed_rt = array_ops.squeeze(squeezed_rt, dense_dims) remaining_row_splits = [] for i, row_split in enumerate(input.nested_row_splits): # Each row_splits tensor is for dimension #(i+1). if (i + 1) not in ragged_dims: remaining_row_splits.append(row_split) # Take care of the first row if it is to be squeezed. if remaining_row_splits and 0 in ragged_dims: remaining_row_splits.pop(0) squeezed_rt = RaggedTensor.from_nested_row_splits(squeezed_rt, remaining_row_splits) # Corner case: when removing all the ragged dimensions and the output is # a scalar tensor e.g. ragged.squeeze(ragged.constant([[[1]]])). if set(range(0, input.ragged_rank + 1)).issubset(set(ragged_dims)): squeezed_rt = array_ops.squeeze(squeezed_rt, [0], name) return squeezed_rt
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_squeeze_op.py
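A minimal sketch, not in the original file, of how the `squeeze` defined above behaves. It uses the same internal imports as the test file that follows (these are not public API), assumes eager execution, and the input matches one of the passing parameterized cases in that test.

```python
# Internal TF modules, mirroring the test file's imports (not public API).
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_squeeze_op

# Shape [1, 1, (2), 1]: axes 0, 1 and 3 all have size 1 everywhere.
rt = ragged_factory_ops.constant([[[[12], [11]]]])

# Unlike tf.squeeze on a dense tensor, the axis list is mandatory for a
# ragged input; ragged axes are checked with runtime asserts before removal.
print(ragged_squeeze_op.squeeze(rt, [0, 1]))  # [[12], [11]]
```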
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_map_ops.map_fn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.keras import backend from tensorflow.python.ops import array_ops from tensorflow.python.ops import map_fn as map_fn_lib from tensorflow.python.ops import math_ops as mo from tensorflow.python.ops import string_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_map_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedMapOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ # The following test sets map over a RaggedTensor and apply a # transformation that returns with shape: # [d1, (d2)] -> [d1] dict( fn=mo.reduce_mean, elems=[[1, 2, 3], [4, 5], [6, 7]], expected_output=[2, 4, 6], ), dict( fn=string_ops.reduce_join, elems=[['foo', 'bar', 'baz'], ['a'], ['b', 'c']], expected_output=[b'foobarbaz', b'a', b'bc'], dtype=dtypes.string, ), # [d1, (d2)] -> [d1, 2] dict( fn=lambda x: array_ops.stack([mo.reduce_mean(x), mo.reduce_sum(x)]), # fn=self.stack_mean_and_sum, elems=[[1, 2, 3], [4, 5], [6, 7]], expected_output=[[2, 6], [4.5, 9], [6.5, 13]], dtype=dtypes.float32, expected_ragged_rank=0, ), # [d1, (d2)] -> [d1, (d2)] dict( fn=lambda x: x + np.int64(1), elems=[[1, 2, 3], [4, 5], [6, 7]], expected_output=[[2, 3, 4], [5, 6], [7, 8]], dtype=dtypes.int64, result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=1), ), # [d1, (d2), d3] -> [d1, (d2), d3] dict( fn=lambda x: x + np.int64(1), elems=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], elems_ragged_rank=1, expected_ragged_rank=1, result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=1), expected_output=[[[2, 3], [4, 5]], [], [[6, 7], [8, 9], [10, 1]]], ), # [d1, (d2)] -> [d1, (d2), (d3)] dict( fn=lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0]), elems=[[1, 2, 3], [4, 5], [6, 7]], expected_output=[[[1, 2, 3]], [[4, 5]], [[6, 7]]], result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=2), ), # [d1, (d2), (d3)] -> [d1, (d2), (d3)] dict( fn=lambda x: ragged_functional_ops.map_flat_values(mo.add, x, 1), elems=[[[1, 2, 3]], [[4, 5], [6, 7]]], expected_output=[[[2, 3, 4]], [[5, 6], [7, 8]]], result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, 
ragged_rank=2), ), # [d1, (d2), (d3)] -> [d1, (d2)] dict( fn=lambda x: ragged_math_ops.reduce_sum(x, axis=1), elems=[[[1, 2, 3]], [[4, 5], [6, 7]]], expected_output=[[6], [9, 13]], result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=1), ), # [d1, (d2), (d3)] -> [d1, (d3)] dict( fn=lambda x: ragged_math_ops.reduce_sum(x, axis=0), elems=[[[1, 2, 3]], [[4, 5], [6, 7]]], expected_output=[[1, 2, 3], [10, 12]], result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=1), ), # [d1, (d2), (d3)] -> [d1] dict( fn=ragged_math_ops.reduce_sum, elems=[[[1, 2, 3]], [[4, 5], [6, 7]]], expected_output=[6, 22], result_dtype=dtypes.int64, ), # [d1] -> [d1, (d2)] dict( fn=mo.range, elems=[4, 0, 2], expected_output=[[0, 1, 2, 3], [], [0, 1]], result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=1), ), # [d1] -> [d1, (d2), (d3)] dict( fn=lambda x: ragged_math_ops.range(mo.range(x)), elems=[5, 0, 3], expected_output=[[[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3]], [], [[], [0], [0, 1]]], result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=2), ), # [d1, (d2), (d3), (d4a), (d5)] -> [d1, (d2), (d3), (d4b), (d5)] dict( fn=lambda x: x + np.int64(1), elems=[[[[[1, 2, 3]], [[4], [5]]]], [[[[6, 7]]], [[[8], []]]]], expected_output=[[[[[2, 3, 4]], [[5], [6]]]], [[[[7, 8]]], [[[9], []]]]], result_dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=4), ), ]) def testRaggedMap( self, fn, elems, expected_output, expected_ragged_rank=None, result_ragged_rank=None, elems_ragged_rank=None, dtype=dtypes.int64, result_dtype=None, infer_shape=False, ): elems = ragged_factory_ops.constant(elems, dtype, elems_ragged_rank) output = ragged_map_ops.map_fn( fn=fn, elems=elems, dtype=result_dtype, infer_shape=infer_shape) expected_rt = ragged_factory_ops.constant( expected_output, ragged_rank=expected_ragged_rank) self.assertAllEqual(expected_rt, output) def testRaggedMapOnStructure(self): batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]]) # [[10, 20, 30], [40], [50, 60, 70]] robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10) features = {'batman': batman, 'robin': robin} def _reduce_sum_from_all(f): return mo.reduce_sum(f['batman']) + mo.reduce_sum(f['robin']) output = ragged_map_ops.map_fn( fn=_reduce_sum_from_all, elems=features, dtype=dtypes.int32, ) self.assertAllEqual(output, [66, 44, 198]) # Test mapping over a dict of RTs can produce a dict of RTs. 
def testRaggedMapOnStructure_RaggedOutputs(self): batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]]) # [[10, 20, 30], [40], [50, 60, 70]] robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10) features = {'batman': batman, 'robin': robin} def _increment(f): return { 'batman': f['batman'] + 1, 'robin': f['robin'] + 1, } output = ragged_map_ops.map_fn( fn=_increment, elems=features, infer_shape=False, dtype={ 'batman': ragged_tensor.RaggedTensorType( dtype=dtypes.int32, ragged_rank=1), 'robin': ragged_tensor.RaggedTensorType( dtype=dtypes.int32, ragged_rank=1) }, ) self.assertAllEqual(output['batman'], [[2, 3, 4], [5], [6, 7, 8]]) self.assertAllEqual(output['robin'], [[11, 21, 31], [41], [51, 61, 71]]) def testZip(self): x = ragged_factory_ops.constant( [[10, 20], [30, 40], [50, 60], [70], [80, 90, 100]], dtypes.int64) y = array_ops.expand_dims(mo.range(x.nrows(out_type=dtypes.int64)), axis=1) def _zip(foo): y_val, x_val = foo bar = backend.tile(y_val, array_ops.shape(x_val)) return array_ops.stack([bar, x_val], axis=1) output = ragged_map_ops.map_fn( _zip, (y, x), dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.int64, ragged_rank=1), infer_shape=False) self.assertAllEqual( output, [[[0, 10], [0, 20]], [[1, 30], [1, 40]], [[2, 50], [2, 60]], [[3, 70]], [[4, 80], [4, 90], [4, 100]]]) def testBatchGather(self): tokens = ragged_factory_ops.constant([['hello', '.', 'there'], ['merhaba'], ['bonjour', '.', 'ca va', '?']]) indices = ragged_factory_ops.constant([[0, 2], [0], [0, 2]]) def gather(x): tokens_val, indices_val = x return array_ops.gather(tokens_val, indices_val) data = tokens, indices out = ragged_map_ops.map_fn( gather, data, dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.string, ragged_rank=1), infer_shape=False) self.assertAllEqual( out, [[b'hello', b'there'], [b'merhaba'], [b'bonjour', b'ca va']]) def testMismatchRaggedRank(self): elems = ragged_factory_ops.constant([[[1, 2, 3]], [[4, 5], [6, 7]]]) fn = lambda x: ragged_math_ops.reduce_sum(x, axis=0) with self.assertRaisesWithLiteralMatch( ValueError, r'The declared ragged rank (23) mismatches the result (1)'): _ = ragged_map_ops.map_fn( fn, elems, dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=23)) def testMismatchRaggedRank2(self): elems = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [6, 7]]) fn = lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0]) with self.assertRaisesWithLiteralMatch( ValueError, r'The declared ragged rank (10) mismatches the result (2)'): _ = ragged_map_ops.map_fn( fn, elems, dtype=ragged_tensor.RaggedTensorType( dtype=dtypes.int64, ragged_rank=10)) def testMapOnSparseTensor(self): s = sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1], [1, 0], [1, 1]], values=[0, 5, 0, 4], dense_shape=[2, 2], ) t2 = ragged_tensor.RaggedTensor.from_sparse(s) id_t2 = ragged_map_ops.map_fn( lambda x: x, t2, ) self.assertAllEqual(id_t2, [[0, 5], [0, 4]]) def testRaggedMapWithIncorrectFnOutputSignature(self): x = ragged_factory_ops.constant([[1, 2, 3, 4], [1]]) with self.assertRaisesRegex(errors.InvalidArgumentError, 'All flat_values must have compatible shapes'): y = map_fn_lib.map_fn(lambda r: map_fn_lib.map_fn(lambda y: r, r), x) self.evaluate(y) def testNestedRaggedMapWithFnOutputSignature(self): ragged1d = ragged_tensor.RaggedTensorSpec([None], dtypes.int32) ragged2d = ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32) x = ragged_factory_ops.constant([[1, 2, 3, 4], [1]]) # pylint: disable=g-long-lambda y = map_fn_lib.map_fn( lambda r: 
map_fn_lib.map_fn( lambda y: r, r, fn_output_signature=ragged1d), x, fn_output_signature=ragged2d) expected = [[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], [[1]]] self.assertAllEqual(y, expected) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_map_fn_op_test.py
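A condensed sketch, not in the original file, of the two most common patterns from the parameterized cases above. It reuses the internal modules the test imports (not public API) and assumes eager execution; the inputs and expected values are taken directly from the test table.

```python
import numpy as np
import tensorflow as tf
# Internal TF modules, mirroring the test file's imports (not public API).
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_tensor

elems = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [6, 7]], tf.int64)

# [d1, (d2)] -> [d1]: fn maps each ragged row to a scalar, so the result is dense.
means = ragged_map_ops.map_fn(tf.reduce_mean, elems, infer_shape=False)
print(means)  # [2, 4, 6]

# [d1, (d2)] -> [d1, (d2)]: when the per-row results have varying lengths,
# the ragged result type must be declared explicitly via RaggedTensorType.
plus_one = ragged_map_ops.map_fn(
    lambda x: x + np.int64(1), elems, infer_shape=False,
    dtype=ragged_tensor.RaggedTensorType(dtype=tf.int64, ragged_rank=1))
print(plus_one)  # [[2, 3, 4], [5, 6], [7, 8]]
```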
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Support for ragged tensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_config from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.util.tf_export import tf_export @tf_export("ragged.map_flat_values") def map_flat_values(op, *args, **kwargs): """Applies `op` to the values of one or more RaggedTensors. Replaces any `RaggedTensor` in `args` or `kwargs` with its `flat_values` tensor, and then calls `op`. Returns a `RaggedTensor` that is constructed from the input `RaggedTensor`s' `nested_row_splits` and the value returned by the `op`. If the input arguments contain multiple `RaggedTensor`s, then they must have identical `nested_row_splits`. Examples: ```python >>> rt = ragged.constant([[1, 2, 3], [], [4, 5], [6]]) >>> ragged.map_flat_values(tf.ones_like, rt).eval().tolist() [[1, 1, 1], [], [1, 1], [1]] >>> ragged.map_flat_values(tf.multiply, rt, rt).eval().tolist() [[1, 4, 9], [], [16, 25], [36]] >>> ragged.map_flat_values(tf.add, rt, 5).eval().tolist() [[6, 7, 8], [], [9, 10], [11]] ``` Args: op: The operation that should be applied to the RaggedTensor `flat_values`. `op` is typically an element-wise operation (such as math_ops.add), but any operation that preserves the size of the outermost dimension can be used. I.e., `shape[0]` of the value returned by `op` must match `shape[0]` of the `RaggedTensor`s' `flat_values` tensors. *args: Arguments for `op`. **kwargs: Keyword arguments for `op`. Returns: A `RaggedTensor` whose `ragged_rank` matches the `ragged_rank` of all input `RaggedTensor`s. Raises: ValueError: If args contains no `RaggedTensors`, or if the `nested_splits` of the input `RaggedTensor`s are not identical. """ # Replace RaggedTensors with their values; and collect the splits tensors # from each RaggedTensor. 
nested_splits_lists = [] inner_args = _replace_ragged_with_flat_values(args, nested_splits_lists) inner_kwargs = _replace_ragged_with_flat_values(kwargs, nested_splits_lists) if not nested_splits_lists: return op(*args, **kwargs) split_dtypes = set(splits[0].dtype for splits in nested_splits_lists) if len(split_dtypes) > 1: if not ragged_config.auto_cast_partition_dtype(): raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; " "use RaggedTensor.with_row_splits_dtype() to convert " "them to compatible dtypes.") nested_splits_lists = [ [math_ops.cast(s, dtypes.int64) for s in nested_splits] # pylint: disable=g-complex-comprehension for nested_splits in nested_splits_lists] with ops.control_dependencies( ragged_util.assert_splits_match(nested_splits_lists)): # Delegate to op, and then compose the result from the transformed values # and the splits. return ragged_tensor.RaggedTensor.from_nested_row_splits( op(*inner_args, **inner_kwargs), nested_splits_lists[0], validate=False) def _replace_ragged_with_flat_values(value, nested_splits_lists): """Replace RaggedTensors with their flat_values, and record their splits. Returns a copy of `value`, with any nested `RaggedTensor`s replaced by their `flat_values` tensor. Looks inside lists, tuples, and dicts. Appends each `RaggedTensor`'s `nested_splits` to `nested_splits_lists`. Args: value: The value that should be transformed by replacing `RaggedTensors`. nested_splits_lists: An output parameter used to record the `nested_splits` for any `RaggedTensors` that were replaced. Returns: A copy of `value` with nested `RaggedTensors` replaced by their `values`. """ # Base case if ragged_tensor.is_ragged(value): value = ragged_tensor.convert_to_tensor_or_ragged_tensor(value) nested_splits_lists.append(value.nested_row_splits) return value.flat_values # Recursion cases def recurse(v): return _replace_ragged_with_flat_values(v, nested_splits_lists) if isinstance(value, list): return [recurse(v) for v in value] elif isinstance(value, tuple): return tuple(recurse(v) for v in value) elif isinstance(value, dict): return dict((k, recurse(v)) for (k, v) in value.items()) else: return value
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_functional_ops.py
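The docstring above shows the TF-1.x `.eval()` style; here is the same example, not part of the original file, rewritten for eager execution using the exported `tf.ragged.map_flat_values` name (the `@tf_export` decorator above confirms that symbol).

```python
import tensorflow as tf

# Assumes eager execution (TF 2.x); values match the docstring above.
rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])

# The op runs on rt.flat_values ([1, 2, 3, 4, 5, 6]) and the result is
# re-wrapped with rt's nested_row_splits, so the ragged structure is kept.
print(tf.ragged.map_flat_values(tf.ones_like, rt))     # [[1, 1, 1], [], [1, 1], [1]]
print(tf.ragged.map_flat_values(tf.multiply, rt, rt))  # [[1, 4, 9], [], [16, 25], [36]]
print(tf.ragged.map_flat_values(tf.add, rt, 5))        # [[6, 7, 8], [], [9, 10], [11]]
```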
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the segment_id_ops.row_splits_to_segment_ids() op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import segment_id_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedSplitsToSegmentIdsOpTest(test_util.TensorFlowTestCase): def testDocStringExample(self): splits = [0, 3, 3, 5, 6, 9] expected = [0, 0, 0, 2, 2, 3, 4, 4, 4] segment_ids = segment_id_ops.row_splits_to_segment_ids(splits) self.assertAllEqual(segment_ids, expected) def testEmptySplits(self): # Note: the splits for an empty ragged tensor contains a single zero. segment_ids = segment_id_ops.row_splits_to_segment_ids([0]) self.assertAllEqual(segment_ids, []) def testErrors(self): self.assertRaisesRegexp(ValueError, r'Invalid row_splits: \[\]', segment_id_ops.row_splits_to_segment_ids, []) self.assertRaisesRegexp( ValueError, r'splits must have dtype int32 or int64', segment_id_ops.row_splits_to_segment_ids, constant_op.constant([0.5])) self.assertRaisesRegexp(ValueError, r'Shape \(\) must have rank 1', segment_id_ops.row_splits_to_segment_ids, 0) self.assertRaisesRegexp(ValueError, r'Shape \(1, 1\) must have rank 1', segment_id_ops.row_splits_to_segment_ids, [[0]]) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_row_splits_to_segment_ids_op_test.py
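A one-line sketch, not in the original file, of the op under test, mirroring `testDocStringExample`; it uses the same internal module the test imports (not public API) and assumes eager execution.

```python
# Internal TF module, mirroring the test file's import (not public API).
from tensorflow.python.ops.ragged import segment_id_ops

# Row i of a ragged tensor owns values[splits[i]:splits[i+1]], so each value
# is labeled with its row index; empty rows (here row 1) emit no ids.
splits = [0, 3, 3, 5, 6, 9]
print(segment_id_ops.row_splits_to_segment_ids(splits))
# -> [0, 0, 0, 2, 2, 3, 4, 4, 4]
```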
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for overloaded RaggedTensor operators.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedElementwiseOpsTest(test_util.TensorFlowTestCase): def testOrderingOperators(self): x = ragged_factory_ops.constant([[1, 5], [3]]) y = ragged_factory_ops.constant([[4, 5], [1]]) self.assertAllEqual((x > y), [[False, False], [True]]) self.assertAllEqual((x >= y), [[False, True], [True]]) self.assertAllEqual((x < y), [[True, False], [False]]) self.assertAllEqual((x <= y), [[True, True], [False]]) def testArithmeticOperators(self): x = ragged_factory_ops.constant([[1.0, -2.0], [8.0]]) y = ragged_factory_ops.constant([[4.0, 4.0], [2.0]]) self.assertAllEqual(abs(x), [[1.0, 2.0], [8.0]]) self.assertAllEqual((-x), [[-1.0, 2.0], [-8.0]]) self.assertAllEqual((x + y), [[5.0, 2.0], [10.0]]) self.assertAllEqual((3.0 + y), [[7.0, 7.0], [5.0]]) self.assertAllEqual((x + 3.0), [[4.0, 1.0], [11.0]]) self.assertAllEqual((x - y), [[-3.0, -6.0], [6.0]]) self.assertAllEqual((3.0 - y), [[-1.0, -1.0], [1.0]]) self.assertAllEqual((x + 3.0), [[4.0, 1.0], [11.0]]) self.assertAllEqual((x * y), [[4.0, -8.0], [16.0]]) self.assertAllEqual((3.0 * y), [[12.0, 12.0], [6.0]]) self.assertAllEqual((x * 3.0), [[3.0, -6.0], [24.0]]) self.assertAllEqual((x / y), [[0.25, -0.5], [4.0]]) self.assertAllEqual((y / x), [[4.0, -2.0], [0.25]]) self.assertAllEqual((2.0 / y), [[0.5, 0.5], [1.0]]) self.assertAllEqual((x / 2.0), [[0.5, -1.0], [4.0]]) self.assertAllEqual((x // y), [[0.0, -1.0], [4.0]]) self.assertAllEqual((y // x), [[4.0, -2.0], [0.0]]) self.assertAllEqual((2.0 // y), [[0.0, 0.0], [1.0]]) self.assertAllEqual((x // 2.0), [[0.0, -1.0], [4.0]]) self.assertAllEqual((x % y), [[1.0, 2.0], [0.0]]) self.assertAllEqual((y % x), [[0.0, -0.0], [2.0]]) self.assertAllEqual((2.0 % y), [[2.0, 2.0], [0.0]]) self.assertAllEqual((x % 2.0), [[1.0, 0.0], [0.0]]) def testLogicalOperators(self): a = ragged_factory_ops.constant([[True, True], [False]]) b = ragged_factory_ops.constant([[True, False], [False]]) self.assertAllEqual((~a), [[False, False], [True]]) self.assertAllEqual((a & b), [[True, False], [False]]) self.assertAllEqual((a & True), [[True, True], [False]]) self.assertAllEqual((True & b), [[True, False], [False]]) self.assertAllEqual((a | b), [[True, True], [False]]) self.assertAllEqual((a | False), [[True, True], [False]]) self.assertAllEqual((False | b), [[True, False], [False]]) self.assertAllEqual((a ^ b), [[False, True], [False]]) self.assertAllEqual((a ^ True), [[False, False], [True]]) self.assertAllEqual((True ^ b), [[False, True], [True]]) def testDummyOperators(self): a = 
ragged_factory_ops.constant([[True, True], [False]]) with self.assertRaisesRegexp(TypeError, 'RaggedTensor may not be used as a boolean.'): bool(a) with self.assertRaisesRegexp(TypeError, 'RaggedTensor may not be used as a boolean.'): if a: pass if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_operators_test.py
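A brief sketch, not part of the original file, of the operator overloads the tests above cover; it assumes eager execution, and the values are taken from `testArithmeticOperators` and `testOrderingOperators`.

```python
import tensorflow as tf

# Assumes eager execution (TF 2.x).
x = tf.ragged.constant([[1.0, -2.0], [8.0]])
y = tf.ragged.constant([[4.0, 4.0], [2.0]])

# Arithmetic and comparison operators work element-wise for RaggedTensors
# with matching row structure; Python scalars broadcast to every element.
print(x + y)    # [[5.0, 2.0], [10.0]]
print(x * 3.0)  # [[3.0, -6.0], [24.0]]
print(x > y)    # [[False, False], [True]]

# There is deliberately no implicit truth value: `bool(x)` and `if x:` raise
# TypeError, as testDummyOperators checks.
```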
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged.squeeze.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_conversion_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_squeeze_op from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedSqueezeTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ { 'input_list': [] }, { 'input_list': [[]], 'squeeze_ranks': [0] }, { 'input_list': [[[[], []], [[], []]]], 'squeeze_ranks': [0] }, ]) def test_passing_empty(self, input_list, squeeze_ranks=None): rt = ragged_squeeze_op.squeeze( ragged_factory_ops.constant(input_list), squeeze_ranks) dt = array_ops.squeeze(constant_op.constant(input_list), squeeze_ranks) self.assertAllEqual(ragged_conversion_ops.to_tensor(rt), dt) @parameterized.parameters([ { 'input_list': [[1]], 'squeeze_ranks': [0] }, { 'input_list': [[1]], 'squeeze_ranks': [0, 1] }, { 'input_list': [[1, 2]], 'squeeze_ranks': [0] }, { 'input_list': [[1], [2]], 'squeeze_ranks': [1] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [1] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [3] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0, 3] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0, 1] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [1, 3] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0, 1, 3] }, { 'input_list': [[[1], [2]], [[3], [4]]], 'squeeze_ranks': [2] }, { 'input_list': [[1], [2]], 'squeeze_ranks': [-1] }, ]) def test_passing_simple(self, input_list, squeeze_ranks=None): rt = ragged_squeeze_op.squeeze( ragged_factory_ops.constant(input_list), squeeze_ranks) dt = array_ops.squeeze(constant_op.constant(input_list), squeeze_ranks) self.assertAllEqual(ragged_conversion_ops.to_tensor(rt), dt) @parameterized.parameters([ # ragged_conversion_ops.from_tensor does not work for this # {'input_list': [1]}, { 'input_list': [[1]], 'squeeze_ranks': [0] }, { 'input_list': [[1, 2]], 'squeeze_ranks': [0] }, { 'input_list': [[1], [2]], 'squeeze_ranks': [1] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [1] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [3] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0, 3] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0, 1] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [1, 3] }, { 'input_list': [[[[12], 
[11]]]], 'squeeze_ranks': [0, 1, 3] }, { 'input_list': [[[1], [2]], [[3], [4]]], 'squeeze_ranks': [2] }, ]) def test_passing_simple_from_dense(self, input_list, squeeze_ranks=None): dt = constant_op.constant(input_list) rt = ragged_conversion_ops.from_tensor(dt) rt_s = ragged_squeeze_op.squeeze(rt, squeeze_ranks) dt_s = array_ops.squeeze(dt, squeeze_ranks) self.assertAllEqual(ragged_conversion_ops.to_tensor(rt_s), dt_s) @parameterized.parameters([ { 'input_list': [[[[[[1]], [[1, 2]]]], [[[[]], [[]]]]]], 'output_list': [[[1], [1, 2]], [[], []]], 'squeeze_ranks': [0, 2, 4] }, { 'input_list': [[[[[[1]], [[1, 2]]]], [[[[]], [[]]]]]], 'output_list': [[[[[1]], [[1, 2]]]], [[[[]], [[]]]]], 'squeeze_ranks': [0] }, ]) def test_passing_ragged(self, input_list, output_list, squeeze_ranks=None): rt = ragged_factory_ops.constant(input_list) rt_s = ragged_squeeze_op.squeeze(rt, squeeze_ranks) ref = ragged_factory_ops.constant(output_list) self.assertAllEqual(rt_s, ref) def test_passing_text(self): rt = ragged_factory_ops.constant([[[[[[[['H']], [['e']], [['l']], [['l']], [['o']]], [[['W']], [['o']], [['r']], [['l']], [['d']], [['!']]]]], [[[[['T']], [['h']], [['i']], [['s']]], [[['i']], [['s']]], [[['M']], [['e']], [['h']], [['r']], [['d']], [['a']], [['d']]], [[['.']]]]]]]]) output_list = [[['H', 'e', 'l', 'l', 'o'], ['W', 'o', 'r', 'l', 'd', '!']], [['T', 'h', 'i', 's'], ['i', 's'], ['M', 'e', 'h', 'r', 'd', 'a', 'd'], ['.']]] ref = ragged_factory_ops.constant(output_list) rt_s = ragged_squeeze_op.squeeze(rt, [0, 1, 3, 6, 7]) self.assertAllEqual(rt_s, ref) @parameterized.parameters([ { 'input_list': [[]], 'squeeze_ranks': [1] }, { 'input_list': [[1, 2]], 'squeeze_ranks': [1] }, { 'input_list': [[1], [2]], 'squeeze_ranks': [0] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0, 2] }, { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [2] }, { 'input_list': [[[1], [2]], [[3], [4]]], 'squeeze_ranks': [0] }, { 'input_list': [[[1], [2]], [[3], [4]]], 'squeeze_ranks': [1] }, { 'input_list': [[], []], 'squeeze_ranks': [1] }, { 'input_list': [[[], []], [[], []]], 'squeeze_ranks': [1] }, ]) def test_failing_InvalidArgumentError(self, input_list, squeeze_ranks): with self.assertRaises(errors.InvalidArgumentError): self.evaluate( ragged_squeeze_op.squeeze( ragged_factory_ops.constant(input_list), squeeze_ranks)) @parameterized.parameters([ { 'input_list': [[]] }, { 'input_list': [[1]] }, { 'input_list': [[1, 2]] }, { 'input_list': [[[1], [2]], [[3], [4]]] }, { 'input_list': [[1]] }, { 'input_list': [[[1], [2]], [[3], [4]]] }, { 'input_list': [[[[12], [11]]]] }, ]) def test_failing_no_squeeze_dim_specified(self, input_list): with self.assertRaises(ValueError): ragged_squeeze_op.squeeze(ragged_factory_ops.constant(input_list)) @parameterized.parameters([ { 'input_list': [[[[12], [11]]]], 'squeeze_ranks': [0, 1, 3] }, ]) def test_failing_axis_is_not_a_list(self, input_list, squeeze_ranks): with self.assertRaises(TypeError): tensor_ranks = constant_op.constant(squeeze_ranks) ragged_squeeze_op.squeeze( ragged_factory_ops.constant(input_list), tensor_ranks) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_squeeze_op_test.py
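A sketch, not in the original file, of the failure modes checked by the error-case tests above, complementing the usage example after `ragged_squeeze_op.py`. Internal imports match the test file (not public API) and eager execution is assumed.

```python
# Internal TF modules, mirroring the test file's imports (not public API).
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_squeeze_op

rt = ragged_factory_ops.constant([[[[12], [11]]]])  # shape [1, 1, (2), 1]

# Works: axes 0, 1 and 3 all have size 1.
print(ragged_squeeze_op.squeeze(rt, [0, 1, 3]))  # [12, 11]

# Omitting `axis` for a ragged input is an error, unlike tf.squeeze on a
# dense tensor, which would silently squeeze every size-1 axis.
try:
  ragged_squeeze_op.squeeze(rt)
except ValueError as e:
  print(e)  # Ragged.squeeze must have an axis argument.

# Naming an axis whose size is not 1 (here axis 2, of size 2) fails with an
# InvalidArgumentError once the runtime assertion is evaluated:
# ragged_squeeze_op.squeeze(rt, [0, 2])  -> InvalidArgumentError
```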
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_array_ops.expand_dims.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedExpandDimsOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): # An example 4-d ragged tensor with shape [3, (D2), (D3), 2], and the # expected result calling for expand_dims on each axis. c.f. the table of # expected result shapes in the ragged_array_ops.expand_dims docstring. EXAMPLE4D = [[[[1, 1], [2, 2]], [[3, 3]]], [], [[], [[4, 4], [5, 5], [6, 6]]]] # pyformat: disable EXAMPLE4D_EXPAND_AXIS = { 0: [EXAMPLE4D], 1: [[d0] for d0 in EXAMPLE4D], 2: [[[d1] for d1 in d0] for d0 in EXAMPLE4D], 3: [[[[d2] for d2 in d1] for d1 in d0] for d0 in EXAMPLE4D], 4: [[[[[d3] for d3 in d2] for d2 in d1] for d1 in d0] for d0 in EXAMPLE4D] } @parameterized.parameters([ #========================================================================= # Docstring examples: 2D Ragged Inputs dict(rt_input=[[1, 2], [3]], axis=0, expected=[[[1, 2], [3]]], expected_shape=[1, None, None]), dict(rt_input=[[1, 2], [3]], axis=1, expected=[[[1, 2]], [[3]]], expected_shape=[2, None, None]), dict(rt_input=[[1, 2], [3]], axis=2, expected=[[[1], [2]], [[3]]], expected_shape=[2, None, 1]), #========================================================================= # 2D Tensor Inputs dict(rt_input=[[1, 2], [3, 4], [5, 6]], ragged_rank=0, axis=0, expected=[[[1, 2], [3, 4], [5, 6]]], expected_shape=[1, 3, 2]), dict(rt_input=[[1, 2], [3, 4], [5, 6]], ragged_rank=0, axis=1, expected=[[[1, 2]], [[3, 4]], [[5, 6]]], expected_shape=[3, 1, 2]), dict(rt_input=[[1, 2], [3, 4], [5, 6]], ragged_rank=0, axis=2, expected=[[[1], [2]], [[3], [4]], [[5], [6]]], expected_shape=[3, 2, 1]), #========================================================================= # 4D Ragged Inputs: [3, (D2), (D3), 2] # c.f. the table of expected result shapes in the expand_dims docstring. 
dict(rt_input=EXAMPLE4D, ragged_rank=2, axis=0, expected=EXAMPLE4D_EXPAND_AXIS[0], expected_shape=[1, None, None, None, 2]), dict(rt_input=EXAMPLE4D, ragged_rank=2, axis=1, expected=EXAMPLE4D_EXPAND_AXIS[1], expected_shape=[3, None, None, None, 2]), dict(rt_input=EXAMPLE4D, ragged_rank=2, axis=2, expected=EXAMPLE4D_EXPAND_AXIS[2], expected_shape=[3, None, None, None, 2]), dict(rt_input=EXAMPLE4D, ragged_rank=2, axis=3, expected=EXAMPLE4D_EXPAND_AXIS[3], expected_shape=[3, None, None, 1, 2]), dict(rt_input=EXAMPLE4D, ragged_rank=2, axis=4, expected=EXAMPLE4D_EXPAND_AXIS[4], expected_shape=[3, None, None, 2, 1]), ]) # pyformat: disable def testRaggedExpandDims(self, rt_input, axis, expected, ragged_rank=None, expected_shape=None): rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank) expanded = ragged_array_ops.expand_dims(rt, axis=axis) self.assertEqual(expanded.shape.ndims, rt.shape.ndims + 1) if expected_shape is not None: self.assertEqual(expanded.shape.as_list(), expected_shape) self.assertAllEqual(expanded, expected) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_expand_dims_op_test.py
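A sketch, not in the original file, of the 2-D docstring cases listed in the parameterized table above. It uses the same internal module as the test (not public API) and assumes eager execution; the shape comments mirror the table's `expected_shape` entries.

```python
# Internal TF modules, mirroring the test file's imports (not public API).
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[1, 2], [3]])  # shape [2, (D2)]

# Where the new size-1 axis lands decides which dimensions stay ragged:
print(ragged_array_ops.expand_dims(rt, axis=0))  # [[[1, 2], [3]]]      shape [1, None, None]
print(ragged_array_ops.expand_dims(rt, axis=1))  # [[[1, 2]], [[3]]]    shape [2, None, None]
print(ragged_array_ops.expand_dims(rt, axis=2))  # [[[1], [2]], [[3]]]  shape [2, None, 1]
```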
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_factory_ops.constant.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import ragged from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedConstOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters( #========================================================================= # 0-dimensional tensors. dict(pylist=b'x', expected_shape=()), #========================================================================= # 1-dimensional tensors. dict(pylist=[1, 2, 3], expected_shape=(3,)), #========================================================================= # 2-dimensional tensors. dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)), dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)), #========================================================================= # 3-dimensional tensors. dict( pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], expected_shape=(3, None, None)), dict( pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], ragged_rank=1, expected_shape=(3, None, 2)), dict( pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], inner_shape=(2,), expected_shape=(3, None, 2)), dict( pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]], ragged_rank=1, inner_shape=(2,), expected_shape=(3, None, 2)), # 3-dimensional tensors with numpy arrays dict( pylist=[[[1, 2], np.array([3, np.array(4)])], np.array([]), [[5, 6], [7, 8], [9, 0]]], expected_shape=(3, None, None)), dict( pylist=[[[1, 2], np.array([3, np.array(4)])], np.array([]), [[5, 6], [7, 8], [9, 0]]], ragged_rank=1, expected_shape=(3, None, 2)), dict( pylist=[[[1, 2], np.array([3, np.array(4)])], np.array([]), [[5, 6], [7, 8], [9, 0]]], inner_shape=(2,), expected_shape=(3, None, 2)), dict( pylist=[[[1, 2], np.array([3, np.array(4)])], np.array([]), [[5, 6], [7, 8], [9, 0]]], ragged_rank=1, inner_shape=(2,), expected_shape=(3, None, 2)), #========================================================================= # 4-dimensional tensors. 
dict( pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]], expected_shape=(2, None, None, None)), dict( pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]], ragged_rank=1, expected_shape=(2, None, 2, 2)), dict( pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]], inner_shape=(2,), expected_shape=(2, None, None, 2)), dict( pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[2, 4], [6, 8]], [[1, 5], [7, 9]]]], inner_shape=(2, 2), expected_shape=(2, None, 2, 2)), # 4-dimensional tensors with numpy arrays dict( pylist=np.array([[[np.array([1, 2]), [3, 4]], [[5, 6], [7, 8]]], np.array([[[2, 4], [6, 8]], [[1, 5], [7, 9]]])]), expected_shape=(2, None, None, None)), #========================================================================= # Empty tensors (no scalar values) w/ default ragged_rank and inner_shape dict(pylist=[], expected_shape=(0,)), dict(pylist=[[], [], np.array([])], expected_shape=(3, None)), dict( pylist=[[[], []], [], [[], [[]]]], expected_shape=(3, None, None, None)), dict( pylist=np.array([np.array([[], []]), np.array([]), [[], [[]]]]), expected_shape=(3, None, None, None)), #========================================================================= # Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape dict(pylist=[], ragged_rank=1, expected_shape=(0, None)), dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)), dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)), dict( pylist=[], ragged_rank=1, inner_shape=(100, 20), expected_shape=(0, None, 100, 20)), dict( pylist=[], ragged_rank=2, inner_shape=(100, 20), expected_shape=(0, None, None, 100, 20)), dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)), dict(pylist=[], inner_shape=(0,), expected_shape=(0,)), dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)), dict( pylist=np.array([]), ragged_rank=1, inner_shape=(100, 20), expected_shape=(0, None, 100, 20)), #========================================================================= # default/inferred dtypes dict(pylist=[], expected_dtype=dtypes.float32), dict(pylist=[[[], [[[]], []]]], expected_dtype=dtypes.float32), dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=dtypes.int32), dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=dtypes.float32), dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=dtypes.float32), dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=dtypes.string), dict(pylist=[[True]], expected_dtype=dtypes.bool), dict( pylist=[np.array([1, 2]), np.array([3.]), [4, 5, 6]], expected_dtype=dtypes.float32), #========================================================================= # explicit dtypes dict(pylist=[], dtype=dtypes.float32), dict(pylist=[], dtype=dtypes.string), dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.int64), dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.int32), dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.float32), dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=dtypes.float16), dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=dtypes.float32), dict( pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']], dtype=dtypes.string), ) def testRaggedConst(self, pylist, dtype=None, ragged_rank=None, inner_shape=None, expected_shape=None, expected_dtype=None): """Tests that `ragged_const(pylist).eval().tolist() == pylist`. Args: pylist: The `pylist` argument for `ragged_const()`. dtype: The `dtype` argument for `ragged_const()`. 
If not None, then also test that the resulting ragged tensor has this `dtype`. ragged_rank: The `ragged_rank` argument for `ragged_const()`. If not None, then also test that the resulting ragged tensor has this `ragged_rank`. inner_shape: The `inner_shape` argument for `ragged_const()`. If not None, then also test that the resulting ragged tensor has this `inner_shape`. expected_shape: The expected shape for the resulting ragged tensor. expected_dtype: The expected dtype for the resulting ragged tensor (used to test default/inferred types when dtype=None). """ rt = ragged_factory_ops.constant( pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape) # Normalize the pylist, i.e., convert all np.arrays to list. # E.g., [np.array((1,2))] --> [[1,2]] pylist = _normalize_pylist(pylist) # If dtype was explicitly specified, check it. if dtype is not None: self.assertEqual(rt.dtype, dtype) if expected_dtype is not None: self.assertEqual(rt.dtype, expected_dtype) # If ragged_rank was explicitly specified, check it. if ragged_rank is not None: if isinstance(rt, ragged_tensor.RaggedTensor): self.assertEqual(rt.ragged_rank, ragged_rank) else: self.assertEqual(0, ragged_rank) # If inner_shape was explicitly specified, check it. if inner_shape is not None: if isinstance(rt, ragged_tensor.RaggedTensor): self.assertEqual(rt.flat_values.shape.as_list()[1:], list(inner_shape)) else: self.assertEqual(rt.shape.as_list(), list(inner_shape)) if expected_shape is not None: self.assertEqual(tuple(rt.shape.as_list()), expected_shape) if (expected_shape and expected_shape[0] == 0 and None not in expected_shape): pylist = np.zeros(expected_shape, rt.dtype.as_numpy_dtype) self.assertAllEqual(rt, pylist) @parameterized.parameters( dict( pylist=12, ragged_rank=1, exception=ValueError, message='Invalid pylist=12: incompatible with ragged_rank=1'), dict( pylist=12, inner_shape=(1,), exception=ValueError, message='Invalid pylist=12: incompatible with ' 'dim\\(inner_shape\\)=1'), dict( pylist=[[[1], [2]]], ragged_rank=-1, exception=ValueError, message='Invalid ragged_rank=-1: must be nonnegative'), dict( pylist=[[1, [2]]], exception=ValueError, message='all scalar values must have the same nesting depth'), dict( pylist=[[[1]], [[[2]]]], exception=ValueError, message='all scalar values must have the same nesting depth'), dict( pylist=[[1], [[]]], exception=ValueError, message='Invalid pylist=.*: empty list nesting is greater ' 'than scalar value nesting'), dict( pylist=[1, 2, 3], ragged_rank=1, exception=ValueError, message='pylist has scalar values depth 1, but ragged_rank=1 ' 'requires scalar value depth greater than 1'), dict( pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], ragged_rank=2, exception=ValueError, message='pylist has scalar values depth 2, but ragged_rank=2 ' 'requires scalar value depth greater than 2'), dict(pylist=[1, 2, 3], inner_shape=(1, 1), exception=TypeError), dict( pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], inner_shape=(2, 2), ragged_rank=1, exception=ValueError, message='Invalid pylist=.*: incompatible with ragged_rank=1 and ' 'dim\\(inner_shape\\)=2'), dict( pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]], ragged_rank=1, exception=ValueError, message='inner values have inconsistent shape'), dict( pylist=[[[], [[]]]], ragged_rank=1, exception=ValueError, message='inner values have inconsistent shape'), ) def testRaggedConstError(self, pylist, dtype=None, ragged_rank=None, inner_shape=None, exception=None, message=None): """Tests that `ragged_const()` raises an expected exception.""" 
self.assertRaisesRegexp( exception, message, ragged_factory_ops.constant, pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape) @parameterized.parameters([ dict(pylist=9, scalar_depth=0, max_depth=0), dict(pylist=[9], scalar_depth=1, max_depth=1), dict(pylist=[1, 2, 3], scalar_depth=1, max_depth=1), dict(pylist=[[1], [2]], scalar_depth=2, max_depth=2), dict(pylist=[[[1], [2]], [[3]]], scalar_depth=3, max_depth=3), dict(pylist=[], scalar_depth=None, max_depth=1), dict(pylist=[[]], scalar_depth=None, max_depth=2), dict(pylist=[[], [], []], scalar_depth=None, max_depth=2), dict(pylist=[[[], []], [[], [[[]]]], []], scalar_depth=None, max_depth=5), dict( pylist=[1, [2]], exception=ValueError, message='all scalar values must have the same nesting depth'), dict( pylist=[[1], 2], exception=ValueError, message='all scalar values must have the same nesting depth'), dict( pylist=[[[[1]], []], [[2]]], exception=ValueError, message='all scalar values must have the same nesting depth'), ]) def testScalarAndMaxDepthHelper(self, pylist, scalar_depth=None, max_depth=None, exception=None, message=None): """Tests for the _find_scalar_and_max_depth helper function.""" if exception is not None: self.assertRaisesRegexp(exception, message, ragged_factory_ops._find_scalar_and_max_depth, pylist) else: self.assertEqual( ragged_factory_ops._find_scalar_and_max_depth(pylist), (scalar_depth, max_depth)) @parameterized.parameters([ dict(pylist=[[1], [2, 3]], ragged_rank=1, inner_shape=()), dict( pylist=[[[1], [2]], [[3], [4], [5]]], ragged_rank=1, inner_shape=(1,)), dict(pylist=[[[1], [2]], [[3], [4], [5]]], ragged_rank=2, inner_shape=()), dict( pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]], ragged_rank=1, inner_shape=(2, 3)), dict( pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]], ragged_rank=2, inner_shape=(3,)), dict( pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]], ragged_rank=3, inner_shape=()), dict( pylist=[[[1], [2, 3]]], ragged_rank=1, exception=ValueError, message='inner values have inconsistent shape'), dict( pylist=[[[1], [[2]]]], ragged_rank=1, exception=ValueError, message='inner values have inconsistent shape'), dict( pylist=[[[[1]], [2]]], ragged_rank=1, exception=ValueError, message='inner values have inconsistent shape'), ]) def testDefaultInnerShapeForPylistHelper(self, pylist, ragged_rank, inner_shape=None, exception=None, message=None): """Tests for the _default_inner_shape_for_pylist helper function.""" if exception is not None: self.assertRaisesRegexp( exception, message, ragged.ragged_factory_ops._default_inner_shape_for_pylist, pylist, ragged_rank) else: self.assertEqual( ragged.ragged_factory_ops._default_inner_shape_for_pylist( pylist, ragged_rank), inner_shape) def _normalize_pylist(item): """Convert all (possibly nested) np.arrays contained in item to list.""" # convert np.arrays in current level to list if np.ndim(item) == 0: return item level = (x.tolist() if isinstance(x, np.ndarray) else x for x in item) return [_normalize_pylist(el) if np.ndim(el) != 0 else el for el in level] if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_const_op_test.py
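# Illustrative sketch (not part of the test file above): ragged_factory_ops.constant
# with default, explicit ragged_rank, and explicit inner_shape arguments, mirroring
# three of the 3-dimensional parameterized cases above.  Assumes a TF 1.15 environment.
from tensorflow.python.ops.ragged import ragged_factory_ops

pylist = [[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]]

rt_default = ragged_factory_ops.constant(pylist)                  # shape (3, None, None)
rt_rank1 = ragged_factory_ops.constant(pylist, ragged_rank=1)     # shape (3, None, 2)
rt_inner = ragged_factory_ops.constant(pylist, inner_shape=(2,))  # shape (3, None, 2)
# With ragged_rank=1 or inner_shape=(2,), the innermost dimension is stored as a
# uniform dimension of the flat values instead of a second ragged dimension.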
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_gather_ops.gather_nd.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedGatherNdOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): DOCSTRING_PARAMS = [[['000', '001'], ['010']], [['100'], ['110', '111', '112'], ['120']], [[], ['210']]] # pyformat: disable @parameterized.parameters([ #========================================================================= # Docstring Examples #========================================================================= dict( descr='Docstring example 1', params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS), indices=[[2], [0]], expected=ragged_factory_ops.constant_value( [[[], [b'210']], [[b'000', b'001'], [b'010']]])), dict( descr='Docstring example 2', params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS), indices=[[2, 1], [0, 0]], expected=ragged_factory_ops.constant_value( [[b'210'], [b'000', b'001']])), dict( descr='Docstring example 3', params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS), indices=[[0, 0, 1], [1, 1, 2]], expected=[b'001', b'112']), #========================================================================= # Indices with 0 values (selects the entire params) #========================================================================= dict( descr='params: [B1, (B2)], indices: [0], result: [B1, (B2)]', params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]), indices=np.zeros([0], dtype=np.int32), expected=ragged_factory_ops.constant_value( [[b'a', b'b', b'c'], [b'd']])), dict( descr='params: [B1, (B2)], indices: [A1, 0], result: [A1, B1, (B2)]', params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]), indices=np.zeros([3, 0], dtype=np.int32), expected=ragged_factory_ops.constant_value( [[[b'a', b'b', b'c'], [b'd']], [[b'a', b'b', b'c'], [b'd']], [[b'a', b'b', b'c'], [b'd']]])), dict( descr=('params: [B1, (B2)], indices: [A1, A2, 0], ' 'result: [A1, A2, B1, (B2)]'), params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]), indices=np.zeros([1, 3, 0], dtype=np.int32), expected=ragged_factory_ops.constant_value( [[[[b'a', b'b', b'c'], [b'd']], [[b'a', b'b', b'c'], [b'd']], [[b'a', b'b', b'c'], [b'd']]]])), dict( descr='params: [B1], indices: [A1, (A2), 0], result: [A1, (A2), B1]', params=['a'], 
indices=ragged_factory_ops.constant_value( [[[], []], [[]]], ragged_rank=1, dtype=np.int32), expected=ragged_factory_ops.constant_value( [[[b'a'], [b'a']], [[b'a']]], ragged_rank=1)), #========================================================================= # Indices with 1 value (selects row from params) #========================================================================= dict( descr='params: [B1, (B2)], indices: [A1, 1], result: [A1, (B2)]', params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]), indices=[[1], [0]], expected=ragged_factory_ops.constant_value( [[b'd'], [b'a', b'b', b'c']])), dict( descr=('params: [B1, (B2), (B3)], indices: [A1, 1], ' 'result: [A1, (B2), (B3)]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d']], [['e', 'f']]]), indices=[[1], [1]], expected=ragged_factory_ops.constant_value( [[[b'e', b'f']], [[b'e', b'f']]])), dict( descr=('params: [B1, B2, B3], indices: [A1, (A2), 1], ' 'result: [A1, (A2), B2, B3]'), params=[[['a']], [['b']]], indices=ragged_factory_ops.constant_value([[[0]]], ragged_rank=1), expected=ragged_factory_ops.constant_value( [[[[b'a']]]], ragged_rank=1)), #========================================================================= # Indices with 2 values (selects row & col from params) #========================================================================= dict( descr='params: [B1, (B2)], indices: [A1, 2], result: [A1]', params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]), indices=[[1, 0], [0, 0], [0, 2]], expected=ragged_factory_ops.constant_value([b'd', b'a', b'c'])), dict( descr=('params: [B1, (B2), (B3)], indices: [A1, 2], ' 'result: [A1, (B3)]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d']], [['e', 'f']]]), indices=[[1, 0], [0, 1], [0, 0]], expected=ragged_factory_ops.constant_value( [[b'e', b'f'], [b'd'], [b'a', b'b', b'c']])), dict( descr=('params: [B1, (B2), (B3)], indices: [A1, A2, 2], ' 'result: [A1, (A2), (B3)]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d']], [['e', 'f']]]), indices=[[[1, 0], [0, 1], [0, 0]]], expected=ragged_factory_ops.constant_value( [[[b'e', b'f'], [b'd'], [b'a', b'b', b'c']]])), dict( descr=('params: [B1, (B2), B3], indices: [A1, A2, 2], ' 'result: [A1, A2, B3]'), params=ragged_factory_ops.constant_value( [[['a', 'b'], ['c', 'd']], [['e', 'f']]], ragged_rank=1), indices=[[[1, 0], [0, 1], [0, 0]]], expected=[[[b'e', b'f'], [b'c', b'd'], [b'a', b'b']]]), dict( descr=('params: [B1, (B2), B3], indices: [A1, A2, A3, 2], ' 'result: [A1, A2, A3, B3]'), params=ragged_factory_ops.constant_value( [[['a', 'b'], ['c', 'd']], [['e', 'f']]], ragged_rank=1), indices=[[[[1, 0], [0, 1], [0, 0]]]], expected=[[[[b'e', b'f'], [b'c', b'd'], [b'a', b'b']]]]), dict( descr=('params: [B1, (B2), (B3)], indices: [A1, (A2), 2], ' 'result: [A1, (A2), (B3)]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d']], [['e', 'f']]]), indices=ragged_factory_ops.constant_value( [[[1, 0], [0, 1]], [[0, 0]]], ragged_rank=1), expected=ragged_factory_ops.constant_value( [[[b'e', b'f'], [b'd']], [[b'a', b'b', b'c']]])), #========================================================================= # Indices with 3 values #========================================================================= dict( descr=('params: [B1, (B2), (B3)], indices: [A1, 3], ' 'result: [A1]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d']], [['e', 'f']]]), indices=[[1, 0, 1], [0, 0, 0], [0, 1, 0]], expected=[b'f', b'a', b'd']), dict( 
descr=('params: [B1, (B2), B3], indices: [A1, 3], ' 'result: [A1]'), params=ragged_factory_ops.constant_value( [[['a', 'b'], ['c', 'd']], [['e', 'f']]], ragged_rank=1), indices=[[1, 0, 1], [0, 0, 0], [0, 1, 1]], expected=[b'f', b'a', b'd']), dict( descr=('params: [B1, (B2), (B3), B4], indices: [A1, 3], ' 'result: [A1, B4]'), params=ragged_factory_ops.constant_value( [[[['a', 'b'], ['c', 'd']], [['e', 'f']]]], ragged_rank=2), indices=[[0, 0, 1], [0, 0, 0], [0, 1, 0]], expected=[[b'c', b'd'], [b'a', b'b'], [b'e', b'f']]), ]) # pyformat: disable def testRaggedGatherNd(self, descr, params, indices, expected): result = ragged_gather_ops.gather_nd(params, indices) self.assertAllEqual(result, expected) def testRaggedGatherNdUnknownRankError(self): if context.executing_eagerly(): return params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd']]) indices1 = array_ops.placeholder(dtypes.int32, shape=None) indices2 = array_ops.placeholder(dtypes.int32, shape=[None]) with self.assertRaisesRegexp(ValueError, 'indices.rank be statically known.'): ragged_gather_ops.gather_nd(params, indices1) with self.assertRaisesRegexp( ValueError, r'indices.shape\[-1\] must be statically known.'): ragged_gather_ops.gather_nd(params, indices2) @parameterized.parameters([ dict( params=['a'], indices=0, error=(ValueError, errors.InvalidArgumentError)), dict( params=ragged_factory_ops.constant_value([['a']]), indices=0, message='indices.rank must be at least 1.'), dict( params=['a', 'b', 'c'], indices=ragged_factory_ops.constant_value([[0]]), message='The innermost dimension of indices may not be ragged'), ]) def testRaggedGatherNdStaticError(self, params, indices, message=None, error=ValueError): with self.assertRaisesRegexp(error, message): ragged_gather_ops.gather_nd(params, indices) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py
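# Illustrative sketch (not part of the test file above): gather_nd on a ragged
# params tensor, reproducing docstring examples 1 and 3 from the tests above.
# The values in the comments are what the tests assert after evaluation.
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_gather_ops

params = ragged_factory_ops.constant(
    [[['000', '001'], ['010']],
     [['100'], ['110', '111', '112'], ['120']],
     [[], ['210']]])

# A length-1 index selects an outermost row.
rows = ragged_gather_ops.gather_nd(params, [[2], [0]])
# -> [[[], [b'210']], [[b'000', b'001'], [b'010']]]

# A full-depth index selects an individual scalar.
scalars = ragged_gather_ops.gather_nd(params, [[0, 0, 1], [1, 1, 2]])
# -> [b'001', b'112']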
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_functional_ops.map_flat_values.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedMapInnerValuesOpTest(test_util.TensorFlowTestCase): def assertRaggedMapInnerValuesReturns(self, op, expected, args=(), kwargs=None): kwargs = kwargs or {} result = ragged_functional_ops.map_flat_values(op, *args, **kwargs) self.assertAllEqual(result, expected) def testDocStringExamples(self): """Test the examples in apply_op_to_ragged_values.__doc__.""" rt = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5], [6]]) v1 = ragged_functional_ops.map_flat_values(array_ops.ones_like, rt) v2 = ragged_functional_ops.map_flat_values(math_ops.multiply, rt, rt) v3 = ragged_functional_ops.map_flat_values(math_ops.add, rt, 5) self.assertAllEqual(v1, [[1, 1, 1], [], [1, 1], [1]]) self.assertAllEqual(v2, [[1, 4, 9], [], [16, 25], [36]]) self.assertAllEqual(v3, [[6, 7, 8], [], [9, 10], [11]]) def testOpWithSingleRaggedTensorArg(self): tensor = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]]) self.assertRaggedMapInnerValuesReturns( op=array_ops.zeros_like, args=(tensor,), expected=[[0, 0, 0], [], [0, 0]]) def testOpWithTwoRaggedTensorArgs(self): x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]]) y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, args=(x, y), expected=[[3, 2, 12], [], [4, 25]]) def testOpWithRaggedTensorAndScalarArgs(self): y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, args=(5, y), expected=[[5, 10, 15], [], [20, 25]]) def testOpWithThreeRaggedTensorArgs(self): condition = ragged_factory_ops.constant( [[True, True, False], [], [True, False]]) # pyformat: disable x = ragged_factory_ops.constant([['a', 'b', 'c'], [], ['d', 'e']]) y = ragged_factory_ops.constant([['A', 'B', 'C'], [], ['D', 'E']]) self.assertRaggedMapInnerValuesReturns( op=array_ops.where_v2, args=(condition, x, y), expected=[[b'a', b'b', b'C'], [], [b'd', b'E']]) def testOpWithRaggedTensorListArg(self): x = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]]) y = ragged_factory_ops.constant([[10, 20, 30], [], [40, 50]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.add_n, args=([x, y, 
x],), expected=[[12, 24, 36], [], [48, 60]]) def testOpWithKeywordArgs(self): x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]]) y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, kwargs=dict(x=x, y=y), expected=[[3, 2, 12], [], [4, 25]]) def testOpWithMixedPositionalAndKeywordArgs(self): x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]]) y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, args=(x,), kwargs=dict(y=y), expected=[[3, 2, 12], [], [4, 25]]) def testNonElementWiseOp(self): x = ragged_factory_ops.constant( [[[3, 1, 4], [1, 5, 9], [2, 6, 5]], [], [[3, 5, 8], [9, 7, 9]]], ragged_rank=1) self.assertRaggedMapInnerValuesReturns( op=math_ops.reduce_sum, kwargs={ 'input_tensor': x, 'axis': 1, }, expected=[[8, 15, 13], [], [16, 25]]) def testOpWithRaggedRankGreaterThanOne(self): # ragged_rank=0 x0 = [3, 1, 4, 1, 5, 9, 2, 6, 5] y0 = [1, 2, 3, 4, 5, 6, 7, 8, 9] self.assertAllEqual( math_ops.multiply(x0, y0), [3, 2, 12, 4, 25, 54, 14, 48, 45]) # ragged_rank=1 x1 = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5], [9, 2], [6, 5]]) y1 = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5], [6, 7], [8, 9]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, args=(x1, y1), expected=[[3, 2, 12], [], [4, 25], [54, 14], [48, 45]]) # ragged_rank=2 x2 = ragged_factory_ops.constant([[[3, 1, 4]], [], [[], [1, 5]], [[9, 2], [6, 5]]]) y2 = ragged_factory_ops.constant([[[1, 2, 3]], [], [[], [4, 5]], [[6, 7], [8, 9]]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, args=(x2, y2), expected=[[[3, 2, 12]], # row 0 [], # row 1 [[], [4, 25]], # row 2 [[54, 14], [48, 45]] # row 3 ]) # pyformat: disable # ragged_rank=3 x3 = ragged_factory_ops.constant([[[[3, 1, 4]], []], [], [[[], [1, 5]]], [[[9, 2], [6, 5]]]]) y3 = ragged_factory_ops.constant([[[[1, 2, 3]], []], [], [[[], [4, 5]]], [[[6, 7], [8, 9]]]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, args=(x3, y3), expected=[ [[[3, 2, 12]], []], # row 0 [], # row 1 [[[], [4, 25]]], # row 2 [[[54, 14], [48, 45]]] # row 3 ]) # pyformat: disable def testOpWithRaggedRankThree(self): x = ragged_factory_ops.constant([[[3, 1, 4]], [], [[], [1, 5]]]) y = ragged_factory_ops.constant([[[1, 2, 3]], [], [[], [4, 5]]]) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, args=(x, y), expected=[[[3, 2, 12]], [], [[], [4, 25]]]) def testOpWithInnerValuesOnly(self): x = constant_op.constant([[1, 2], [3, 4], [5, 6]]) y = constant_op.constant(2) self.assertRaggedMapInnerValuesReturns( op=math_ops.multiply, args=(x, y), expected=[[2, 4], [6, 8], [10, 12]]) def testRaggedTensorSplitsRaggedRankMismatchError(self): x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]]) y = ragged_factory_ops.constant([[[3, 1, 4], []], [], [[1, 5]]]) self.assertRaisesRegexp( ValueError, r'Inputs must have identical ragged splits.*', ragged_functional_ops.map_flat_values, math_ops.add, x, y) def testRaggedTensorSplitsValueMismatchError(self): x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]]) y = ragged_factory_ops.constant([[1], [2, 3], [4, 5]]) self.assertRaisesRegexp(errors.InvalidArgumentError, r'Inputs must have identical ragged splits.*', ragged_functional_ops.map_flat_values, math_ops.add, x, y) def testRaggedTensorSplitsMismatchErrorAtRuntime(self): splits1 = array_ops.placeholder_with_default( constant_op.constant([0, 3, 3, 5], dtypes.int64), None) splits2 = 
array_ops.placeholder_with_default( constant_op.constant([0, 1, 3, 5], dtypes.int64), None) x = ragged_tensor.RaggedTensor.from_row_splits([3, 1, 4, 1, 5], splits1) y = ragged_tensor.RaggedTensor.from_row_splits([1, 2, 3, 4, 5], splits2) with self.assertRaisesRegexp(errors.InvalidArgumentError, r'.*Inputs must have identical ragged splits'): self.evaluate(ragged_functional_ops.map_flat_values(math_ops.add, x, y)) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_map_flat_values_op_test.py
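# Illustrative sketch (not part of the test file above): map_flat_values applies
# an op to the flat_values of a RaggedTensor and reuses the original row splits,
# reproducing the doc-string examples exercised by testDocStringExamples above.
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops

rt = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5], [6]])
ones = ragged_functional_ops.map_flat_values(array_ops.ones_like, rt)
# -> [[1, 1, 1], [], [1, 1], [1]]
squares = ragged_functional_ops.map_flat_values(math_ops.multiply, rt, rt)
# -> [[1, 4, 9], [], [16, 25], [36]]
plus_five = ragged_functional_ops.map_flat_values(math_ops.add, rt, 5)
# -> [[6, 7, 8], [], [9, 10], [11]]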
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_math_ops.reduce_<AGGREGATE> ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.platform import googletest _MAX_INT32 = dtypes.int32.max _MIN_INT32 = dtypes.int32.min _NAN = np.nan def mean(*values): return 1.0 * sum(values) / len(values) @test_util.run_all_in_graph_and_eager_modes class RaggedReduceOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters( #========================================================================= # Docstring examples. RaggedTensor for testing is: # [[3, 1, 4], # [1, 5, ], # [9, ], # [2, 6 ]] #========================================================================= dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=-2, expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6] ), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=-1, expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6] ), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, expected=[54, 30, 4] # = [3*1*9*2, 1*5*6, 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, expected=[12, 5, 9, 12] # = [3*1*4, 1*5, 9, 2*6] ), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, expected=[1, 1, 4] # = [min(3, 1, 9, 2), min(1, 5, 6), 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, expected=[1, 1, 9, 2] # = [min(3, 1, 4), min(1, 5), 9, min(2, 6)] ), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, expected=[9, 6, 4] # = [max(3, 1, 9, 2), max(1, 5, 6), 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=1, expected=[4, 5, 9, 6] # = [max(3, 1, 4), max(1, 5), 9, max(2, 6)] ), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]], axis=0, expected=[3.75, 4, 4] # = 
[mean(3, 1, 9, 2), mean(1, 5, 6), 4] ), dict( ragged_reduce_op=ragged_math_ops.reduce_any, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=0, expected=[True, True, False, True]), dict( ragged_reduce_op=ragged_math_ops.reduce_any, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=1, expected=[True, True, True]), dict( ragged_reduce_op=ragged_math_ops.reduce_all, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=0, expected=[False, True, False, True]), dict( ragged_reduce_op=ragged_math_ops.reduce_all, rt_input=[[True, True], [True, True, False, True], [False, True]], axis=1, expected=[True, False, False]), #========================================================================= # Examples with the following RaggedTensor (ragged_rank=1): # [[0, 1, 2, 3], # [4 ], # [ ], # [5, 6 ], # [7 ], # [8, 9 ]] #========================================================================= # axis=None dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, expected=0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, expected=0 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, expected=min(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, expected=max(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=None, expected=mean(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)), # axis=0 dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, expected=[0 + 4 + 5 + 7 + 8, 1 + 6 + 9, 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, expected=[0 * 4 * 5 * 7 * 8, 1 * 6 * 9, 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, expected=[min(0, 4, 5, 7, 8), min(1, 6, 9), 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, expected=[max(0, 4, 5, 7, 8), max(1, 6, 9), 2, 3]), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=0, expected=[mean(0, 4, 5, 7, 8), mean(1, 6, 9), 2, 3]), # axis=1 # Note: we don't test mean here because it gives a NaN, and this will # cause assertEqual to fail (since NaN != NaN). See testMeanNan(). 
dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=1, expected=[0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]), dict( ragged_reduce_op=ragged_math_ops.reduce_prod, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=1, expected=[0 * 1 * 2 * 3, 4, 1, 5 * 6, 7, 8 * 9]), dict( ragged_reduce_op=ragged_math_ops.reduce_min, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=1, expected=[min(0, 1, 2, 3), 4, _MAX_INT32, min(5, 6), 7, min(8, 9)]), dict( ragged_reduce_op=ragged_math_ops.reduce_max, rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]], axis=1, expected=[max(0, 1, 2, 3), 4, _MIN_INT32, max(5, 6), 7, max(8, 9)]), #========================================================================= # Examples with ragged_rank=2: # [[[1, 2], [ ], [3, 4, 5]], # [[6, 7], [ ], [8 ]], # [ ], # [[9 ] ]] #========================================================================= dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[], expected=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=None, expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=0, expected=[[1 + 6 + 9, 2 + 7], [], [3 + 8, 4, 5]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=1, expected=[[1 + 3, 2 + 4, 5], [6 + 8, 7], [], [9]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=2, expected=[[1 + 2, 0, 3 + 4 + 5], [6 + 7, 0, 8], [], [9]]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 1], expected=[1 + 3 + 6 + 8 + 9, 2 + 4 + 7, 5]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 2], expected=[1 + 6 + 9 + 2 + 7, 0, 3 + 8 + 4 + 5]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[1, 2], expected=[1 + 2 + 3 + 4 + 5, 6 + 7 + 8, 0, 9]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[0, 1, 2], expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])), #========================================================================= # Examples for ragged_reduce_mean ragged_rank=2: # [[[1, 2], [3, 4, 5]], # [[6, 7], [8 ]], # [[9 ] ]] #========================================================================= dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=0, expected=[[mean(1, 6, 9), mean(2, 7)], [mean(3, 8), 4, 5]]), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=1, expected=[[mean(1, 3), mean(2, 4), 5], [mean(6, 8), 7], [9]]), dict( ragged_reduce_op=ragged_math_ops.reduce_mean, rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]], axis=2, expected=[[mean(1, 2), mean(3, 4, 5)], [mean(6, 7), 8], [9]]), # Test case for GitHub issue 27497, multiple negative axes. 
dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[-2, -1], expected=[1 + 2 + 3 + 4 + 5, 6 + 7 + 8, 0, 9]), dict( ragged_reduce_op=ragged_math_ops.reduce_sum, rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]], axis=[-3, -2, -1], expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])), ) def testReduce(self, ragged_reduce_op, rt_input, axis, expected): rt_input = ragged_factory_ops.constant(rt_input) reduced = ragged_reduce_op(rt_input, axis) self.assertAllEqual(reduced, expected) def assertEqualWithNan(self, actual, expected): """Like assertEqual, but NaN==NaN.""" self.assertTrue( ((actual == expected) | (np.isnan(actual) & np.isnan(expected))).all()) def testMeanNan(self): rt_as_list = [[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]] expected = ( np.array([0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]) / np.array( [4, 1, 0, 2, 1, 2])) rt_input = ragged_factory_ops.constant(rt_as_list) reduced = ragged_math_ops.reduce_mean(rt_input, axis=1) self.assertEqualWithNan(self.evaluate(reduced), expected) def testMeanWithTensorInputs(self): tensor = [[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]] expected = [2.0, 20.0] reduced = ragged_math_ops.reduce_mean(tensor, axis=1) self.assertAllEqual(reduced, expected) def testErrors(self): rt_input = ragged_factory_ops.constant([[1, 2, 3], [4, 5]]) axis = array_ops.placeholder_with_default(constant_op.constant([0]), None) if not context.executing_eagerly(): self.assertRaisesRegexp( ValueError, r'axis must be known at graph construction time.', ragged_math_ops.reduce_sum, rt_input, axis) self.assertRaisesRegexp(TypeError, r'axis must be an int; got str.*', ragged_math_ops.reduce_sum, rt_input, ['x']) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_reduce_op_test.py
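# Illustrative sketch (not part of the test file above): ragged reductions along
# axis 0 (across rows, skipping missing values) versus axis 1 (within each row),
# using the same RaggedTensor as the docstring examples above.  Assumes TF 1.15.
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops

rt = ragged_factory_ops.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
col_sums = ragged_math_ops.reduce_sum(rt, axis=0)  # [15, 12, 4]  = [3+1+9+2, 1+5+6, 4]
row_sums = ragged_math_ops.reduce_sum(rt, axis=1)  # [8, 6, 9, 8] = [3+1+4, 1+5, 9, 2+6]
# reduce_mean over an empty row produces NaN, which is why the tests check that
# case separately in testMeanNan rather than with assertAllEqual.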
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_array_ops.where.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_where_op from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedWhereOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ #========================================================================= # Docstring Examples #========================================================================= dict( # shape=[D1, (D2)] condition=ragged_factory_ops.constant_value( [[True, False, True], [False, True]]), expected=[[0, 0], [0, 2], [1, 1]]), dict( # shape=[D1, (D2)] condition=ragged_factory_ops.constant_value( [[True, False, True], [False, True]]), x=ragged_factory_ops.constant_value( [['A', 'B', 'C'], ['D', 'E']]), y=ragged_factory_ops.constant_value( [['a', 'b', 'c'], ['d', 'e']]), expected=ragged_factory_ops.constant_value( [[b'A', b'b', b'C'], [b'd', b'E']])), dict( # shape=[D1, (D2)] condition=ragged_factory_ops.constant_value([True, False]), x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]), y=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e']]), expected=ragged_factory_ops.constant_value( [[b'A', b'B', b'C'], [b'd', b'e']])), #========================================================================= # Coordinate-retrieval mode #========================================================================= dict( # shape=[D1] condition=[True, False, True, False, True], expected=[[0], [2], [4]]), dict( # shape=[D1, D2] condition=[[True, False], [False, True]], expected=[[0, 0], [1, 1]]), dict( # shape=[D1, (D2)] condition=ragged_factory_ops.constant_value( [[True, False, True], [False, True]]), expected=[[0, 0], [0, 2], [1, 1]]), dict( # shape=[D1, (D2), (D3)] condition=ragged_factory_ops.constant_value([ [[True, False, True], [False, True]], [[True], [], [False], [False, True, False]] ]), expected=[[0, 0, 0], [0, 0, 2], [0, 1, 1], [1, 0, 0], [1, 3, 1]]), dict( # shape=[D1, (D2), D3] condition=ragged_factory_ops.constant_value([ [[True, False], [False, True]], [[True, False], [False, False], [True, False], [False, True]] ], ragged_rank=1), expected=[[0, 0, 0], [0, 1, 1], [1, 0, 0], [1, 2, 0], [1, 3, 1]]), dict( # shape=[D1, (D2), (D3), (D4)] condition=ragged_factory_ops.constant_value([ [[[], [True]]], [[[True, False, True], [False, True]], [[True], [], [False], [False, True, False]]] ]), expected=[[0, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 2], [1, 0, 1, 1], [1, 1, 0, 0], [1, 1, 3, 1]]), #========================================================================= # Elementwise 
value-selection mode #========================================================================= dict( # shape=[] condition=True, x='A', y='a', expected=b'A'), dict( # shape=[] condition=False, x='A', y='a', expected=b'a'), dict( # shape=[D1] condition=[True, False, True], x=['A', 'B', 'C'], y=['a', 'b', 'c'], expected=[b'A', b'b', b'C']), dict( # shape=[D1, D2] condition=[[True, False], [False, True]], x=[['A', 'B'], ['D', 'E']], y=[['a', 'b'], ['d', 'e']], expected=[[b'A', b'b'], [b'd', b'E']]), dict( # shape=[D1, (D2)] condition=ragged_factory_ops.constant_value( [[True, False, True], [False, True]]), x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]), y=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e']]), expected=ragged_factory_ops.constant_value( [[b'A', b'b', b'C'], [b'd', b'E']])), dict( # shape=[D1, (D2), D3] condition=ragged_factory_ops.constant_value([ [[True, False], [False, True]], [[True, False], [False, False], [True, False], [False, True]] ], ragged_rank=1), x=ragged_factory_ops.constant_value([ [['A', 'B'], ['C', 'D']], [['E', 'F'], ['G', 'H'], ['I', 'J'], ['K', 'L']] ], ragged_rank=1), y=ragged_factory_ops.constant_value([ [['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h'], ['i', 'j'], ['k', 'l']] ], ragged_rank=1), expected=ragged_factory_ops.constant_value([ [[b'A', b'b'], [b'c', b'D']], [[b'E', b'f'], [b'g', b'h'], [b'I', b'j'], [b'k', b'L']] ], ragged_rank=1)), dict( # shape=[D1, (D2), (D3), (D4)] condition=ragged_factory_ops.constant_value([ [[[], [True]]], [[[True, False, True], [False, True]], [[True], [], [False], [False, True, False]]] ]), x=ragged_factory_ops.constant_value([ [[[], ['A']]], [[['B', 'C', 'D'], ['E', 'F']], [['G'], [], ['H'], ['I', 'J', 'K']]] ]), y=ragged_factory_ops.constant_value([ [[[], ['a']]], [[['b', 'c', 'd'], ['e', 'f']], [['g'], [], ['h'], ['i', 'j', 'k']]] ]), expected=ragged_factory_ops.constant_value([ [[[], [b'A']]], [[[b'B', b'c', b'D'], [b'e', b'F']], [[b'G'], [], [b'h'], [b'i', b'J', b'k']]] ])), #========================================================================= # Elementwise row-selection mode #========================================================================= dict( # x.shape=[D1, D2], y.shape=[D1, D2] condition=[True, False, True], x=[['A', 'B'], ['C', 'D'], ['E', 'F']], y=[['a', 'b'], ['c', 'd'], ['e', 'f']], expected=[[b'A', b'B'], [b'c', b'd'], [b'E', b'F']]), dict( # x.shape=[D1, D2], y.shape=[D1, (D2)] condition=[True, False, True], x=[['A', 'B'], ['C', 'D'], ['E', 'F']], y=ragged_factory_ops.constant_value( [['a', 'b'], ['c'], ['d', 'e']]), expected=ragged_factory_ops.constant_value( [[b'A', b'B'], [b'c'], [b'E', b'F']])), dict( # x.shape=[D1, (D2)], y.shape=[D1, (D2)] condition=[True, False, True], x=ragged_factory_ops.constant_value( [['A', 'B', 'C'], ['D', 'E'], ['F', 'G']]), y=ragged_factory_ops.constant_value( [['a', 'b'], ['c'], ['d', 'e']]), expected=ragged_factory_ops.constant_value( [[b'A', b'B', b'C'], [b'c'], [b'F', b'G']])), dict( # shape=[D1, (D2), (D3), (D4)] condition=ragged_factory_ops.constant_value([True, False]), x=ragged_factory_ops.constant_value([ [[[], ['A']]], [[['B', 'C', 'D'], ['E', 'F']], [['G'], [], ['H'], ['I', 'J', 'K']]] ]), y=ragged_factory_ops.constant_value([[[['a']]], [[['b']]]]), expected=ragged_factory_ops.constant_value( [[[[], [b'A']]], [[[b'b']]]])), ]) # pyformat: disable def testRaggedWhere(self, condition, expected, x=None, y=None): result = ragged_where_op.where(condition, x, y) self.assertAllEqual(result, expected) 
@parameterized.parameters([ dict( condition=[True, False], x=[1, 2], error=ValueError, message='x and y must be either both None or both non-None'), dict( condition=ragged_factory_ops.constant_value([[True, False, True], [False, True]]), x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]), y=[['a', 'b'], ['d', 'e']], error=ValueError, message='Input shapes do not match.'), ]) def testRaggedWhereErrors(self, condition, error, message, x=None, y=None): with self.assertRaisesRegexp(error, message): ragged_where_op.where(condition, x, y) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_where_op_test.py
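# Illustrative sketch (not part of the test file above): ragged where in
# coordinate-retrieval mode (x and y omitted) and in elementwise value-selection
# mode, mirroring the docstring examples in the tests above.
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_where_op

condition = ragged_factory_ops.constant([[True, False, True], [False, True]])

coords = ragged_where_op.where(condition)
# -> [[0, 0], [0, 2], [1, 1]]

x = ragged_factory_ops.constant([['A', 'B', 'C'], ['D', 'E']])
y = ragged_factory_ops.constant([['a', 'b', 'c'], ['d', 'e']])
selected = ragged_where_op.where(condition, x, y)
# -> [[b'A', b'b', b'C'], [b'd', b'E']]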
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_batch_gather_ops.batch_gather.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_batch_gather_ops from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedBatchGatherOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ #========================================================================= # Docstring Example #========================================================================= dict( descr='Docstring example', params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d'], [], ['e']]), indices=ragged_factory_ops.constant_value([[1, 2, 0], [], [], [0, 0]]), expected=ragged_factory_ops.constant_value([[b'b', b'c', b'a'], [], [], [b'e', b'e']])), #========================================================================= # 0 Batch Dimensions #========================================================================= dict( descr='params: [P1], indices: [I], result: [I]', params=['a', 'b', 'c', 'd'], indices=[3, 2], expected=[b'd', b'c']), dict( descr='params: [P1, (P2)], indices: [I], result: [I, (P2)]', params=ragged_factory_ops.constant_value([['a', 'b'], [], ['c'], ['d', 'e']]), indices=[3, 2], expected=ragged_factory_ops.constant_value([[b'd', b'e'], [b'c']])), #========================================================================= # 1 Batch Dimension #========================================================================= dict( descr='params: [B1, P1], indices: [B1, I], result: [B1, I]', params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']], indices=[[2, 0], [0, 1], [1, 0]], expected=[[b'c', b'a'], [b'd', b'e'], [b'h', b'g']]), dict( descr='params: [B1, (P1)], indices: [B1, I], result: [B1, I]', params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e'], ['g']]), indices=[[2, 0], [0, 1], [0, 0]], expected=[[b'c', b'a'], [b'd', b'e'], [b'g', b'g']]), dict( descr='params: [B1, P1], indices: [B1, (I)], result: [B1, (I)]', params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']], indices=ragged_factory_ops.constant_value([[2, 0, 2], [0], [1]]), expected=ragged_factory_ops.constant_value([[b'c', b'a', b'c'], [b'd'], [b'h']])), dict( descr=('params: [B1, (P1), (P2), P3], 
indices: [B1, I], ' 'result: [B1, I, (P2), P3]'), params=ragged_factory_ops.constant_value( [[[['a']], [['b'], ['c']]], [[['d'], ['e']], [['f']]], [[['g']]]], ragged_rank=2), indices=[[1, 0], [0, 1], [0, 0]], expected=ragged_factory_ops.constant_value( [[[[b'b'], [b'c']], [[b'a']]], [[[b'd'], [b'e']], [[b'f']]], [[[b'g']], [[b'g']]]], ragged_rank=2)), #========================================================================= # 2 Batch Dimensions #========================================================================= dict( descr=('params: [B1, B2, P1], indices: [B1, B2, I], ' 'result: [B1, B2, I]'), params=[[['a', 'b', 'c']], [['d', 'e', 'f']], [['g', 'h', 'i']]], indices=[[[2, 0]], [[0, 1]], [[1, 0]]], expected=[[[b'c', b'a']], [[b'd', b'e']], [[b'h', b'g']]]), dict( descr=('params: [B1, (B2), P1], indices: [B1, (B2), I], ' 'result: [B1, (B2), I]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]], ragged_rank=1), indices=ragged_factory_ops.constant_value( [[[2, 0], [0, 1]], [[1, 0]]], ragged_rank=1), expected=ragged_factory_ops.constant_value( [[[b'c', b'a'], [b'd', b'e']], [[b'h', b'g']]], ragged_rank=1)), dict( descr=('params: [B1, (B2), (P1)], indices: [B1, (B2), I], ' 'result: [B1, (B2), I]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d']], [['e', 'f']]], ragged_rank=2), indices=ragged_factory_ops.constant_value( [[[2, 0], [0, 0]], [[1, 0]]], ragged_rank=1), expected=ragged_factory_ops.constant_value( [[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]], ragged_rank=1)), dict( descr=('params: [B1, (B2), P1], indices: [B1, (B2), (I)], ' 'result: [B1, (B2), (I)]'), params=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]], ragged_rank=1), indices=ragged_factory_ops.constant_value( [[[2, 1, 0], [0]], [[1, 1]]], ragged_rank=2), expected=ragged_factory_ops.constant_value( [[[b'c', b'b', b'a'], [b'd']], [[b'h', b'h']]], ragged_rank=2)), #========================================================================= # 3 Batch Dimensions #========================================================================= dict( descr=( 'params: [B1, (B2), (B3), (P1)], indices: [B1, (B2), (B3), I], ' 'result: [B1, (B2), (B3), I]'), params=ragged_factory_ops.constant_value( [[[['a', 'b', 'c'], ['d']], [['e', 'f']]]], ragged_rank=3), indices=ragged_factory_ops.constant_value( [[[[2, 0], [0, 0]], [[1, 0]]]], ragged_rank=2), expected=ragged_factory_ops.constant_value( [[[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]]], ragged_rank=2)), ]) def testRaggedBatchGather(self, descr, params, indices, expected): result = ragged_batch_gather_ops.batch_gather(params, indices) self.assertAllEqual(result, expected) @parameterized.parameters([ # Docstring example: dict( descr='Docstring example', params=[['a', 'b', 'c'], ['d'], [], ['e']], indices=[[1, 2, -1], [], [], [0, 10]], expected=[['b', 'c', 'FOO'], [], [], ['e', 'FOO']], default_value='FOO', ), # Dimensions: # indices: [4] # params: [2, (d1), (d2)] dict( descr='params: [2, (d1), (d2), indices: [4]', indices=[1, 100, 0, -1], params=[[['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall']], [["It's", 'always', 'darkest', 'before', 'the', 'dawn']]], expected=[[["It's", 'always', 'darkest', 'before', 'the', 'dawn']], [['$NONE^']], [['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 
'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall']], [['$NONE^']]], ), # Dimensions: # params: [1, (d1)] # indices: [3] dict( descr='params: rank 2, indices: rank 1', params=[ ['Bruce', 'Wayne'], ], indices=[-1, 0, 1000], expected=[['$NONE^'], ['Bruce', 'Wayne'], ['$NONE^']] ), # Dimensions: # params: [1, (d1)] # indices: [1, (d2)] dict( descr='Test underbound indices of shape [1, (d2)]', params=[ ['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ], indices=[[8, -1]], expected=[['!', '$NONE^']], ), dict( descr='Test underbound indices of shape [2, (d2)]', params=[ ['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Who', 'let', 'the', 'dogs', 'out', '?'], ], indices=[[8, -1], [1, 100]], expected=[['!', '$NONE^'], ['let', '$NONE^']], ), # Dimensions: # params: [2, (d1)] # indices: [2, (d2)] dict( descr='Test underbound indices of rank 2', params=[ ['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together', 'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the', 'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.', 'His', 'black', 'beard', 'dripped', 'down', 'over', 'the', 'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand', 'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He', 'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes', 'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then', 'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',', 'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against', 'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out', 'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of', 'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they', 'had', 'previously', 'chanted', 'in', 'Hebrew', '.']], indices=[[8, -1], [3, 23, 35, 45, 75, 83, -121]], expected=[['!', '$NONE^'], ['.', '.', '.', '.', '!', '.', '$NONE^']], ), dict( descr='Test overbound indices of rank 2', params=[ ['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together', 'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the', 'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.', 'His', 'black', 'beard', 'dripped', 'down', 'over', 'the', 'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand', 'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He', 'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes', 'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then', 'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',', 'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against', 'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out', 'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of', 'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they', 'had', 'previously', 'chanted', 'in', 'Hebrew', '.']], indices=[[8, 8823], [3, 23, 35, 45, 75, 83, 1234]], expected=[['!', '$NONE^'], ['.', '.', 
'.', '.', '!', '.', '$NONE^']], ), # Dimensions: # params: [2, (d1), 2] # indices: [2, (d2)] dict( descr='params: rank 3, indices: rank 2', params=[ [['The', 'deal'], ['takeover', 'offer'], ['from', 'Microsoft']], [['Who', 'let'], ['the', 'dogs'], ['out', '?']], ], ragged_rank=1, indices=[[1, -1, 2, 30], [1, 100]], indices_ragged_rank=1, expected=[[['takeover', 'offer'], ['$NONE^', '$NONE^'], ['from', 'Microsoft'], ['$NONE^', '$NONE^']], [['the', 'dogs'], ['$NONE^', '$NONE^']]], expected_ragged_rank=1, default_value=['$NONE^', '$NONE^'], ), # Dimensions: # params: [2, (d1), (d2)] # indices: [2, (d3)] dict( descr='params: [2, (d1), (d2)], indices: [2, (d3)]', params=[ [['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall'], ], [['It\'s', 'always', 'darkest', 'before', 'the', 'dawn']] ], indices=[[1, 100], [0, -1]], expected=[[['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall'], ['$NONE^']], [["It's", 'always', 'darkest', 'before', 'the', 'dawn'], ['$NONE^']]] ), # Dimensions: # params: [2, (d1), (d2)] # indices: [2, (d1), (d3)] dict( descr='Test overbound indices of rank 3', params=[ [['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'], ['Foo', 'bar', 'mar']], [['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together', 'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the', 'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.', 'His', 'black', 'beard', 'dripped', 'down', 'over', 'the', 'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand', 'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He', 'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes', 'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then', 'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',', 'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against', 'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out', 'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of', 'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they', 'had', 'previously', 'chanted', 'in', 'Hebrew', '.'], ['I', 'too', 'was', 'hustled', 'scammed', 'bamboozled', 'hood', 'winked', 'lead', 'astray']] ], indices=[[[8, 8823], [0, 100]], [[3, 23, 35, 45, 75, 83, 1234], [5]]], expected=[[['!', '$NONE^'], ['Foo', '$NONE^']], [['.', '.', '.', '.', '!', '.', '$NONE^'], ['bamboozled']]], ), # params.shape = [2, (d1), 8] # indices.shape = [2, (d1), 3] dict( descr='params = [2, (2, 1), 8], indices = [2, (2, 1), 3]', params=[[['h'] * 8, ['w'] * 8], [['b'] * 8]], ragged_rank=1, indices=[[[0, 100, 1], [0, 1, 0]], [[1, 0, 0]]], indices_ragged_rank=1, expected=[[['h', '$NONE^', 'h'], ['w', 'w', 'w']], [['b', 'b', 'b']]], expected_ragged_rank=1, ), ]) def testRaggedBatchGatherWithDefault( self, descr, params, indices, expected, indices_ragged_rank=None, expected_ragged_rank=None, ragged_rank=None, default_value='$NONE^'): params = ragged_factory_ops.constant(params, ragged_rank=ragged_rank) indices = ragged_factory_ops.constant( indices, ragged_rank=indices_ragged_rank or ragged_rank) expected = ragged_factory_ops.constant( expected, ragged_rank=expected_ragged_rank or ragged_rank) result = ragged_batch_gather_with_default_op.batch_gather_with_default( params, indices, default_value) self.assertAllEqual(result, expected) 
@parameterized.parameters([ # Dimensions: # params: dims [2, 5], indices: [2, 2] dict( descr='params: dims [2, 5], indices: [2, 2]', params=[ ['The', 'deal', 'came', 'about', '18'], ['He', 'left', 'us', '.', 'Little']], indices=[[0, -1], [3, 121]], expected=[['The', '$NONE^'], ['.', '$NONE^']], default_value='$NONE^', ), # Dimensions: # params: dims [2, 2, 5], indices: [2, 2] dict( descr='params: dims [2, 2, 5], indices: [2, 2]', params=[ [['The', 'deal', 'came', 'about', '18'], ['The', 'deal', 'came', 'about', '19'], ], [['He', 'left', 'us', '.', 'Little'], ['The', 'deal', 'came', 'about', '20'], ] ], indices=[[0, -1], [0, 121]], expected=[[['The', 'deal', 'came', 'about', '18'], ['$NONE^', '$NONE^', '$NONE^', '$NONE^', '$NONE^']], [['He', 'left', 'us', '.', 'Little'], ['$NONE^', '$NONE^', '$NONE^', '$NONE^', '$NONE^']]], default_value='$NONE^', ), # Test default_value with shape [5] dict( descr='params: dims [2, 2, 5], indices: [2, 2]', params=[ [['The', 'deal', 'came', 'about', '18'], ['The', 'deal', 'came', 'about', '19'], ], [['He', 'left', 'us', '.', 'Little'], ['The', 'deal', 'came', 'about', '20'], ] ], indices=[[0, -1], [0, 121]], expected=[[['The', 'deal', 'came', 'about', '18'], [':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:']], [['He', 'left', 'us', '.', 'Little'], [':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:']]], default_value=[':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:'], ), ]) def testRaggedBatchGatherWithDefaultOnTensors( self, descr, params, indices, expected, default_value): params = constant_op.constant(params) indices = constant_op.constant(indices) expected = constant_op.constant(expected) result = ragged_batch_gather_with_default_op.batch_gather_with_default( params, indices, default_value) self.assertAllEqual(expected, result) @parameterized.parameters([ dict( params=[['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.']], indices=[[[8, -1]]], # Exception here because different errors are thrown in eager vs # graph mode. 
error=Exception, default_value='$NONE^', ), ]) def testRankMismatch( self, params, indices, default_value, error): params = ragged_factory_ops.constant(params) indices = ragged_factory_ops.constant(indices) with self.assertRaises(error): _ = ragged_batch_gather_with_default_op.batch_gather_with_default( params, indices, default_value) @parameterized.parameters([ # Dimensions: # params: [2, (d1), 2] # indices: [2, (d2)] # default_value: [] dict( descr='params: rank 3, indices: rank 2, default: rank = [], but' ' should be [2]', params=[ [['The', 'deal'], ['takeover', 'offer'], ['from', 'Microsoft']], [['Who', 'let'], ['the', 'dogs'], ['out', '?']], ], ragged_rank=1, indices=[[1, -1, 2, 30], [1, 100]], indices_ragged_rank=1, default_value='$NONE^', error=Exception, ) ]) def testInvalidDefaultValueRank( self, descr, params, indices, default_value, error, ragged_rank=None, indices_ragged_rank=None): params = ragged_factory_ops.constant(params, ragged_rank=ragged_rank) indices = ragged_factory_ops.constant( indices, ragged_rank=indices_ragged_rank) with self.assertRaises(error): _ = ragged_batch_gather_with_default_op.batch_gather_with_default( params, indices, default_value) def testRaggedBatchGatherUnknownRankError(self): if context.executing_eagerly(): return params = [['a', 'b'], ['c', 'd']] indices = array_ops.placeholder(dtypes.int32, shape=None) ragged_indices = ragged_tensor.RaggedTensor.from_row_splits( indices, [0, 2, 4]) with self.assertRaisesRegexp( ValueError, 'batch_gather does not allow indices with unknown shape.'): ragged_batch_gather_ops.batch_gather(params, indices) with self.assertRaisesRegexp( ValueError, 'batch_gather does not allow indices with unknown shape.'): ragged_batch_gather_ops.batch_gather(params, ragged_indices) @parameterized.parameters( [ dict( params=ragged_factory_ops.constant_value([['a'], ['b'], ['c']]), indices=ragged_factory_ops.constant_value([[0], [0]]), message='Dimensions 3 and 2 are not compatible'), dict( params=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], indices=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]], [[0]]]), message='batch shape from indices does not match params shape'), dict( # rank mismatch params=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]], [[0]]]), indices=ragged_factory_ops.constant_value([[[0, 0]], [[0, 0, 0]], [[0]]]), error=(ValueError, errors.InvalidArgumentError)), dict( params=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]], [[0]], [[0]]]), indices=ragged_factory_ops.constant_value([[[0, 0]], [[0, 0, 0]], [[0]]]), error=errors.InvalidArgumentError, message='.*Condition x == y did not hold.*'), dict( params=ragged_factory_ops.constant_value(['a', 'b', 'c']), indices=ragged_factory_ops.constant_value([[0], [0]]), message='batch shape from indices does not match params shape'), dict( params=ragged_factory_ops.constant_value([['a']]), indices=0, message='indices.rank must be at least 1.'), dict( params=ragged_factory_ops.constant_value([['a']]), indices=[[[0]]], message='batch shape from indices does not match params shape'), ]) def testRaggedBatchGatherStaticError(self, params, indices, message=None, error=ValueError): with self.assertRaisesRegexp(error, message): ragged_batch_gather_ops.batch_gather(params, indices) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py
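A minimal usage sketch (not part of the test file above), assuming the same internal modules the tests import; as the parameterized cases show, indices that fall outside a row pick up the supplied default value.

from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op
from tensorflow.python.ops.ragged import ragged_factory_ops

params = ragged_factory_ops.constant([['a', 'b', 'c'], ['d', 'e']])
indices = ragged_factory_ops.constant([[2, -1], [0, 100]])
# Negative indices and indices past the row length yield the default value.
result = ragged_batch_gather_with_default_op.batch_gather_with_default(
    params, indices, '$NONE^')
# Expected, following the cases above: [['c', '$NONE^'], ['d', '$NONE^']]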
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_range op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedRangeOpTest(test_util.TensorFlowTestCase): def testDocStringExamples(self): """Examples from ragged_range.__doc__.""" rt1 = ragged_math_ops.range([3, 5, 2]) self.assertAllEqual(rt1, [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]) rt2 = ragged_math_ops.range([0, 5, 8], [3, 3, 12]) self.assertAllEqual(rt2, [[0, 1, 2], [], [8, 9, 10, 11]]) rt3 = ragged_math_ops.range([0, 5, 8], [3, 3, 12], 2) self.assertAllEqual(rt3, [[0, 2], [], [8, 10]]) def testBasicRanges(self): # Specify limits only. self.assertAllEqual( ragged_math_ops.range([0, 3, 5]), [list(range(0)), list(range(3)), list(range(5))]) # Specify starts and limits. self.assertAllEqual( ragged_math_ops.range([0, 3, 5], [2, 3, 10]), [list(range(0, 2)), list(range(3, 3)), list(range(5, 10))]) # Specify starts, limits, and deltas. self.assertAllEqual( ragged_math_ops.range([0, 3, 5], [4, 4, 15], [2, 3, 4]), [list(range(0, 4, 2)), list(range(3, 4, 3)), list(range(5, 15, 4))]) def testFloatRanges(self): expected = [[0.0, 0.4, 0.8, 1.2, 1.6, 2.0, 2.4, 2.8, 3.2, 3.6], [3.0], [5.0, 7.2, 9.4, 11.6, 13.8]] actual = ragged_math_ops.range([0.0, 3.0, 5.0], [3.9, 4.0, 15.0], [0.4, 1.5, 2.2]) self.assertAllClose(actual, expected) def testNegativeDeltas(self): self.assertAllEqual( ragged_math_ops.range([0, 3, 5], limits=0, deltas=-1), [list(range(0, 0, -1)), list(range(3, 0, -1)), list(range(5, 0, -1))]) self.assertAllEqual( ragged_math_ops.range([0, -3, 5], limits=0, deltas=[-1, 1, -2]), [list(range(0, 0, -1)), list(range(-3, 0, 1)), list(range(5, 0, -2))]) def testBroadcast(self): # Specify starts and limits, broadcast deltas. self.assertAllEqual( ragged_math_ops.range([0, 3, 5], [4, 4, 15], 3), [list(range(0, 4, 3)), list(range(3, 4, 3)), list(range(5, 15, 3))]) # Broadcast all arguments. 
self.assertAllEqual( ragged_math_ops.range(0, 5, 1), [list(range(0, 5, 1))]) def testEmptyRanges(self): rt1 = ragged_math_ops.range([0, 5, 3], [0, 3, 5]) rt2 = ragged_math_ops.range([0, 5, 5], [0, 3, 5], -1) self.assertAllEqual(rt1, [[], [], [3, 4]]) self.assertAllEqual(rt2, [[], [5, 4], []]) def testShapeFnErrors(self): self.assertRaises((ValueError, errors.InvalidArgumentError), ragged_math_ops.range, [[0]], 5) self.assertRaises((ValueError, errors.InvalidArgumentError), ragged_math_ops.range, 0, [[5]]) self.assertRaises((ValueError, errors.InvalidArgumentError), ragged_math_ops.range, 0, 5, [[0]]) self.assertRaises((ValueError, errors.InvalidArgumentError), ragged_math_ops.range, [0], [1, 2]) def testKernelErrors(self): with self.assertRaisesRegexp(errors.InvalidArgumentError, r'Requires delta != 0'): self.evaluate(ragged_math_ops.range(0, 0, 0)) def testShape(self): self.assertAllEqual( ragged_math_ops.range(0, 0, 1).shape.as_list(), [1, None]) self.assertAllEqual( ragged_math_ops.range([1, 2, 3]).shape.as_list(), [3, None]) self.assertAllEqual( ragged_math_ops.range([1, 2, 3], [4, 5, 6]).shape.as_list(), [3, None]) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_range_op_test.py
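For reference, a short sketch restating the docstring examples exercised by testDocStringExamples above; the commented values are the evaluated results.

from tensorflow.python.ops.ragged import ragged_math_ops

rt1 = ragged_math_ops.range([3, 5, 2])                 # [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]
rt2 = ragged_math_ops.range([0, 5, 8], [3, 3, 12])     # [[0, 1, 2], [], [8, 9, 10, 11]]
rt3 = ragged_math_ops.range([0, 5, 8], [3, 3, 12], 2)  # [[0, 2], [], [8, 10]]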
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.ragged in eager execution mode.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ dict(pylist=[[b'a', b'b'], [b'c']]), dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]]), dict(pylist=[[[1, 2], [3, 4]], [[5, 6], [], [7, 8]]], ragged_rank=1), ]) def testRaggedTensorToList(self, pylist, ragged_rank=None): rt = ragged_factory_ops.constant(pylist, ragged_rank) self.assertAllEqual(rt, pylist) @parameterized.parameters([ dict(pylist=[[b'a', b'b'], [b'c']]), dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]]), ]) def testRaggedTensorStr(self, pylist): rt = ragged_factory_ops.constant(pylist) self.assertEqual(str(rt), '<tf.RaggedTensor %s>' % pylist) if __name__ == '__main__': ops.enable_eager_execution() googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_eager_test.py
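A small eager-mode sketch mirroring the test above; it assumes eager execution is enabled the same way the test module does before calling main().

from tensorflow.python.framework import ops
from tensorflow.python.ops.ragged import ragged_factory_ops

ops.enable_eager_execution()
rt = ragged_factory_ops.constant([[b'a', b'b'], [b'c']])
str(rt)  # "<tf.RaggedTensor [[b'a', b'b'], [b'c']]>", per testRaggedTensorStr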
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for converting between row_splits and segment_ids.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.util.tf_export import tf_export # For background on "segments" and "segment ids", see: # https://www.tensorflow.org/api_docs/python/tf/math#Segmentation @tf_export("ragged.row_splits_to_segment_ids") def row_splits_to_segment_ids(splits, name=None, out_type=None): """Generates the segmentation corresponding to a RaggedTensor `row_splits`. Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if `splits[j] <= i < splits[j+1]`. Example: ```python >>> ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9]).eval() [ 0 0 0 2 2 3 4 4 4 ] ``` Args: splits: A sorted 1-D integer Tensor. `splits[0]` must be zero. name: A name prefix for the returned tensor (optional). out_type: The dtype for the return value. Defaults to `splits.dtype`, or `tf.int64` if `splits` does not have a dtype. Returns: A sorted 1-D integer Tensor, with `shape=[splits[-1]]` Raises: ValueError: If `splits` is invalid. """ with ops.name_scope(name, "RaggedSplitsToSegmentIds", [splits]) as name: splits = ops.convert_to_tensor( splits, name="splits", preferred_dtype=dtypes.int64) if splits.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("splits must have dtype int32 or int64") splits.shape.assert_has_rank(1) if tensor_shape.dimension_value(splits.shape[0]) == 0: raise ValueError("Invalid row_splits: []") if out_type is None: out_type = splits.dtype else: out_type = dtypes.as_dtype(out_type) row_lengths = splits[1:] - splits[:-1] nrows = array_ops.shape(splits, out_type=out_type)[-1] - 1 indices = math_ops.range(nrows) return ragged_util.repeat(indices, repeats=row_lengths, axis=0) # For background on "segments" and "segment ids", see: # https://www.tensorflow.org/api_docs/python/tf/math#Segmentation @tf_export("ragged.segment_ids_to_row_splits") def segment_ids_to_row_splits(segment_ids, num_segments=None, out_type=None, name=None): """Generates the RaggedTensor `row_splits` corresponding to a segmentation. Returns an integer vector `splits`, where `splits[0] = 0` and `splits[i] = splits[i-1] + count(segment_ids==i)`. Example: ```python >>> ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4]).eval() [ 0 3 3 5 6 9 ] ``` Args: segment_ids: A 1-D integer Tensor. num_segments: A scalar integer indicating the number of segments. Defaults to `max(segment_ids) + 1` (or zero if `segment_ids` is empty). 
out_type: The dtype for the return value. Defaults to `segment_ids.dtype`, or `tf.int64` if `segment_ids` does not have a dtype. name: A name prefix for the returned tensor (optional). Returns: A sorted 1-D integer Tensor, with `shape=[num_segments + 1]`. """ if out_type is None: if isinstance(segment_ids, ops.Tensor): out_type = segment_ids.dtype elif isinstance(num_segments, ops.Tensor): out_type = num_segments.dtype else: out_type = dtypes.int64 else: out_type = dtypes.as_dtype(out_type) with ops.name_scope(name, "SegmentIdsToRaggedSplits", [segment_ids]) as name: # Note: we cast int64 tensors to int32, since bincount currently only # supports int32 inputs. segment_ids = ragged_util.convert_to_int_tensor(segment_ids, "segment_ids", dtype=dtypes.int32) segment_ids.shape.assert_has_rank(1) if num_segments is not None: num_segments = ragged_util.convert_to_int_tensor(num_segments, "num_segments", dtype=dtypes.int32) num_segments.shape.assert_has_rank(0) row_lengths = math_ops.bincount( segment_ids, minlength=num_segments, maxlength=num_segments, dtype=out_type) splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0) # Update shape information, if possible. if num_segments is not None: const_num_segments = tensor_util.constant_value(num_segments) if const_num_segments is not None: splits.set_shape(tensor_shape.TensorShape([const_num_segments + 1])) return splits
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/segment_id_ops.py
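A round-trip sketch of the two conversions defined above; the commented values are the evaluated results shown in the docstrings (in graph mode the returned Tensors would still need to be evaluated).

from tensorflow.python.ops.ragged import segment_id_ops

splits = [0, 3, 3, 5, 6, 9]
ids = segment_id_ops.row_splits_to_segment_ids(splits)  # [0 0 0 2 2 3 4 4 4]
back = segment_id_ops.segment_ids_to_row_splits(ids)    # [0 3 3 5 6 9]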
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operator overloads for `RaggedTensor`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_getitem from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.util import tf_decorator def _right(operator): """Right-handed version of an operator: swap args x and y.""" return tf_decorator.make_decorator(operator, lambda y, x: operator(x, y)) # Indexing ragged_tensor.RaggedTensor.__getitem__ = ragged_getitem.ragged_tensor_getitem # Ordering operators ragged_tensor.RaggedTensor.__ge__ = math_ops.greater_equal ragged_tensor.RaggedTensor.__gt__ = math_ops.greater ragged_tensor.RaggedTensor.__le__ = math_ops.less_equal ragged_tensor.RaggedTensor.__lt__ = math_ops.less # Logical operators ragged_tensor.RaggedTensor.__and__ = math_ops.logical_and ragged_tensor.RaggedTensor.__rand__ = _right(math_ops.logical_and) ragged_tensor.RaggedTensor.__invert__ = math_ops.logical_not ragged_tensor.RaggedTensor.__ror__ = _right(math_ops.logical_or) ragged_tensor.RaggedTensor.__or__ = math_ops.logical_or ragged_tensor.RaggedTensor.__xor__ = math_ops.logical_xor ragged_tensor.RaggedTensor.__rxor__ = _right(math_ops.logical_xor) # Arithmetic operators ragged_tensor.RaggedTensor.__abs__ = math_ops.abs ragged_tensor.RaggedTensor.__add__ = math_ops.add ragged_tensor.RaggedTensor.__radd__ = _right(math_ops.add) ragged_tensor.RaggedTensor.__div__ = math_ops.div ragged_tensor.RaggedTensor.__rdiv__ = _right(math_ops.div) ragged_tensor.RaggedTensor.__floordiv__ = math_ops.floordiv ragged_tensor.RaggedTensor.__rfloordiv__ = _right(math_ops.floordiv) ragged_tensor.RaggedTensor.__mod__ = math_ops.floormod ragged_tensor.RaggedTensor.__rmod__ = _right(math_ops.floormod) ragged_tensor.RaggedTensor.__mul__ = math_ops.multiply ragged_tensor.RaggedTensor.__rmul__ = _right(math_ops.multiply) ragged_tensor.RaggedTensor.__neg__ = math_ops.negative ragged_tensor.RaggedTensor.__pow__ = math_ops.pow ragged_tensor.RaggedTensor.__rpow__ = _right(math_ops.pow) ragged_tensor.RaggedTensor.__sub__ = math_ops.subtract ragged_tensor.RaggedTensor.__rsub__ = _right(math_ops.subtract) ragged_tensor.RaggedTensor.__truediv__ = math_ops.truediv ragged_tensor.RaggedTensor.__rtruediv__ = _right(math_ops.truediv) # Dummy methods def _dummy_bool(_): """Dummy method to prevent a RaggedTensor from being used as a Python bool.""" raise TypeError("RaggedTensor may not be used as a boolean.") ragged_tensor.RaggedTensor.__bool__ = _dummy_bool ragged_tensor.RaggedTensor.__nonzero__ = _dummy_bool
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_operators.py
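A hedged sketch of the operator overloads registered above; it assumes the ragged package (its __init__, below) has been imported so that the elementwise ops dispatch to ragged inputs.

from tensorflow.python.ops import ragged  # package import loads ragged_operators and ragged_dispatch
from tensorflow.python.ops.ragged import ragged_factory_ops

x = ragged_factory_ops.constant([[1, 2], [3]])
y = x + 10   # RaggedTensor.__add__ -> math_ops.add, broadcasting the scalar
z = x * x    # RaggedTensor.__mul__ -> math_ops.multiply, elementwise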
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ragged Tensors. This package defines ops for manipulating ragged tensors (`tf.RaggedTensor`), which are tensors with non-uniform shapes. In particular, each `RaggedTensor` has one or more *ragged dimensions*, which are dimensions whose slices may have different lengths. For example, the inner (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths. For a more detailed description of ragged tensors, see the `tf.RaggedTensor` class documentation and the [Ragged Tensor Guide](/guide/ragged_tensors). """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_batch_gather_ops from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op from tensorflow.python.ops.ragged import ragged_concat_ops from tensorflow.python.ops.ragged import ragged_conversion_ops from tensorflow.python.ops.ragged import ragged_dispatch from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.ops.ragged import ragged_getitem from tensorflow.python.ops.ragged import ragged_map_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_operators from tensorflow.python.ops.ragged import ragged_string_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_tensor_shape from tensorflow.python.ops.ragged import ragged_tensor_value from tensorflow.python.ops.ragged import ragged_where_op from tensorflow.python.ops.ragged import segment_id_ops # Add a list of the ops that support Ragged Tensors. __doc__ += ragged_dispatch.ragged_op_list() # pylint: disable=redefined-builtin
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/__init__.py
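A tiny illustration of the ragged (column) dimension described in the package docstring above, using the same example value.

from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
rt.row_lengths()  # [4, 0, 3, 1, 0] -- the slices rt[0, :] ... rt[4, :] have different lengths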
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged.row_lengths.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedRowLengthsOp(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ # Docstring Example dict( rt_input=[[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []], expected=[2, 0, 2, 1, 0]), dict( rt_input=[[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []], axis=2, expected=[[3, 1], [], [2, 1], [1], []]), # 2D Tensor (1 ragged dimension) dict( rt_input=[['a'], ['b', 'c', 'd'], ['e'], [], ['f']], expected=[1, 3, 1, 0, 1]), dict( rt_input=[['a'], ['b', 'c', 'd'], ['e'], [], ['f']], axis=0, expected=5), dict( rt_input=[['a', 'b', 'c', 'd', 'e', 'f', 'g']], expected=[7]), dict( rt_input=[[], ['a', 'b', 'c', 'd', 'e', 'f', 'g'], []], expected=[0, 7, 0]), dict( rt_input=[], ragged_rank=1, expected=[]), dict( rt_input=[], ragged_rank=1, axis=0, expected=0), # 3D Tensor (1 ragged dimension) dict( rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]], ragged_rank=1, axis=0, expected=2), dict( rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]], ragged_rank=1, axis=1, expected=[3, 2]), dict( rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]], ragged_rank=1, axis=2, expected=[[2, 2, 2], [2, 2]], expected_ragged_rank=1), # 3D Tensor (2 ragged dimensions) dict( rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]], axis=0, expected=2), dict( rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]], axis=-3, expected=2), dict( rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]], axis=1, expected=[3, 2]), dict( rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]], axis=-2, expected=[3, 2]), dict( rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]], axis=2, expected=[[2, 3, 0], [4, 1]], expected_ragged_rank=1), dict( rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]], axis=-1, expected=[[2, 3, 0], [4, 1]], expected_ragged_rank=1), ]) # pyformat: disable def testRowLengths(self, rt_input, expected, axis=1, ragged_rank=None, expected_ragged_rank=None): rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank) lengths = rt.row_lengths(axis) self.assertAllEqual(lengths, expected) if expected_ragged_rank is not None: if isinstance(lengths, ragged_tensor.RaggedTensor): self.assertEqual(lengths.ragged_rank, expected_ragged_rank) else: self.assertEqual(0, expected_ragged_rank) @parameterized.parameters([ dict( # axis=2 out of bounds: expected -2<=axis<2. 
rt_input=[[10, 20], [30]], axis=2, exception=(ValueError, errors.InvalidArgumentError)), dict( # axis=-3 out of bounds: expected -2<=axis<2. rt_input=[[2, 3, 0], [4, 1, 2]], axis=-3, exception=(ValueError, errors.InvalidArgumentError)), ]) def testErrors(self, rt_input, exception, message=None, axis=1): rt = ragged_factory_ops.constant(rt_input) with self.assertRaisesRegexp(exception, message): rt.row_lengths(axis) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_row_lengths_op_test.py
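A short sketch of row_lengths on the docstring-example value used in the parameterized cases above; the commented values are the expected results from those cases.

from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
rt.row_lengths()        # axis=1 (default): [2, 0, 2, 1, 0]
rt.row_lengths(axis=2)  # [[3, 1], [], [2, 1], [1], []]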
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.ragged.ragged_tensor_shape.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_tensor_shape from tensorflow.python.ops.ragged.ragged_tensor_shape import RaggedTensorDynamicShape from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedTensorShapeTest(test_util.TensorFlowTestCase, parameterized.TestCase): def assertShapeEq(self, x, y): assert isinstance(x, RaggedTensorDynamicShape) assert isinstance(y, RaggedTensorDynamicShape) self.assertLen(x.partitioned_dim_sizes, len(y.partitioned_dim_sizes)) for x_dims, y_dims in zip(x.partitioned_dim_sizes, y.partitioned_dim_sizes): self.assertAllEqual(x_dims, y_dims) self.assertAllEqual(x.inner_dim_sizes, y.inner_dim_sizes) @parameterized.parameters([ dict(value='x', expected_dim_sizes=[]), dict(value=['a', 'b', 'c'], expected_dim_sizes=[3]), dict(value=[['a', 'b', 'c'], ['d', 'e', 'f']], expected_dim_sizes=[2, 3]), dict( value=[[['a', 'b', 'c'], ['d', 'e', 'f']]], expected_dim_sizes=[1, 2, 3]), dict( value=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e']]), expected_dim_sizes=[2, [3, 2]]), dict( value=ragged_factory_ops.constant_value([[['a', 'b', 'c'], ['d', 'e']]]), expected_dim_sizes=[1, [2], [3, 2]]), dict( value=ragged_factory_ops.constant_value( [[['a', 'b', 'c'], ['d', 'e', 'f']]], ragged_rank=1), expected_dim_sizes=[1, [2], 3]), dict( value=ragged_factory_ops.constant_value( [[[[1], [2]], [[3], [4]]], [[[5], [6]]]], ragged_rank=1), expected_dim_sizes=[2, [2, 1], 2, 1]), dict( value=ragged_factory_ops.constant_value([[10, 20], [30]]), expected_dim_sizes=[2, [2, 1]]), # Docstring examples: dict(value=[[1, 2, 3], [4, 5, 6]], expected_dim_sizes=[2, 3]), dict( value=ragged_factory_ops.constant_value([[1, 2], [], [3, 4, 5]]), expected_dim_sizes=[3, [2, 0, 3]]), dict( value=ragged_factory_ops.constant_value([[[1, 2], [3, 4]], [[5, 6]]], ragged_rank=1), expected_dim_sizes=[2, [2, 1], 2]), dict( value=ragged_factory_ops.constant_value([[[1, 2], [3]], [[4, 5]]]), expected_dim_sizes=[2, [2, 1], [2, 1, 2]]), ]) def testFromTensor(self, value, expected_dim_sizes): shape = RaggedTensorDynamicShape.from_tensor(value) expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dim_sizes) self.assertShapeEq(shape, expected) @parameterized.parameters([ dict(dim_sizes=[], rank=0, expected_dim_sizes=[]), dict(dim_sizes=[], rank=3, expected_dim_sizes=[1, 1, 1]), dict(dim_sizes=[3], rank=1, expected_dim_sizes=[3]), dict(dim_sizes=[3], rank=3, 
expected_dim_sizes=[1, 1, 3]), dict(dim_sizes=[2, 3], rank=3, expected_dim_sizes=[1, 2, 3]), dict(dim_sizes=[3, [3, 2, 4]], rank=2, expected_dim_sizes=[3, [3, 2, 4]]), dict( dim_sizes=[3, [3, 2, 4]], rank=4, expected_dim_sizes=[1, 1, 3, [3, 2, 4]]), dict( dim_sizes=[3, [3, 2, 4], 2, 3], rank=5, expected_dim_sizes=[1, 3, [3, 2, 4], 2, 3]), ]) def testBroadcastToRank(self, dim_sizes, rank, expected_dim_sizes): shape = RaggedTensorDynamicShape.from_dim_sizes(dim_sizes) expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dim_sizes) broadcasted_shape = shape.broadcast_to_rank(rank) self.assertShapeEq(broadcasted_shape, expected) self.assertEqual(broadcasted_shape.rank, rank) @parameterized.parameters([ #========================================================================= # dimension[axis] is uniform inner; and row_lengths is a scalar #========================================================================= # shape: [BROADCAST(UNIFORM), UNIFORM, UNIFORM] dict(axis=0, row_length=3, original_dim_sizes=[1, 4, 5], broadcast_dim_sizes=[3, 4, 5]), # shape: [UNIFORM, UNIFORM, BROADCAST(UNIFORM)] dict(axis=2, row_length=5, original_dim_sizes=[3, 4, 1], broadcast_dim_sizes=[3, 4, 5]), # shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM)] dict(axis=2, row_length=5, original_dim_sizes=[3, [3, 2, 8], 1], broadcast_dim_sizes=[3, [3, 2, 8], 5]), # shape: [UNIFORM, RAGGED, RAGGED, UNIFORM, UNIFORM, BROADCAST(UNIFORM)] dict(axis=5, row_length=5, original_dim_sizes=[2, [2, 1], [3, 2, 8], 3, 4, 1], broadcast_dim_sizes=[2, [2, 1], [3, 2, 8], 3, 4, 5]), #========================================================================= # dimension[axis] is uniform inner; and row_lengths is a vector #========================================================================= # shape: [UNIFORM, BROADCAST(UNIFORM)] dict(axis=1, row_length=[2, 0, 1], original_dim_sizes=[3, 1], broadcast_dim_sizes=[3, [2, 0, 1]]), # shape: [UNIFORM, BROADCAST(UNIFORM), UNIFORM] dict(axis=1, row_length=[2, 0, 1], original_dim_sizes=[3, 1, 5], broadcast_dim_sizes=[3, [2, 0, 1], 5]), # shape: [UNIFORM, UNIFORM, BROADCAST(UNIFORM)] dict(axis=2, row_length=[2, 0, 1, 3, 8, 2, 3, 4, 1, 8, 7, 0], original_dim_sizes=[4, 3, 1], broadcast_dim_sizes=[4, 3, [2, 0, 1, 3, 8, 2, 3, 4, 1, 8, 7, 0]]), # shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM)] dict(axis=2, row_length=[2, 5, 3], original_dim_sizes=[2, [2, 1], 1], broadcast_dim_sizes=[2, [2, 1], [2, 5, 3]]), # shape: [UNIFORM, RAGGED, UNIFORM, UNIFORM, BROADCAST(UNIFORM), UNIFORM] dict(axis=4, row_length=list(range(18)), original_dim_sizes=[2, [2, 1], 3, 2, 1, 8], broadcast_dim_sizes=[2, [2, 1], 3, 2, list(range(18)), 8]), #========================================================================= # dimension[axis] is uniform partitioned; and row_lengths is a scalar #========================================================================= # shape: [BROADCAST(UNIFORM), RAGGED] dict(axis=0, row_length=3, original_dim_sizes=[1, [5]], broadcast_dim_sizes=[3, [5, 5, 5]]), # shape: [BROADCAST(UNIFORM), UNIFORM, RAGGED] dict(axis=0, row_length=2, original_dim_sizes=[1, 3, [3, 0, 2]], broadcast_dim_sizes=[2, 3, [3, 0, 2, 3, 0, 2]]), # shape: [BROADCAST(UNIFORM), RAGGED, RAGGED, UNIFORM, UNIFORM] dict(axis=0, row_length=3, original_dim_sizes=[1, [3], [3, 5, 2], 9, 4, 5], broadcast_dim_sizes=[3, [3, 3, 3], [3, 5, 2, 3, 5, 2, 3, 5, 2], 9, 4, 5]), # shape: [BROADCAST(UNIFORM), UNIFORM, RAGGED, UNIFORM] dict(axis=0, row_length=2, original_dim_sizes=[1, 2, [2, 1], [3, 5, 2], 2], broadcast_dim_sizes=[2, 2, [2, 1, 2, 1], 
[3, 5, 2, 3, 5, 2], 2]), # shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, UNIFORM] dict(axis=1, row_length=2, original_dim_sizes=[3, 1, [4, 0, 2], 5], broadcast_dim_sizes=[3, 2, [4, 0, 2, 4, 0, 2], 5]), # shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED] dict(axis=1, row_length=1, original_dim_sizes=[2, 3, (1, 2, 3, 4, 5, 6)], broadcast_dim_sizes=[2, 3, (1, 2, 3, 4, 5, 6)]), #========================================================================= # dimension[axis] is uniform partitioned; and row_lengths is a vector #========================================================================= # shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, UNIFORM] dict(axis=1, row_length=[4, 1, 2], original_dim_sizes=[ 3, # axis=0 1, # axis=1 (broadcast) [3, 1, 2], # axis=2 5], # axis=3 broadcast_dim_sizes=[ 3, # axis=0 [4, 1, 2], # axis=1 (broadcast) [3, 3, 3, 3, 1, 2, 2], # axis=2 5]), # axis=3 # shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, RAGGED] dict(axis=1, row_length=[2, 0, 3], original_dim_sizes=[ 3, # axis=0 1, # axis=1 (broadcast) [3, 1, 2], # axis=2 [3, 1, 4, 1, 5, 9]], # axis=3 broadcast_dim_sizes=[ 3, # axis=0 [2, 0, 3], # axis=1 (broadcast) [3, 3, 2, 2, 2], # axis=2 [3, 1, 4, 3, 1, 4, 5, 9, 5, 9, 5, 9]]), # axis=3 # shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM), RAGGED, RAGGED, UNIFORM] dict(axis=2, row_length=[4, 1, 2], original_dim_sizes=[ 3, # axis=0 [2, 0, 1], # axis=1 1, # axis=2 (broadcast) [3, 2, 1], # axis=3 [1, 0, 1, 0, 2, 3], # axis=4 5], # axis=5 broadcast_dim_sizes=[ 3, # axis=0 [2, 0, 1], # axis=2 [4, 1, 2], # axis=2 (broadcast) [3, 3, 3, 3, 2, 1, 1], # axis=3 [1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, # axis=4 2, 3, 3], 5]), # axis=5 dict(axis=0, row_length=2, original_dim_sizes=[1, 1, 2, (2, 1)], broadcast_dim_sizes=[2, 1, 2, (2, 1, 2, 1)]), dict(axis=1, row_length=(2, 1), original_dim_sizes=[2, 1, 2, (2, 1, 2, 1)], broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]), dict(axis=2, row_length=2, original_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)], broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]), dict(axis=3, row_length=(2, 1, 2, 1, 2, 1), original_dim_sizes=[2, (2, 1), 2, 1], broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]), ]) # pyformat: disable def testBroadcastDimension(self, axis, row_length, original_dim_sizes, broadcast_dim_sizes): """Tests for the broadcast_dimension method. Verifies that: * `original.broadcast_dimension(axis, row_length) == broadcast` * `broadcast.broadcast_dimension(axis, row_length) == broadcast` * `broadcast.broadcast_dimension(axis, 1) == broadcast` Args: axis: The axis to broadcast row_length: The slice lengths to broadcast to. original_dim_sizes: The dimension sizes before broadcasting. original_dim_sizes[axis] should be equal to `1` or `row_length`. broadcast_dim_sizes: THe dimension sizes after broadcasting. 
""" original_shape = RaggedTensorDynamicShape.from_dim_sizes(original_dim_sizes) bcast_shape = RaggedTensorDynamicShape.from_dim_sizes(broadcast_dim_sizes) self.assertEqual(original_shape.rank, bcast_shape.rank) # shape[axis].value == 1 and row_length > 1: bcast1 = original_shape.broadcast_dimension(axis, row_length) # shape[axis].value > 1 and row_length == shape[axis].value: bcast2 = bcast_shape.broadcast_dimension(axis, row_length) # shape[axis].value > 1 and row_length == 1: bcast3 = bcast_shape.broadcast_dimension(axis, 1) self.assertShapeEq(bcast1, bcast_shape) self.assertShapeEq(bcast2, bcast_shape) self.assertShapeEq(bcast3, bcast_shape) @parameterized.parameters( [ # Broadcast scalar dict(x_dims=[], y_dims=[], expected_dims=[]), dict(x_dims=[], y_dims=[2], expected_dims=[2]), dict(x_dims=[], y_dims=[2, 3], expected_dims=[2, 3]), dict( x_dims=[], y_dims=[2, (2, 3), (5, 7, 2, 0, 9)], expected_dims=[2, (2, 3), (5, 7, 2, 0, 9)]), # Broadcast vector dict(x_dims=[3], y_dims=[4, 2, 3], expected_dims=[4, 2, 3]), dict(x_dims=[1], y_dims=[4, 2, 3], expected_dims=[4, 2, 3]), dict(x_dims=[3], y_dims=[4, 2, 1], expected_dims=[4, 2, 3]), dict( x_dims=[3], y_dims=[3, (2, 3, 1), 1], expected_dims=[3, (2, 3, 1), 3]), dict(x_dims=[1], y_dims=[3, (2, 1, 3)], expected_dims=[3, (2, 1, 3)]), dict( x_dims=[1], y_dims=[3, (2, 1, 3), 8], expected_dims=[3, (2, 1, 3), 8]), dict( x_dims=[1], y_dims=[2, (2, 3), (5, 7, 2, 0, 9)], expected_dims=[2, (2, 3), (5, 7, 2, 0, 9)]), # Mixed broadcasting dict( x_dims=[ 1, # axis=0 3, # axis=1 (3, 0, 2), # axis=2 1, # axis=3 2, # axis=4 ], y_dims=[ 2, # axis=0 1, # axis=1 1, # axis=2 (7, 2), # axis=3 1, # axis=4 ], expected_dims=[ 2, # axis=0 3, # axis=1 (3, 0, 2, 3, 0, 2), # axis=2 (7, 7, 7, 7, 7, 2, 2, 2, 2, 2), # axis=3 2, # axis=4 ]), dict( x_dims=[2, (2, 1), 2, 1], y_dims=[1, 1, 2, (2, 1)], expected_dims=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]), ]) def testBroadcastDynamicShape(self, x_dims, y_dims, expected_dims): x_shape = RaggedTensorDynamicShape.from_dim_sizes(x_dims) y_shape = RaggedTensorDynamicShape.from_dim_sizes(y_dims) expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dims) result1 = ragged_tensor_shape.broadcast_dynamic_shape(x_shape, y_shape) result2 = ragged_tensor_shape.broadcast_dynamic_shape(y_shape, x_shape) self.assertShapeEq(expected, result1) self.assertShapeEq(expected, result2) def testRepr(self): shape = RaggedTensorDynamicShape.from_dim_sizes([2, (2, 1), 2, 1]) self.assertRegexpMatches( repr(shape), r'RaggedTensorDynamicShape\(' r'partitioned_dim_sizes=\(<[^>]+>, <[^>]+>\), ' r'inner_dim_sizes=<[^>]+>\)') @parameterized.parameters( [ dict( x=[[10], [20], [30]], # shape=[3, 1] dim_sizes=[3, 2], expected=[[10, 10], [20, 20], [30, 30]]), dict( x=[[10], [20], [30]], # shape=[3, 1] dim_sizes=[3, [3, 0, 2]], expected=ragged_factory_ops.constant_value( [[10, 10, 10], [], [30, 30]], dtype=np.int32)), dict( x=[[[1, 2, 3]], [[4, 5, 6]]], # shape = [2, 1, 3] dim_sizes=[2, [2, 3], 3], expected=ragged_factory_ops.constant_value( [[[1, 2, 3], [1, 2, 3]], [[4, 5, 6], [4, 5, 6], [4, 5, 6]]], dtype=np.int32, ragged_rank=1)), dict( x=[[[1]], [[2]]], # shape = [2, 1, 1] dim_sizes=[2, [2, 3], [0, 2, 1, 2, 0]], expected=ragged_factory_ops.constant_value( [[[], [1, 1]], [[2], [2, 2], []]], dtype=np.int32, ragged_rank=2)), dict( x=10, dim_sizes=[3, [3, 0, 2]], expected=ragged_factory_ops.constant_value([[10, 10, 10], [], [10, 10]])), ]) def testRaggedBroadcastTo(self, x, dim_sizes, expected): shape = RaggedTensorDynamicShape.from_dim_sizes(dim_sizes) result = 
ragged_tensor_shape.broadcast_to(x, shape) self.assertEqual( getattr(result, 'ragged_rank', 0), getattr(expected, 'ragged_rank', 0)) self.assertAllEqual(result, expected) @parameterized.parameters( [ dict( doc='x.shape=[3, (D1)]; y.shape=[3, 1]; bcast.shape=[3, (D1)]', x=ragged_factory_ops.constant_value([[1, 2, 3], [], [4, 5]], dtype=np.int32), y=[[10], [20], [30]], expected=ragged_factory_ops.constant_value([[11, 12, 13], [], [34, 35]])), dict( doc='x.shape=[3, (D1)]; y.shape=[]; bcast.shape=[3, (D1)]', x=ragged_factory_ops.constant_value([[1, 2, 3], [], [4, 5]], dtype=np.int32), y=10, expected=ragged_factory_ops.constant_value([[11, 12, 13], [], [14, 15]])), dict( doc='x.shape=[1, (D1)]; y.shape=[3, 1]; bcast.shape=[3, (D1)]', x=ragged_factory_ops.constant_value([[1, 2, 3]], dtype=np.int32), y=[[10], [20], [30]], expected=ragged_factory_ops.constant_value( [[11, 12, 13], [21, 22, 23], [31, 32, 33]], dtype=np.int32)), dict( doc=('x.shape=[2, (D1), 1]; y.shape=[1, (D2)]; ' 'bcast.shape=[2, (D1), (D2)]'), x=ragged_factory_ops.constant_value([[[1], [2], [3]], [[4]]], ragged_rank=1), y=ragged_factory_ops.constant_value([[10, 20, 30]]), expected=ragged_factory_ops.constant_value([[[11, 21, 31], [12, 22, 32], [13, 23, 33]], [[14, 24, 34]]])), dict( doc=('x.shape=[2, (D1), 1]; y.shape=[1, 1, 4]; ' 'bcast.shape=[2, (D1), 4]'), x=ragged_factory_ops.constant_value([[[10], [20]], [[30]]], ragged_rank=1), y=[[[1, 2, 3, 4]]], expected=ragged_factory_ops.constant_value( [[[11, 12, 13, 14], [21, 22, 23, 24]], [[31, 32, 33, 34]]], ragged_rank=1)), dict( doc=('x.shape=[2, (D1), 2, 1]; y.shape=[2, (D2)]; ' 'bcast.shape=[2, (D1), (2), (D2)'), x=ragged_factory_ops.constant_value( [[[[1], [2]], [[3], [4]]], [[[5], [6]]]], ragged_rank=1), y=ragged_factory_ops.constant_value([[10, 20], [30]]), expected=ragged_factory_ops.constant_value([[[[11, 21], [32]], [[13, 23], [34]]], [[[15, 25], [36]]]])), ]) def testRaggedAddWithBroadcasting(self, x, y, expected, doc): expected_rrank = getattr(expected, 'ragged_rank', 0) x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, dtype=dtypes.int32) y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, dtype=dtypes.int32) result = x + y result_rrank = getattr(result, 'ragged_rank', 0) self.assertEqual(expected_rrank, result_rrank) if hasattr(expected, 'tolist'): expected = expected.tolist() self.assertAllEqual(result, expected) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_tensor_shape_test.py
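A minimal sketch of broadcasting a dense value to a ragged shape, taken from one of the testRaggedBroadcastTo cases above.

from tensorflow.python.ops.ragged import ragged_tensor_shape
from tensorflow.python.ops.ragged.ragged_tensor_shape import RaggedTensorDynamicShape

target = RaggedTensorDynamicShape.from_dim_sizes([3, [3, 0, 2]])
result = ragged_tensor_shape.broadcast_to([[10], [20], [30]], target)
# [[10, 10, 10], [], [30, 30]], matching the corresponding parameterized case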
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for constructing RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_tensor_value from tensorflow.python.util.tf_export import tf_export #=============================================================================== # Op to construct a constant RaggedTensor from a nested Python list. #=============================================================================== @tf_export("ragged.constant") def constant(pylist, dtype=None, ragged_rank=None, inner_shape=None, name=None, row_splits_dtype=dtypes.int64): """Constructs a constant RaggedTensor from a nested Python list. Example: ```python >>> ragged.constant([[1, 2], [3], [4, 5, 6]]).eval() RaggedTensorValue(values=[1, 2, 3, 4, 5, 6], splits=[0, 2, 3, 6]) ``` All scalar values in `pylist` must have the same nesting depth `K`, and the returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar values, then `K` is one greater than the maximum depth of empty lists in `pylist`. All scalar values in `pylist` must be compatible with `dtype`. Args: pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that is not a `list`, `tuple` or `np.ndarray` must be a scalar value compatible with `dtype`. dtype: The type of elements for the returned `RaggedTensor`. If not specified, then a default is chosen based on the scalar values in `pylist`. ragged_rank: An integer specifying the ragged rank of the returned `RaggedTensor`. Must be nonnegative and less than `K`. Defaults to `max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified. inner_shape: A tuple of integers specifying the shape for individual inner values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank` is not specified. If `ragged_rank` is specified, then a default is chosen based on the contents of `pylist`. name: A name prefix for the returned tensor (optional). row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits. One of `tf.int32` or `tf.int64`. Returns: A potentially ragged tensor with rank `K` and the specified `ragged_rank`, containing the values from `pylist`. Raises: ValueError: If the scalar values in `pylist` have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with `pylist`. 
""" def ragged_factory(values, row_splits): row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype) return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits, validate=False) with ops.name_scope(name, "RaggedConstant"): return _constant_value(ragged_factory, constant_op.constant, pylist, dtype, ragged_rank, inner_shape) @tf_export(v1=["ragged.constant_value"]) def constant_value(pylist, dtype=None, ragged_rank=None, inner_shape=None, row_splits_dtype="int64"): """Constructs a RaggedTensorValue from a nested Python list. Warning: This function returns a `RaggedTensorValue`, not a `RaggedTensor`. If you wish to construct a constant `RaggedTensor`, use [`ragged.constant(...)`](constant.md) instead. Example: ```python >>> ragged.constant_value([[1, 2], [3], [4, 5, 6]]) RaggedTensorValue(values=[1, 2, 3, 4, 5, 6], splits=[0, 2, 3, 6]) ``` All scalar values in `pylist` must have the same nesting depth `K`, and the returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no scalar values, then `K` is one greater than the maximum depth of empty lists in `pylist`. All scalar values in `pylist` must be compatible with `dtype`. Args: pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that is not a `list` or `tuple` must be a scalar value compatible with `dtype`. dtype: `numpy.dtype`. The type of elements for the returned `RaggedTensor`. If not specified, then a default is chosen based on the scalar values in `pylist`. ragged_rank: An integer specifying the ragged rank of the returned `RaggedTensorValue`. Must be nonnegative and less than `K`. Defaults to `max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified. inner_shape: A tuple of integers specifying the shape for individual inner values in the returned `RaggedTensorValue`. Defaults to `()` if `ragged_rank` is not specified. If `ragged_rank` is specified, then a default is chosen based on the contents of `pylist`. row_splits_dtype: data type for the constructed `RaggedTensorValue`'s row_splits. One of `numpy.int32` or `numpy.int64`. Returns: A `tf.RaggedTensorValue` or `numpy.array` with rank `K` and the specified `ragged_rank`, containing the values from `pylist`. Raises: ValueError: If the scalar values in `pylist` have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with `pylist`. """ if dtype is not None and isinstance(dtype, dtypes.DType): dtype = dtype.as_numpy_dtype row_splits_dtype = dtypes.as_dtype(row_splits_dtype).as_numpy_dtype def _ragged_factory(values, row_splits): row_splits = np.array(row_splits, dtype=row_splits_dtype) return ragged_tensor_value.RaggedTensorValue(values, row_splits) def _inner_factory(pylist, dtype, shape, name=None): # pylint: disable=unused-argument return np.reshape(np.array(pylist, dtype=dtype), shape) return _constant_value(_ragged_factory, _inner_factory, pylist, dtype, ragged_rank, inner_shape) def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank, inner_shape): """Constructs a constant RaggedTensor or RaggedTensorValue. Args: ragged_factory: A factory function with the signature: `ragged_factory(values, row_splits)` inner_factory: A factory function with the signature: `inner_factory(pylist, dtype, shape, name)` pylist: A nested `list`, `tuple` or `np.ndarray`. dtype: Data type for returned value. ragged_rank: Ragged rank for returned value. inner_shape: Inner value shape for returned value. 
Returns: A value returned by `ragged_factory` or `inner_factory`. Raises: ValueError: If the scalar values in `pylist` have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with `pylist`. """ if ragged_tensor.is_ragged(pylist): raise TypeError("pylist may not be a RaggedTensor or RaggedTensorValue.") # np.ndim builds an array, so we short-circuit lists and tuples. if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0: # Scalar value if ragged_rank is not None and ragged_rank != 0: raise ValueError("Invalid pylist=%r: incompatible with ragged_rank=%d" % (pylist, ragged_rank)) if inner_shape is not None and inner_shape: raise ValueError( "Invalid pylist=%r: incompatible with dim(inner_shape)=%d" % (pylist, len(inner_shape))) return inner_factory(pylist, dtype, ()) if ragged_rank is not None and ragged_rank < 0: raise ValueError( "Invalid ragged_rank=%r: must be nonnegative" % ragged_rank) # Find the depth of scalar values in `pylist`. scalar_depth, max_depth = _find_scalar_and_max_depth(pylist) if scalar_depth is not None: if max_depth > scalar_depth: raise ValueError("Invalid pylist=%r: empty list nesting is greater " "than scalar value nesting" % pylist) # If both inner_shape and ragged_rank were specified, then check that # they are compatible with pylist. if inner_shape is not None and ragged_rank is not None: expected_depth = ragged_rank + len(inner_shape) + 1 if ((scalar_depth is not None and expected_depth != scalar_depth) or (scalar_depth is None and expected_depth < max_depth)): raise ValueError( "Invalid pylist=%r: incompatible with ragged_rank=%d " "and dim(inner_shape)=%d" % (pylist, ragged_rank, len(inner_shape))) # Check if the result is a `Tensor`. if (ragged_rank == 0 or (ragged_rank is None and ((max_depth < 2) or (inner_shape is not None and max_depth - len(inner_shape) < 2)))): return inner_factory(pylist, dtype, inner_shape) # Compute default value for inner_shape. if inner_shape is None: if ragged_rank is None: inner_shape = () else: inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank) # Compute default value for ragged_rank. if ragged_rank is None: if scalar_depth is None: ragged_rank = max(1, max_depth - 1) else: ragged_rank = max(1, scalar_depth - 1 - len(inner_shape)) # Build the splits for each ragged rank, and concatenate the inner values # into a single list. nested_splits = [] values = pylist for dim in range(ragged_rank): nested_splits.append([0]) concatenated_values = [] for row in values: nested_splits[dim].append(nested_splits[dim][-1] + len(row)) concatenated_values.extend(row) values = concatenated_values values = inner_factory( values, dtype=dtype, shape=(len(values),) + inner_shape, name="values") for row_splits in reversed(nested_splits): values = ragged_factory(values, row_splits) return values def _find_scalar_and_max_depth(pylist): """Finds nesting depth of scalar values in pylist. Args: pylist: A nested python `list` or `tuple`. Returns: A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting depth of scalar values in `pylist`, or `None` if `pylist` contains no scalars. `max_depth` is the maximum depth of `pylist` (including empty lists). Raises: ValueError: If pylist has inconsistent nesting depths for scalars. """ # Check if pylist is not scalar. np.ndim builds an array, so we # short-circuit lists and tuples. 
if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0: scalar_depth = None max_depth = 1 for child in pylist: child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child) if child_scalar_depth is not None: if scalar_depth is not None and scalar_depth != child_scalar_depth + 1: raise ValueError("all scalar values must have the same nesting depth") scalar_depth = child_scalar_depth + 1 max_depth = max(max_depth, child_max_depth + 1) return (scalar_depth, max_depth) return (0, 0) def _default_inner_shape_for_pylist(pylist, ragged_rank): """Computes a default inner shape for the given python list.""" def get_inner_shape(item): """Returns the inner shape for a python list `item`.""" if not isinstance(item, (list, tuple)) and np.ndim(item) == 0: return () elif item: return (len(item),) + get_inner_shape(item[0]) return (0,) def check_inner_shape(item, shape): """Checks that `item` has a consistent shape matching `shape`.""" is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0 if is_nested != bool(shape): raise ValueError("inner values have inconsistent shape") if is_nested: if shape[0] != len(item): raise ValueError("inner values have inconsistent shape") for child in item: check_inner_shape(child, shape[1:]) # Collapse the ragged layers to get the list of inner values. flat_values = pylist for dim in range(ragged_rank): if not all( isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values): raise ValueError("pylist has scalar values depth %d, but ragged_rank=%d " "requires scalar value depth greater than %d" % (dim + 1, ragged_rank, ragged_rank)) flat_values = sum((list(v) for v in flat_values), []) # Compute the inner shape looking only at the leftmost elements; and then # use check_inner_shape to verify that other elements have the same shape. inner_shape = get_inner_shape(flat_values) check_inner_shape(flat_values, inner_shape) return inner_shape[1:] @tf_export(v1=["ragged.placeholder"]) def placeholder(dtype, ragged_rank, value_shape=None, name=None): """Creates a placeholder for a `tf.RaggedTensor` that will always be fed. **Important**: This ragged tensor will produce an error if evaluated. Its value must be fed using the `feed_dict` optional argument to `Session.run()`, `Tensor.eval()`, or `Operation.run()`. @compatibility{eager} Placeholders are not compatible with eager execution. Args: dtype: The data type for the `RaggedTensor`. ragged_rank: The ragged rank for the `RaggedTensor` value_shape: The shape for individual flat values in the `RaggedTensor`. name: A name for the operation (optional). Returns: A `RaggedTensor` that may be used as a handle for feeding a value, but not evaluated directly. Raises: RuntimeError: if eager execution is enabled """ if ragged_rank == 0: return array_ops.placeholder(dtype, value_shape, name) with ops.name_scope(name, "RaggedPlaceholder", []): flat_shape = tensor_shape.TensorShape([None]).concatenate(value_shape) result = array_ops.placeholder(dtype, flat_shape, "flat_values") for i in reversed(range(ragged_rank)): row_splits = array_ops.placeholder(dtypes.int64, [None], "row_splits_%d" % i) result = ragged_tensor.RaggedTensor(result, row_splits, internal=True) return result
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_factory_ops.py
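A brief sketch of the constructors defined above; the ragged_rank argument controls how many of the nested dimensions are treated as ragged, as described in the docstrings.

from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])        # ragged_rank defaults to 1
rt2 = ragged_factory_ops.constant([[[1, 2], [3, 4]], [[5, 6]]],   # inner rows kept uniform
                                  ragged_rank=1)
val = ragged_factory_ops.constant_value([[1, 2], [3]])            # RaggedTensorValue, no graph needed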
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gather operations for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import indexed_slices from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_ragged_array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_tensor #=============================================================================== # ragged_gather #=============================================================================== # TODO(edloper): Add an `axis` argument def gather(params, indices, validate_indices=None, axis=0, batch_dims=0, name=None): """Gathers ragged slices from `params` axis `0` according to `indices`. Returns `RaggedTensor` output, such that: ```python output.shape = indices.shape + params.shape[1:] output.ragged_rank = indices.shape.ndims + params.ragged_rank output[i...j, d0...dn] = params[indices[i...j], d0...dn] ``` `params` may be ragged. `indices` may be ragged. `indices` must have dtype `int32` or `int64`. If any index is out of bounds, then an error is returned. Examples: ```python >>> params = tf.constant(['a', 'b', 'c', 'd', 'e']) >>> indices = tf.constant([3, 1, 2, 1, 0]) >>> ragged_params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']]) >>> ragged_indices = tf.ragged.constant([[3, 1, 2], [1], [], [0]]) >>> print ragged.gather(params, ragged_indices) [['d', 'b', 'c'], ['b'], [], ['a']] >>> print ragged.gather(ragged_params, indices) [['e'], ['d'], [], ['d'], ['a', 'b', 'c']] >>> print ragged.gather(ragged_params, ragged_indices) [[['e'], ['d'], []], [['d']], [], [['a', 'b', 'c']]] ``` Args: params: The potentially ragged tensor from which to gather values. Must be at least rank 1. indices: The potentially ragged tensor indicating which values to gather. Must have dtype `int32` or `int64`. Values must be in the range `[0, params.shape[0]]`. validate_indices: Ignored. axis: Must be zero. batch_dims: Must be zero. name: A name for the operation (optional). Returns: A `RaggedTensor`, where `output.dtype=params.dtype` and `output.shape=indices.shape + params.shape[1:]` and `output.ragged_rank=indices.shape.ndims + params.ragged_rank`. Raises: ValueError: If indices.shape.ndims is not known statically. 
""" del validate_indices if not isinstance(axis, int) or axis != 0: raise ValueError('axis != 0 is not supported for ragged gather yet.') if not isinstance(batch_dims, int) or batch_dims != 0: raise ValueError('batch_dims != 0 is not supported for ragged gather yet.') with ops.name_scope(name, 'RaggedGather', [params, indices]): params = ragged_tensor.convert_to_tensor_or_ragged_tensor( params, name='params') indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( indices, name='indices') params, indices = ragged_tensor.match_row_splits_dtypes(params, indices) if ragged_tensor.is_ragged(indices): return indices.with_values(gather(params, indices.values)) if not ragged_tensor.is_ragged(params): return array_ops.gather(params, indices) indices = ops.convert_to_tensor(indices) if indices.shape.ndims is None: raise ValueError('indices.shape.ndims must be known statically') result = gen_ragged_array_ops.ragged_gather( indices=indices, params_dense_values=params.flat_values, params_nested_splits=params.nested_row_splits, OUTPUT_RAGGED_RANK=indices.shape.ndims + len(params.nested_row_splits) - 1) # Compose the RaggedTensor from splits & values. return ragged_tensor.RaggedTensor.from_nested_row_splits( result.output_dense_values, result.output_nested_splits, validate=False) #=============================================================================== # ragged.gather_nd #=============================================================================== def gather_nd(params, indices, batch_dims=0, name=None): """Gather slices from `params` using `n`-dimensional indices. This operation is similar to `gather`, but it uses the innermost dimension of `indices` to define a slice into `params`. In particular, if: * `indices` has shape `[A1...AN, I]` * `params` has shape `[B1...BM]` Then: * `result` has shape `[A1...AN, B_{I+1}...BM]`. * `result[a1...aN] = params[indices[a1...aN, :]]` Args: params: A potentially ragged tensor with shape `[A1...AN, I]`. indices: A potentially ragged tensor with shape `[B1...BM]`. batch_dims: Must be zero. name: A name for the operation (optional). Returns: A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`. #### Examples: ```python >>> params = tf.compat.v1.ragged.constant_value( ... [ [ ['000', '001'], ['010' ] ], ... [ ['100' ], ['110', '111', '112'], ['120'] ], ... 
[ [ ], ['210' ] ] ]) >>> # Gather 2D slices from a 3D tensor >>> ragged.gather_nd(params, [[2], [0]]) [ [ [ ], ['210'] ] [ ['000', '001'], ['010'] ] ] >>> # Gather 1D slices from a 3D tensor >>> ragged.gather_nd(params, [[2, 1], [0, 0]]) [['210'], ['000', '001']] >>> # Gather scalars from a 3D tensor >>> ragged.gather_nd(params, [[0, 0, 1], [1, 1, 2]]) ['001', '112'] ``` """ if not isinstance(batch_dims, int) or batch_dims != 0: raise ValueError('batch_dims != 0 is not supported for ragged gather yet.') if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)): return array_ops.gather_nd(params, indices, name) with ops.name_scope(name, 'RaggedGatherNd', [params, indices]): params = ragged_tensor.convert_to_tensor_or_ragged_tensor( params, name='params') indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( indices, name='indices') params, indices = ragged_tensor.match_row_splits_dtypes(params, indices) indices_shape = indices.shape indices_ndims = indices_shape.ndims if indices_ndims is None: raise ValueError('indices.rank be statically known.') if indices_ndims == 0: raise ValueError('indices.rank must be at least 1.') if (ragged_tensor.is_ragged(indices) and indices_ndims == indices.ragged_rank + 1): raise ValueError('The innermost dimension of indices may not be ragged') # `index_size` is the "n" in "gather_nd" -- i.e., the number of dimensions # that each index slices into. index_size = tensor_shape.dimension_value(indices_shape[-1]) if index_size is None: raise ValueError('indices.shape[-1] must be statically known.') # If `indices` has more than 2 dimensions, then recurse. If `indices` is # dense, then we convert it to ragged before recursing, and then convert # the result back to `dense` if appropriate. if indices_ndims > 2: indices_is_dense = not ragged_tensor.is_ragged(indices) if indices_is_dense: indices = ragged_tensor.RaggedTensor.from_tensor( indices, ragged_rank=indices_ndims - 2, row_splits_dtype=params.row_splits.dtype) result = indices.with_flat_values(gather_nd(params, indices.flat_values)) if (indices_is_dense and ragged_tensor.is_ragged(result) and result.ragged_rank == indices_ndims - 2): result = ragged_tensor.RaggedTensor.to_tensor(result) return result # indices_ndims <= 2, and the innermost dimension of indices may not be # ragged, so `indices` must not be ragged. assert not ragged_tensor.is_ragged(indices) assert ragged_tensor.is_ragged(params) # Handle corner case: An empty index tuple selects the entire `params` # value. So if `index_size` is zero, then tile `params`. if index_size == 0: params_ndims = params.ragged_rank + array_ops.rank(params.flat_values) for dim in range(indices_ndims - 1): params = ragged_array_ops.expand_dims(params, axis=0) multiples = array_ops.concat([ array_ops.shape(indices)[:-1], array_ops.ones([params_ndims], dtypes.int32) ], axis=0) return ragged_array_ops.tile(params, multiples) # When index_size=1, we can just flatten the index tuples and use gather. elif index_size == 1: flattened_index_tuples = array_ops.reshape(indices, [-1]) return gather(params, flattened_index_tuples) # Otherwise, params is a RaggedTensor, and indices is a 1D or 2D Tensor. # Flatten both the index tuples and the params, such that the flattened # index tuples point to the correct values in the flattened params; and # then use ragged.gather on the flattened index tuples & params. else: indices = math_ops.cast(indices, params.row_splits.dtype) # Flatten the outermost 2 dimensions of the index tuples & params. 
flattened_index_tuples = array_ops.gather(params.row_splits, indices[..., 0]) flattened_index_tuples += indices[..., 1] flattened_params = params.values # Flatten any remaining dimensions. for dim in range(2, index_size): if not ragged_tensor.is_ragged(flattened_params): flattened_index_tuples = array_ops.expand_dims( flattened_index_tuples, axis=1) flattened_index_tuples = array_ops.concat( [flattened_index_tuples, indices[..., dim:]], axis=1) return array_ops.gather_nd(flattened_params, flattened_index_tuples) flattened_index_tuples = array_ops.gather( flattened_params.row_starts(), flattened_index_tuples) flattened_index_tuples += indices[..., dim] flattened_params = flattened_params.values # Gather using the flattened index tuples and params. return gather(flattened_params, flattened_index_tuples) #=============================================================================== # Gradient for the RaggedGather kernel #=============================================================================== @ops.RegisterGradient('RaggedGather') def _ragged_gather_grad(op, *grads): """Gradient for RaggedGather op.""" param_nested_splits = op.inputs[:-2] param_inner_values = op.inputs[-2] indices = op.inputs[-1] grad_inner_values = grads[-1] # For each row in `params`, find the range of values in `params.inner_values` # that is covered by that row. In particular, the values in row `i` are # `param_inner_values[combined_splits[i]:combined_splits[i+1]`. combined_splits = param_nested_splits[0] for row_splits in param_nested_splits[1:]: combined_splits = array_ops.gather(row_splits, combined_splits) # The outer dimensions of `indices` correspond 1:1 with the outer dimensions # of `ragged_grad` that are encoded by `grad_nested_splits`. Thus, the # flattened `indices` correspond 1:1 with `grad_inner_values`. flat_indices = array_ops.reshape(indices, [-1]) # Build an IndexedSlices where the values are taken from `flat_grad`. grad_indices = ragged_math_ops.range( array_ops.gather(combined_splits, flat_indices), array_ops.gather(combined_splits[1:], flat_indices)).values param_inner_values_grad = indexed_slices.IndexedSlices( values=grad_inner_values, indices=grad_indices, dense_shape=array_ops.shape(param_inner_values)) return [None for _ in param_nested_splits] + [param_inner_values_grad, None]
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_gather_ops.py
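As a quick usage sketch of the gather ops defined above: `tf.gather` and `tf.gather_nd` reach these ragged implementations through the dispatchers registered in `ragged_dispatch.py`. The data and expected outputs mirror the docstring examples; TensorFlow 1.15 with eager execution enabled is an assumption of the sketch.

```python
import tensorflow as tf  # assumption: TensorFlow 1.15
tf.enable_eager_execution()

# Ragged params, dense indices: rows of `params` are gathered by index.
params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
print(tf.gather(params, [3, 1, 2, 0]))
# expected: [[b'e'], [b'd'], [], [b'a', b'b', b'c']]

# gather_nd: each index tuple [i, j] selects params[i][j].
nd_params = tf.ragged.constant([[['000', '001'], ['010']],
                                [['100'], ['110', '111', '112'], ['120']]])
print(tf.gather_nd(nd_params, [[1, 1], [0, 0]]))
# expected: [[b'110', b'111', b'112'], [b'000', b'001']]
```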
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Support for ragged tensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import gen_ragged_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.ops.ragged import segment_id_ops from tensorflow.python.util.tf_export import tf_export #=============================================================================== # ragged.range #=============================================================================== # pylint: disable=redefined-builtin @tf_export('ragged.range') def range(starts, limits=None, deltas=1, dtype=None, name=None, row_splits_dtype=dtypes.int64): """Returns a `RaggedTensor` containing the specified sequences of numbers. Each row of the returned `RaggedTensor` contains a single sequence: ```python ragged.range(starts, limits, deltas)[i] == tf.range(starts[i], limits[i], deltas[i]) ``` If `start[i] < limits[i] and deltas[i] > 0`, then `output[i]` will be an empty list. Similarly, if `start[i] > limits[i] and deltas[i] < 0`, then `output[i]` will be an empty list. This behavior is consistent with the Python `range` function, but differs from the `tf.range` op, which returns an error for these cases. Examples: ```python >>> ragged.range([3, 5, 2]).eval().tolist() [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]] >>> ragged.range([0, 5, 8], [3, 3, 12]).eval().tolist() [[0, 1, 2], [], [8, 9, 10, 11]] >>> ragged.range([0, 5, 8], [3, 3, 12], 2).eval().tolist() [[0, 2], [], [8, 10]] ``` The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. The vector inputs must all have the same size. Scalar inputs are broadcast to match the size of the vector inputs. Args: starts: Vector or scalar `Tensor`. Specifies the first entry for each range if `limits` is not `None`; otherwise, specifies the range limits, and the first entries default to `0`. limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for each range. deltas: Vector or scalar `Tensor`. Specifies the increment for each range. Defaults to `1`. dtype: The type of the elements of the resulting tensor. If not specified, then a value is chosen based on the other args. name: A name for the operation. row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` tensor. One of `tf.int32` or `tf.int64`. Returns: A `RaggedTensor` of type `dtype` with `ragged_rank=1`. 
""" row_splits_dtype = dtypes.as_dtype(row_splits_dtype) if limits is None: starts, limits = 0, starts with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name: starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts') limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits') deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas') # infer dtype if not explicitly provided if dtype is None: starts, limits, deltas = _infer_matching_dtype( [starts, limits, deltas], [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]) result = gen_ragged_math_ops.ragged_range( starts, limits, deltas, Tsplits=row_splits_dtype, name=name) return ragged_tensor.RaggedTensor.from_row_splits(result.rt_dense_values, result.rt_nested_splits, validate=False) def _infer_matching_dtype(tensors, dtype_hierarchy): """Infers a matching dtype for tensors, and casts them to that dtype.""" assert all(t.dtype in dtype_hierarchy for t in tensors) inferred_dtype = max([t.dtype for t in tensors], key=dtype_hierarchy.index) return [math_ops.cast(t, inferred_dtype) for t in tensors] ops.no_gradient('RaggedRange') #=============================================================================== # ragged_segment_<AGGREGATE> #=============================================================================== # Docstring template used for the raggged_segment_<AGGREGATE> ops. _RAGGED_SEGMENT_DOCSTRING = """\ Computes the %(combination)s along segments of a RaggedTensor. Returns a RaggedTensor `output` with `num_segments` rows, where the row `output[i]` is formed by taking the %(combination)s of all rows of `data` whose corresponding `segment_id` is `i`. The length of the row `output[i]` will be the maximum of the lengths of all rows of `data` whose corresponding `segment_id` is `i`. If no `data` rows correspond to a given segment ID, then the output row for that segment ID will be empty. Args: data: A `RaggedTensor` containing the values to combine. segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or `int32`. `segment_ids.shape` must be a prefix of `data.shape`. Must be greater than or equal to zero, and less than `num_segments`. `segment_ids` is not required to be sorted. num_segments: An `int32` or `int64` scalar specifying the number of distinct segment ids. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` containing the %(combined)s values. The returned tensor has the same dtype as `data`, and its shape is `[num_segments] + data.shape[segment_ids.rank:]`. Raises: ValueError: If `segment_ids.shape` is not a prefix of `data.shape`. """ def _ragged_segment_aggregate(unsorted_segment_op, data, segment_ids, num_segments, separator=None, name=None): """Aggregates along segments of a RaggedTensor using `unsorted_segment_op`. Returns a RaggedTensor `output` with `num_segments` rows, where the row `output[i]` is formed by combining all rows of `data` whose corresponding `segment_id` is `i`. The values in each row are combined using `unsorted_segment_op`. The length of the row `output[i]` will be the maximum of the lengths of all rows of `data` whose corresponding `segment_id` is `i`. If no `data` rows correspond to a given segment ID, then the output row for that segment ID will be empty. Args: unsorted_segment_op: The tensorflow `op` that should be used to combine values in each row. Must have the same signature and basic behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc. 
data: A `RaggedTensor` containing the values to be combined. segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or `int32`. `segment_ids.shape` must be a prefix of `data.shape`. `segment_ids` is not required to be sorted. num_segments: An `int32` or `int64` scalar. separator: An optional string. Defaults to None. The separator to use when joining. Only used for string types. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` containing the aggregated values. The returned tensor has the same dtype as `data`, and its shape is `[num_segments] + data.shape[segment_ids.rank:]`. Raises: ValueError: If segment_ids.shape is not a prefix of data.shape. """ if not (ragged_tensor.is_ragged(data) or ragged_tensor.is_ragged(segment_ids)): if separator is not None: # It uses unsorted_segment_join. return unsorted_segment_op(data, segment_ids, num_segments, separator, name) else: return unsorted_segment_op(data, segment_ids, num_segments, name) with ops.name_scope(name, 'RaggedSegment', [data, segment_ids, num_segments]) as name: data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data') segment_ids = ragged_tensor.convert_to_tensor_or_ragged_tensor( segment_ids, name='segment_ids') data, segment_ids = ragged_tensor.match_row_splits_dtypes(data, segment_ids) if segment_ids.dtype not in (dtypes.int32, dtypes.int64): raise ValueError('segment_ids must have dtype int32 or int64.') if ragged_tensor.is_ragged(segment_ids): if not ragged_tensor.is_ragged(data): raise ValueError('segment_ids.shape must be a prefix of data.shape, ' 'but segment_ids is ragged and data is not.') check_splits = check_ops.assert_equal( segment_ids.row_splits, data.row_splits, message='segment_ids.shape must be a prefix of data.shape') with ops.control_dependencies([check_splits]): return _ragged_segment_aggregate(unsorted_segment_op, data.values, segment_ids.values, num_segments, separator) # Find the length of each row in data. (shape=[data_nrows]) data_row_lengths = data.row_splits[1:] - data.row_splits[:-1] # Find the length that each output row will have. The length of the row # corresponding to segment `id` is `max(data_row_lengths[i])` where # `segment_ids[i]=id`. (shape=[output_nrows]) output_row_lengths = math_ops.maximum( math_ops.unsorted_segment_max(data_row_lengths, segment_ids, num_segments), 0) # Build the splits tensor for the output RaggedTensor. output_splits = array_ops.concat([ array_ops.zeros([1], output_row_lengths.dtype), math_ops.cumsum(output_row_lengths) ], axis=0) # For each row in `data`, find the start & limit position where that row's # values will be aggregated in output.values. data_row_to_out_row_start = array_ops.gather(output_splits, segment_ids) data_row_to_out_row_limit = data_row_to_out_row_start + data_row_lengths # For each value in `data.values`, find the position where it will # aggregated in `output.values`. # Get the target output values index for each data values index. data_val_to_out_val_index = range(data_row_to_out_row_start, data_row_to_out_row_limit).values # Recursively aggregate the values. 
output_values = _ragged_segment_aggregate(unsorted_segment_op, data.values, data_val_to_out_val_index, output_splits[-1], separator) return ragged_tensor.RaggedTensor.from_row_splits( output_values, output_splits, validate=False) def segment_sum(data, segment_ids, num_segments, name=None): # For docs, see: _RAGGED_SEGMENT_DOCSTRING return _ragged_segment_aggregate(math_ops.unsorted_segment_sum, data=data, segment_ids=segment_ids, num_segments=num_segments, name=(name or'RaggedSegmentSum')) def segment_prod(data, segment_ids, num_segments, name=None): # For docs, see: _RAGGED_SEGMENT_DOCSTRING return _ragged_segment_aggregate(math_ops.unsorted_segment_prod, data=data, segment_ids=segment_ids, num_segments=num_segments, name=(name or 'RaggedSegmentProd')) def segment_min(data, segment_ids, num_segments, name=None): # For docs, see: _RAGGED_SEGMENT_DOCSTRING return _ragged_segment_aggregate(math_ops.unsorted_segment_min, data=data, segment_ids=segment_ids, num_segments=num_segments, name=(name or 'RaggedSegmentMin')) def segment_max(data, segment_ids, num_segments, name=None): # For docs, see: _RAGGED_SEGMENT_DOCSTRING return _ragged_segment_aggregate(math_ops.unsorted_segment_max, data=data, segment_ids=segment_ids, num_segments=num_segments, name=(name or 'RaggedSegmentMax')) def segment_mean(data, segment_ids, num_segments, name=None): """For docs, see: _RAGGED_SEGMENT_DOCSTRING.""" with ops.name_scope(name, 'RaggedSegmentMean', [data, segment_ids, num_segments]): total = segment_sum(data, segment_ids, num_segments) ones = ragged_tensor.RaggedTensor.from_nested_row_splits( array_ops.ones_like(data.flat_values), data.nested_row_splits, validate=False) count = segment_sum(ones, segment_ids, num_segments) if ragged_tensor.is_ragged(total): return total.with_flat_values(total.flat_values / count.flat_values) else: return total / count def segment_sqrt_n(data, segment_ids, num_segments, name=None): """For docs, see: _RAGGED_SEGMENT_DOCSTRING.""" with ops.name_scope(name, 'RaggedSegmentSqrtN', [data, segment_ids, num_segments]): total = segment_sum(data, segment_ids, num_segments) ones = ragged_tensor.RaggedTensor.from_nested_row_splits( array_ops.ones_like(data.flat_values), data.nested_row_splits, validate=False) count = segment_sum(ones, segment_ids, num_segments) if ragged_tensor.is_ragged(total): return total.with_flat_values( total.flat_values / math_ops.sqrt(count.flat_values)) else: return total / math_ops.sqrt(count) def _set_ragged_segment_docstring(func, combination, combined): func.__doc__ = _RAGGED_SEGMENT_DOCSTRING % dict( combination=combination, combined=combined) _set_ragged_segment_docstring(segment_sum, 'sum', 'summed') _set_ragged_segment_docstring(segment_prod, 'product', 'multiplied') _set_ragged_segment_docstring(segment_min, 'minimum', 'minimized') _set_ragged_segment_docstring(segment_max, 'maximum', 'maximized') _set_ragged_segment_docstring(segment_mean, 'mean', 'averaged') _set_ragged_segment_docstring(segment_sqrt_n, 'sum divided by sqrt(N)', 'summed') #=============================================================================== # ragged_reduce_<AGGREGATE> #=============================================================================== # Docstring template used for ragged_reduce_<AGGREGATE> ops. _RAGGED_REDUCE_DOCSTRING = """\ Computes the %(combination)s of elements across dimensions of a `RaggedTensor`. Reduces `input_tensor` along the dimensions given in `axis` by taking the %(combination)s of values. 
If a reduced dimension has no elements for some index, then the value for that index will be %(default)s. The rank of the tensor is reduced by `1` for each entry in `axis`. If `axis` is not specified, then all dimensions are reduced, and a scalar value is returned. Args: input_tensor: A `RaggedTensor` containing the values to be %(combined)s. axis: The dimensions to reduce. May be `None` (to reduce all axes), an `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a given set of axes), or a `Tensor` with a constant value. Must be in the range `[0, input_tensor.rank]`. name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` containing the %(combined)s values. The returned tensor has the same dtype as `data`, and its shape is given by removing the dimensions specified in `axis` from `input_tensor.shape`. The `ragged_rank` of the returned tensor is given by substracting any ragged dimensions specified in `axis` from `input_tensor.ragged_rank`. Raises: ValueError: If `axis` contains a `Tensor` whose value is not constant. ####Example: ```python%(example)s ``` """ _RAGGED_REDUCE_SUM_EXAMPLE = """ >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) >>> ragged.reduce_sum(rt, axis=0).eval().tolist() [15, 12, 4] # = [3+1+9+2, 1+5+6, 4] >>> ragged.reduce_sum(rt, axis=1).eval().tolist() [8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6] """ _RAGGED_REDUCE_PROD_EXAMPLE = """ >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) >>> ragged.reduce_prod(rt, axis=0).eval().tolist() [54, 30, 4] # = [3*1*9*2, 1*5*6, 4] >>> ragged.reduce_prod(rt, axis=1).eval().tolist() [12, 5, 9, 12] # = [3*1*4, 1*5, 9, 2*6] """ _RAGGED_REDUCE_MIN_EXAMPLE = """ >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) >>> ragged.reduce_min(rt, axis=0).eval().tolist() [1, 1, 4] # = [min(3, 1, 9, 2), min(1, 5, 6), 4] >>> ragged.reduce_min(rt, axis=1).eval().tolist() [1, 1, 9, 2] # = [min(3, 1, 4), min(1, 5), 9, min(2, 6)] """ _RAGGED_REDUCE_MAX_EXAMPLE = """ >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) >>> ragged.reduce_max(rt, axis=0).eval().tolist() [9, 6, 4] # = [max(3, 1, 9, 2), max(1, 5, 6), 4] >>> ragged.reduce_max(rt, axis=1).eval().tolist() [4, 5, 9, 6] # = [max(3, 1, 4), max(1, 5), 9, max(2, 6)] """ _RAGGED_REDUCE_MEAN_EXAMPLE = """ >>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]]) >>> ragged.reduce_mean(rt, axis=0).eval().tolist() [3.75, 4, 4] # = [mean(3, 1, 9, 2), mean(1, 5, 6), 4] >>> ragged.reduce_mean(rt, axis=1).eval().tolist() [2.66666, 3, 9, 4] # = [mean(3, 1, 4), mean(1, 5), 9, mean(2, 6)] """ _RAGGED_REDUCE_ALL_EXAMPLE = """ >>> rt = ragged.constant([[True, True], [True, True, False, True], [False, True]]) >>> ragged.reduce_all(rt, axis=0).eval().tolist() [False, True, False, True] >>> ragged.reduce_all(rt, axis=1).eval().tolist() [True, False, False] """ _RAGGED_REDUCE_ANY_EXAMPLE = """ >>> rt = ragged.constant([[True, True], [True, True, False, True], [False, True]]) >>> ragged.reduce_any(rt, axis=0).eval().tolist() [True, True, False, True] >>> ragged.reduce_any(rt, axis=1).eval().tolist() [True, True, True] """ def ragged_reduce_aggregate(reduce_op, unsorted_segment_op, rt_input, axis, keepdims, separator=None, name=None): """Aggregates across axes of a RaggedTensor using the given `Tensor` ops. Reduces `rt_input` along the dimensions given in `axis`. The rank of the tensor is reduced by 1 for each entry in `axis`. If `axis` is not specified, then all dimensions are reduced, and a scalar value is returned. 
This op assumes that `reduce_op` and `unsorted_segment_op` are associative; if not, then reducing multiple axes will return incorrect results. (In particular, reducing multiple axes is currently implemented by reducing the axes one at a time.) Args: reduce_op: The tensorflow `op` that should be used to reduce values in uniform dimensions. Must have the same signature and basic behavior as `reduce_sum`, `reduce_max`, etc. unsorted_segment_op: The tensorflow `op` that should be used to combine values in ragged dimensions. Must have the same signature and basic behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc. rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced. axis: The axis or axes to reduce. May be `None` (to reduce all axes), an `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a given set of axes), or a `Tensor` with a constant value. Must be in the range `[0, rt_input.rank)`. keepdims: If true, retains reduced dimensions with length 1. separator: An optional string. Defaults to None. The separator to use when joining. The separator must not be set for non-string data types. (i.e. if separator is not None then it uses string ops) name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` containing the reduced values. The returned tensor has the same dtype as `data`, and its shape is given by removing the dimensions specified in `axis` from `rt_input.shape`. The `ragged_rank` of the returned tensor is given by substracting any ragged dimensions specified in `axis` from `rt_input.ragged_rank`. Raises: ValueError: If `axis` contains a `Tensor` whose value is not constant. """ if not ragged_tensor.is_ragged(rt_input): if separator is None: return reduce_op(rt_input, axis, name=name) else: # When separator is not None, We infer that dtype is string and # reduce_join will be called. return reduce_op(rt_input, axis, name=name, separator=separator) if keepdims: raise ValueError('keepdims=True is not supported for RaggedTensors.') if isinstance(axis, ops.Tensor): axis = tensor_util.constant_value(axis) if axis is None: raise ValueError('axis must be known at graph construction time.') if isinstance(axis, np.ndarray): axis = axis.tolist() # When reducing all axes, just ignore splits & reduce the inner values. if axis is None: return reduce_op(rt_input.flat_values, None, name=name) with ops.name_scope(name, 'RaggedReduce', [rt_input, axis]): if isinstance(axis, (tuple, list)): if not axis: return rt_input elif len(axis) == 1: axis = axis[0] else: # When reducing multiple axes, as we reduce one at a time (see below), # the negative axis has to be converted to positive at the first run # as the sort with negative axis will have different orders. # See GitHub issue 27497. axis = [ ragged_util.get_positive_axis(a, rt_input.shape.ndims) for a in axis ] # When reducing multiple axes, just reduce one at a time. This is less # efficient, and only works for associative ops. (In particular, it # does not work for reduce_mean.) However, reducing multiple axes at # once will probably require a nontrivial c++ op. 
axis = sorted(axis) inner_reduced = ragged_reduce_aggregate(reduce_op, unsorted_segment_op, rt_input, axis[-1], keepdims, separator) return ragged_reduce_aggregate(reduce_op, unsorted_segment_op, inner_reduced, axis[:-1], keepdims, separator) rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor( rt_input, name='rt_input') axis = ragged_util.get_positive_axis(axis, rt_input.shape.ndims) if axis == 0: # out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N] row_lengths = rt_input.row_splits[1:] - rt_input.row_splits[:-1] num_segments = math_ops.maximum(math_ops.reduce_max(row_lengths), 0) segment_ids = range(row_lengths).values return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values, segment_ids, num_segments, separator) elif axis == 1: # out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N] num_segments = array_ops.shape(rt_input.row_splits)[0] - 1 segment_ids = segment_id_ops.row_splits_to_segment_ids( rt_input.row_splits) return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values, segment_ids, num_segments, separator) else: # out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] = # sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N] return rt_input.with_values( ragged_reduce_aggregate(reduce_op, unsorted_segment_op, rt_input.values, axis - 1, keepdims, separator)) def reduce_sum(input_tensor, axis=None, keepdims=None, name=None): """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" return ragged_reduce_aggregate( reduce_op=math_ops.reduce_sum, unsorted_segment_op=math_ops.unsorted_segment_sum, rt_input=input_tensor, axis=axis, keepdims=keepdims, name=(name or 'RaggedReduceSum')) def reduce_prod(input_tensor, axis=None, keepdims=None, name=None): """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" return ragged_reduce_aggregate( reduce_op=math_ops.reduce_prod, unsorted_segment_op=math_ops.unsorted_segment_prod, rt_input=input_tensor, axis=axis, keepdims=keepdims, name=(name or 'RaggedReduceProd')) def reduce_min(input_tensor, axis=None, keepdims=None, name=None): """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" return ragged_reduce_aggregate( reduce_op=math_ops.reduce_min, unsorted_segment_op=math_ops.unsorted_segment_min, rt_input=input_tensor, axis=axis, keepdims=keepdims, name=(name or 'RaggedReduceMin')) def reduce_max(input_tensor, axis=None, keepdims=None, name=None): """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" return ragged_reduce_aggregate( reduce_op=math_ops.reduce_max, unsorted_segment_op=math_ops.unsorted_segment_max, rt_input=input_tensor, axis=axis, keepdims=keepdims, name=(name or 'RaggedReduceMax')) def reduce_mean(input_tensor, axis=None, keepdims=None, name=None): """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" with ops.name_scope(name, 'RaggedReduceMean', [input_tensor, axis]): total = reduce_sum(input_tensor, axis, keepdims) if ragged_tensor.is_ragged(input_tensor): ones = ragged_tensor.RaggedTensor.from_nested_row_splits( array_ops.ones_like(input_tensor.flat_values), input_tensor.nested_row_splits, validate=False) else: ones = array_ops.ones_like(input_tensor) count = reduce_sum(ones, axis, keepdims) if ragged_tensor.is_ragged(total): return ragged_tensor.RaggedTensor.from_nested_row_splits( total.flat_values / count.flat_values, total.nested_row_splits, validate=False) else: return total / count def _cast(input_tensor, dtype): return ragged_functional_ops.map_flat_values(math_ops.cast, input_tensor, dtype) def reduce_all(input_tensor, axis=None, keepdims=None, name=None): """For docs, see: 
_RAGGED_REDUCE_DOCSTRING.""" with ops.name_scope(name, 'RaggedReduceAll', [input_tensor, axis]): return _cast( reduce_prod(_cast(input_tensor, dtypes.int32), axis, keepdims), dtypes.bool) def reduce_any(input_tensor, axis=None, keepdims=None, name=None): """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" with ops.name_scope(name, 'RaggedReduceAny', [input_tensor, axis]): return _cast( reduce_sum(_cast(input_tensor, dtypes.int32), axis, keepdims), dtypes.bool) def _set_ragged_reduce_docstring(func, combination, combined, default, example): func.__doc__ = _RAGGED_REDUCE_DOCSTRING % dict( combination=combination, combined=combined, default=default, example=example) _set_ragged_reduce_docstring(reduce_sum, 'sum', 'summed', '0', _RAGGED_REDUCE_SUM_EXAMPLE) _set_ragged_reduce_docstring(reduce_prod, 'product', 'multiplied', '1', _RAGGED_REDUCE_PROD_EXAMPLE) _set_ragged_reduce_docstring(reduce_min, 'minimum', 'minimized', '`input_tensor.dtype.min`', _RAGGED_REDUCE_MIN_EXAMPLE) _set_ragged_reduce_docstring(reduce_max, 'maximum', 'maximized', '`input_tensor.dtype.max`', _RAGGED_REDUCE_MAX_EXAMPLE) _set_ragged_reduce_docstring(reduce_mean, 'mean', 'averaged', 'NaN', _RAGGED_REDUCE_MEAN_EXAMPLE) _set_ragged_reduce_docstring(reduce_all, 'logical and', 'and-ed', 'True', _RAGGED_REDUCE_ALL_EXAMPLE) _set_ragged_reduce_docstring(reduce_any, 'logical or', 'or-ed', 'False', _RAGGED_REDUCE_ANY_EXAMPLE)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_math_ops.py
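A short sketch of the math ops defined above: `tf.ragged.range` and the ragged reductions, which are reached through the standard `tf.reduce_*` entry points via dispatch. The values follow the docstring examples; TensorFlow 1.15 with eager execution enabled is assumed.

```python
import tensorflow as tf  # assumption: TensorFlow 1.15
tf.enable_eager_execution()

# Each row i is tf.range(starts[i], limits[i]); empty ranges give empty rows.
print(tf.ragged.range([0, 5, 8], [3, 3, 12]))
# expected: <tf.RaggedTensor [[0, 1, 2], [], [8, 9, 10, 11]]>

rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
print(tf.reduce_sum(rt, axis=1))   # expected: [8 6 9 8]
print(tf.reduce_max(rt, axis=0))   # expected: [9 6 4]
print(tf.reduce_mean(rt, axis=1))  # expected (approximately): [2.67 3. 9. 4.]
```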
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_string_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_string_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedStringOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase): def test_rank_one(self): input_array = [b'this', b'is', b'a', b'test'] truth = b'thisisatest' truth_shape = [] with self.cached_session(): output = ragged_string_ops.reduce_join( inputs=input_array, axis=-1, keepdims=False, separator='') output_array = self.evaluate(output) self.assertAllEqual(truth, output_array) self.assertAllEqual(truth_shape, output.get_shape()) @parameterized.parameters([ { 'input_array': [[ b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors' ], [b'please', b'do', b'not', b'panic', b'!']], 'axis': 0, 'keepdims': False, 'truth': [ b'thisplease', b'isdo', b'anot', b'testpanic', b'for!', b'ragged', b'tensors' ], 'truth_shape': [7], }, { 'input_array': [[ b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors' ], [b'please', b'do', b'not', b'panic', b'!']], 'axis': 1, 'keepdims': False, 'truth': [b'thisisatestforraggedtensors', b'pleasedonotpanic!'], 'truth_shape': [2], }, { 'input_array': [[ b'this', b'is', b'a', b'test', b'for', b'ragged', b'tensors' ], [b'please', b'do', b'not', b'panic', b'!']], 'axis': 1, 'keepdims': False, 'truth': [ b'this|is|a|test|for|ragged|tensors', b'please|do|not|panic|!' 
], 'truth_shape': [2], 'separator': '|', }, { 'input_array': [[[b'a', b'b'], [b'b', b'c']], [[b'dd', b'ee']]], 'axis': -1, 'keepdims': False, 'truth': [[b'a|b', b'b|c'], [b'dd|ee']], 'truth_shape': [2, None], 'separator': '|', }, { 'input_array': [[[[b'a', b'b', b'c'], [b'dd', b'ee']]], [[[b'f', b'g', b'h'], [b'ii', b'jj']]]], 'axis': -2, 'keepdims': False, 'truth': [[[b'a|dd', b'b|ee', b'c']], [[b'f|ii', b'g|jj', b'h']]], 'truth_shape': [2, None, None], 'separator': '|', }, { 'input_array': [[[b't', b'h', b'i', b's'], [b'i', b's'], [b'a'], [b't', b'e', b's', b't']], [[b'p', b'l', b'e', b'a', b's', b'e'], [b'p', b'a', b'n', b'i', b'c']]], 'axis': -1, 'keepdims': False, 'truth': [[b'this', b'is', b'a', b'test'], [b'please', b'panic']], 'truth_shape': [2, None], 'separator': '', }, { 'input_array': [[[[b't'], [b'h'], [b'i'], [b's']], [[b'i', b's']], [[b'a', b'n']], [[b'e'], [b'r'], [b'r']]], [[[b'p'], [b'l'], [b'e'], [b'a'], [b's'], [b'e']], [[b'p'], [b'a'], [b'n'], [b'i'], [b'c']]]], 'axis': -1, 'keepdims': False, 'truth': [[[b't', b'h', b'i', b's'], [b'is'], [b'an'], [b'e', b'r', b'r']], [[b'p', b'l', b'e', b'a', b's', b'e'], [b'p', b'a', b'n', b'i', b'c']]], 'truth_shape': [2, None, None], 'separator': '', }, ]) def test_different_ranks(self, input_array, axis, keepdims, truth, truth_shape, separator=''): with self.cached_session(): input_tensor = ragged_factory_ops.constant(input_array) output = ragged_string_ops.reduce_join( inputs=input_tensor, axis=axis, keepdims=keepdims, separator=separator) output_array = self.evaluate(output) self.assertAllEqual(truth, output_array) if all(isinstance(s, tensor_shape.Dimension) for s in output.shape): output_shape = [dim.value for dim in output.shape] else: output_shape = output.shape self.assertAllEqual(truth_shape, output_shape) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_string_ops_test.py
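The tests above exercise `ragged_string_ops.reduce_join`. Below is a minimal standalone sketch of the same call pattern, using the same private modules the test imports; the inputs and expected outputs are simplified from the first test cases, and TensorFlow 1.15 with eager execution enabled is assumed.

```python
import tensorflow as tf  # assumption: TensorFlow 1.15
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops

tf.enable_eager_execution()

rt = ragged_factory_ops.constant(
    [['this', 'is', 'a', 'test'], ['please', 'do', 'not', 'panic']])

# Join along the ragged (row) axis with a separator.
print(ragged_string_ops.reduce_join(rt, axis=1, separator='|'))
# expected: [b'this|is|a|test' b'please|do|not|panic']

# Join along axis 0 (column-wise) for rows of equal length.
print(ragged_string_ops.reduce_join(rt, axis=0, separator=''))
# expected: [b'thisplease' b'isdo' b'anot' b'testpanic']
```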
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Private convenience functions for RaggedTensors. None of these methods are exposed in the main "ragged" package. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import gen_ragged_math_ops from tensorflow.python.ops import math_ops def assert_splits_match(nested_splits_lists): """Checks that the given splits lists are identical. Performs static tests to ensure that the given splits lists are identical, and returns a list of control dependency op tensors that check that they are fully identical. Args: nested_splits_lists: A list of nested_splits_lists, where each split_list is a list of `splits` tensors from a `RaggedTensor`, ordered from outermost ragged dimension to innermost ragged dimension. Returns: A list of control dependency op tensors. Raises: ValueError: If the splits are not identical. """ error_msg = "Inputs must have identical ragged splits" for splits_list in nested_splits_lists: if len(splits_list) != len(nested_splits_lists[0]): raise ValueError(error_msg) return [ check_ops.assert_equal(s1, s2, message=error_msg) for splits_list in nested_splits_lists[1:] for (s1, s2) in zip(nested_splits_lists[0], splits_list) ] # Note: imported here to avoid circular dependency of array_ops. get_positive_axis = array_ops.get_positive_axis convert_to_int_tensor = array_ops.convert_to_int_tensor repeat = array_ops.repeat_with_axis def lengths_to_splits(lengths): """Returns splits corresponding to the given lengths.""" return array_ops.concat([[0], math_ops.cumsum(lengths)], axis=-1) def repeat_ranges(params, splits, repeats): """Repeats each range of `params` (as specified by `splits`) `repeats` times. Let the `i`th range of `params` be defined as `params[splits[i]:splits[i + 1]]`. Then this function returns a tensor containing range 0 repeated `repeats[0]` times, followed by range 1 repeated `repeats[1]`, ..., followed by the last range repeated `repeats[-1]` times. Args: params: The `Tensor` whose values should be repeated. splits: A splits tensor indicating the ranges of `params` that should be repeated. repeats: The number of times each range should be repeated. Supports broadcasting from a scalar value. Returns: A `Tensor` with the same rank and type as `params`. #### Example: ```python >>> repeat_ranges(['a', 'b', 'c'], [0, 2, 3], 3) ['a', 'b', 'a', 'b', 'a', 'b', 'c', 'c', 'c'] ``` """ # Divide `splits` into starts and limits, and repeat them `repeats` times. if repeats.shape.ndims != 0: repeated_starts = repeat(splits[:-1], repeats, axis=0) repeated_limits = repeat(splits[1:], repeats, axis=0) else: # Optimization: we can just call repeat once, and then slice the result. 
repeated_splits = repeat(splits, repeats, axis=0) n_splits = array_ops.shape(repeated_splits, out_type=repeats.dtype)[0] repeated_starts = repeated_splits[:n_splits - repeats] repeated_limits = repeated_splits[repeats:] # Get indices for each range from starts to limits, and use those to gather # the values in the desired repetition pattern. one = array_ops.ones((), repeated_starts.dtype) offsets = gen_ragged_math_ops.ragged_range( repeated_starts, repeated_limits, one) return array_ops.gather(params, offsets.rt_dense_values)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_util.py
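A small check of the private helpers defined above (`lengths_to_splits` and `repeat_ranges`); the second call mirrors the `repeat_ranges` docstring example. This is illustrative only and assumes TensorFlow 1.15 with eager execution enabled.

```python
import tensorflow as tf  # assumption: TensorFlow 1.15
from tensorflow.python.ops.ragged import ragged_util

tf.enable_eager_execution()

# Row lengths [2, 0, 3] correspond to splits [0, 2, 2, 5].
print(ragged_util.lengths_to_splits([2, 0, 3]))
# expected: [0 2 2 5]

# Repeat each range of params (defined by splits) three times.
print(ragged_util.repeat_ranges(params=tf.constant(['a', 'b', 'c']),
                                splits=tf.constant([0, 2, 3]),
                                repeats=tf.constant(3)))
# expected: [b'a' b'b' b'a' b'b' b'a' b'b' b'c' b'c' b'c']
```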
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operator dispatch for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import gen_bitwise_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variables from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_batch_gather_ops from tensorflow.python.ops.ragged import ragged_concat_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_squeeze_op from tensorflow.python.ops.ragged import ragged_string_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_tensor_shape from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.ops.ragged import ragged_where_op from tensorflow.python.util import deprecation from tensorflow.python.util import dispatch from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_export from tensorflow.python.util import tf_inspect # @TODO(edloper): Set this to True in the CL that exports RaggedTensors. _UPDATE_DOCSTRINGS = False # Information about an argument to an operation: The name of the argument, its # position in the argument list, and a boolean flag indicating whether it # expects a list of tensors. _ArgInfo = collections.namedtuple('ArgInfo', ['name', 'position', 'is_list']) def _get_arg_infos(func, arg_names): """Returns an `_ArgInfo` for each argument of `func` specified by `arg_names`. Args: func: The function whose arguments should be described. arg_names: The names of the arguments to get info for. Returns: A tuple of `_ArgInfo`s. """ arg_infos = [] # Inspect the func's argspec to find the position of each arg. arg_spec = tf_inspect.getargspec(func) for argname in arg_names: assert isinstance(argname, str) is_list = argname.startswith('[') and argname.endswith(']') if is_list: argname = argname[1:-1] if argname not in arg_spec.args: raise ValueError('Argument %r not found function in %s. 
Args=%s' % (argname, func, arg_spec.args)) arg_infos.append(_ArgInfo(argname, arg_spec.args.index(argname), is_list)) return arg_infos def _is_convertible_to_tensor(value): """Returns true if `value` is convertible to a `Tensor`.""" if value is None: return True if isinstance(value, (ops.Tensor, variables.Variable, np.ndarray, int, float, str)): return True elif isinstance(value, (sparse_tensor.SparseTensor,)): return False else: try: ops.convert_to_tensor(value) return True except (TypeError, ValueError): return False class UnaryRaggedElementwiseDispatcher(dispatch.OpDispatcher): """OpDispatcher for unary ops that map a base op across ragged values.""" def __init__(self, original_op, arg_is_list=False): self._original_op = original_op self._arg_is_list = arg_is_list arg_names = tf_inspect.getfullargspec(original_op)[0] self._x = arg_names[0] if _UPDATE_DOCSTRINGS: original_op.__doc__ = ( original_op.__doc__.rstrip() + '\n\n' + ' `{x}` may be a `tf.RaggedTensor`.\n'.format(x=self._x)) def handle(self, args, kwargs): if args: x, args = args[0], args[1:] else: kwargs = kwargs.copy() x = kwargs.pop(self._x, None) if x is None: return self.NOT_SUPPORTED if self._arg_is_list: found_ragged = False for elt in x: if ragged_tensor.is_ragged(elt): found_ragged = True elif not _is_convertible_to_tensor(elt): return self.NOT_SUPPORTED if found_ragged: x = ragged_tensor.match_row_splits_dtypes(*x) nested_splits_lists = [ elt.nested_row_splits for elt in x if ragged_tensor.is_ragged(elt) ] flat_values = [ elt.flat_values if ragged_tensor.is_ragged(elt) else elt for elt in x ] with ops.control_dependencies( ragged_util.assert_splits_match(nested_splits_lists)): return ragged_tensor.RaggedTensor.from_nested_row_splits( self._original_op(flat_values, *args, **kwargs), nested_splits_lists[0], validate=False) else: return self.NOT_SUPPORTED else: found_ragged = ragged_tensor.is_ragged(x) if found_ragged: mapped_values = self._original_op(x.flat_values, *args, **kwargs) return x.with_flat_values(mapped_values) else: return self.NOT_SUPPORTED class BinaryRaggedElementwiseDispatcher(dispatch.OpDispatcher): """OpDispatcher for binary ops that map a base op across ragged values. Supports broadcasting. """ def __init__(self, original_op): self._original_op = original_op arg_names = tf_inspect.getfullargspec(original_op)[0] self._x = arg_names[0] self._y = arg_names[1] if _UPDATE_DOCSTRINGS: original_op.__doc__ = ( original_op.__doc__.rstrip() + '\n\n' + ' `{x}` and `{y}` may be a `tf.RaggedTensor`.\n'.format( x=self._x, y=self._y)) def handle(self, args, kwargs): # Extract the binary args. if len(args) > 1: x = args[0] y = args[1] args = args[2:] elif args: kwargs = kwargs.copy() x = args[0] y = kwargs.pop(self._y, None) args = args[1:] else: kwargs = kwargs.copy() x = kwargs.pop(self._x, None) y = kwargs.pop(self._y, None) # Bail if we don't have at least one ragged argument. x_is_ragged = ragged_tensor.is_ragged(x) y_is_ragged = ragged_tensor.is_ragged(y) if not (x_is_ragged or y_is_ragged): return self.NOT_SUPPORTED # Convert args to tensors. Bail if conversion fails. 
try: if not x_is_ragged: x = ops.convert_to_tensor(x, name=self._x, preferred_dtype=y.dtype) if not y_is_ragged: y = ops.convert_to_tensor(y, name=self._y, preferred_dtype=x.dtype) except (TypeError, ValueError): return self.NOT_SUPPORTED if x_is_ragged and y_is_ragged: x, y = ragged_tensor.match_row_splits_dtypes(x, y) if ((x_is_ragged and y_is_ragged) or (x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or (y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)): bcast_shape = ragged_tensor_shape.broadcast_dynamic_shape( ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x), ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y)) x = ragged_tensor_shape.broadcast_to( x, bcast_shape, broadcast_inner_dimensions=False) y = ragged_tensor_shape.broadcast_to( y, bcast_shape, broadcast_inner_dimensions=False) x_values = x.flat_values if ragged_tensor.is_ragged(x) else x y_values = y.flat_values if ragged_tensor.is_ragged(y) else y mapped_values = self._original_op(x_values, y_values, *args, **kwargs) if ragged_tensor.is_ragged(x): return x.with_flat_values(mapped_values) else: return y.with_flat_values(mapped_values) class RaggedDispatcher(dispatch.OpDispatcher): """OpDispatcher for ragged ops. Dispatches to a wrapped op-handler if at least one of the `tensor_args` arguments is a RaggedTensor or a RaggedTensorValue; and all of the `tensor_args` arguments are convertible to Tensor or RaggedTensor. """ def __init__(self, original_op, ragged_op, ragged_args): op_arg_names = tf_inspect.getfullargspec(original_op)[0] ragged_arg_names = tf_inspect.getfullargspec(ragged_op)[0] if op_arg_names != ragged_arg_names: raise AssertionError( 'Signature must exactly match when overriding %s with %s: %s vs %s' % (original_op, ragged_op, op_arg_names, ragged_arg_names)) self._ragged_op = ragged_op self._ragged_args = _get_arg_infos(ragged_op, ragged_args) if _UPDATE_DOCSTRINGS: arg_list = ' and '.join('`%s`' % arg for arg in ragged_args) original_op.__doc__ = ( original_op.__doc__.rstrip() + '\n\n' + ' {0} may be a `tf.RaggedTensor`.\n'.format(arg_list)) def handle(self, args, kwargs): if self.is_supported(args, kwargs): return self._ragged_op(*args, **kwargs) else: return self.NOT_SUPPORTED def is_supported(self, args, kwargs): found_ragged = False for arg_info in self._ragged_args: if arg_info.position < len(args): arg = args[arg_info.position] else: arg = kwargs.get(arg_info.name, None) if arg_info.is_list: if not isinstance(arg, (list, tuple)): return False for elt in arg: if ragged_tensor.is_ragged(elt): found_ragged = True elif not _is_convertible_to_tensor(elt): return False else: if ragged_tensor.is_ragged(arg): found_ragged = True elif not _is_convertible_to_tensor(arg): return False return found_ragged _UNARY_ELEMENTWISE_OPS = [ array_ops.check_numerics, array_ops.identity, array_ops.ones_like, array_ops.ones_like_v2, array_ops.zeros_like, array_ops.zeros_like_v2, clip_ops.clip_by_value, gen_bitwise_ops.invert, math_ops.abs, math_ops.acos, math_ops.acosh, math_ops.angle, math_ops.asin, math_ops.asinh, math_ops.atan, math_ops.atanh, math_ops.cast, math_ops.ceil, math_ops.conj, math_ops.cos, math_ops.cosh, math_ops.digamma, math_ops.erf, math_ops.erfc, math_ops.exp, math_ops.expm1, math_ops.floor, math_ops.imag, math_ops.is_finite, math_ops.is_inf, math_ops.is_nan, math_ops.lgamma, math_ops.log, math_ops.log1p, math_ops.log_sigmoid, math_ops.logical_not, math_ops.negative, math_ops.real, math_ops.reciprocal, math_ops.rint, math_ops.round, math_ops.rsqrt, 
math_ops.saturate_cast, math_ops.sign, math_ops.sin, math_ops.sinh, math_ops.sqrt, math_ops.square, math_ops.tan, parsing_ops.decode_compressed, string_ops.string_to_number, string_ops.string_to_hash_bucket, string_ops.as_string, string_ops.decode_base64, string_ops.encode_base64, string_ops.regex_full_match, string_ops.regex_replace, string_ops.string_strip, string_ops.string_to_hash_bucket, string_ops.string_to_hash_bucket_fast, string_ops.string_to_hash_bucket_strong, string_ops.substr, string_ops.substr_v2, string_ops.string_length, string_ops.string_length_v2, string_ops.unicode_script, ] _UNARY_LIST_ELEMENTWISE_OPS = [ math_ops.add_n, string_ops.string_join, ] _BINARY_ELEMENTWISE_OPS = [ gen_bitwise_ops.bitwise_and, gen_bitwise_ops.bitwise_or, gen_bitwise_ops.bitwise_xor, gen_bitwise_ops.left_shift, gen_bitwise_ops.right_shift, math_ops.add, math_ops.atan2, math_ops.complex, math_ops.div_no_nan, math_ops.divide, math_ops.equal, math_ops.floordiv, math_ops.floormod, math_ops.greater, math_ops.greater_equal, math_ops.less, math_ops.less_equal, math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor, math_ops.maximum, math_ops.minimum, math_ops.multiply, math_ops.not_equal, math_ops.pow, math_ops.realdiv, math_ops.squared_difference, math_ops.subtract, math_ops.truediv, math_ops.truncatediv, math_ops.truncatemod, ] # We don't need to register a separate delegation handler for these v1 ops, # since they delegate to the v2 ops (which already have a handler). But we # still want to include them in the ragged_op_list() output. _V2_OPS_THAT_ARE_DELEGATED_TO_FROM_V1_OPS = [ math_ops.reduce_sum, math_ops.reduce_prod, math_ops.reduce_min, math_ops.reduce_max, math_ops.reduce_mean, math_ops.reduce_any, math_ops.reduce_all, string_ops.string_to_number, string_ops.string_to_hash_bucket, string_ops.reduce_join_v2, ] def _ragged_gather_v1(params, indices, validate_indices=None, name=None, axis=0, batch_dims=0): return ragged_gather_ops.gather( params=params, indices=indices, validate_indices=validate_indices, axis=axis, batch_dims=batch_dims, name=name) def _ragged_gather_nd_v1(params, indices, name=None, batch_dims=0): return ragged_gather_ops.gather_nd( params=params, indices=indices, batch_dims=batch_dims, name=name) def _ragged_expand_dims_v1(input, axis=None, name=None, dim=None): # pylint: disable=redefined-builtin if dim is not None: axis = dim return ragged_array_ops.expand_dims(input=input, axis=axis, name=name) def _ragged_size_v1(input, name=None, out_type=dtypes.int32): # pylint: disable=redefined-builtin return ragged_array_ops.size(input=input, out_type=out_type, name=name) def _ragged_squeeze_v1(input, axis=None, name=None, squeeze_dims=None): # pylint: disable=redefined-builtin axis = deprecation.deprecated_argument_lookup('axis', axis, 'squeeze_dims', squeeze_dims) return ragged_squeeze_op.squeeze(input, axis, name) def _ragged_dynamic_partition(data, partitions, num_partitions, name=None): """RaggedTensor Dispatch override for tf.dynamic_partition.""" if not isinstance(num_partitions, int) or num_partitions < 0: raise TypeError('num_partitions must be a non-negative integer') result = ragged_array_ops.stack_dynamic_partitions(data, partitions, num_partitions, name) return [result[i] for i in range(num_partitions)] # (original_op, ragged_op, ragged_args) _RAGGED_DISPATCH_OPS = [ (array_ops.batch_gather, ragged_batch_gather_ops.batch_gather, ['params', 'indices']), (array_ops.concat, ragged_concat_ops.concat, ['[values]']), (array_ops.expand_dims, _ragged_expand_dims_v1, 
['input']), (array_ops.expand_dims_v2, ragged_array_ops.expand_dims, ['input']), (array_ops.gather, _ragged_gather_v1, ['params', 'indices']), (array_ops.gather_v2, ragged_gather_ops.gather, ['params', 'indices']), (array_ops.gather_nd, _ragged_gather_nd_v1, ['params', 'indices']), (array_ops.gather_nd_v2, ragged_gather_ops.gather_nd, ['params', 'indices']), (array_ops.one_hot, ragged_array_ops.ragged_one_hot, ['indices']), (array_ops.rank, ragged_array_ops.rank, ['input']), (array_ops.size, _ragged_size_v1, ['input']), (array_ops.size_v2, ragged_array_ops.size, ['input']), (array_ops.squeeze, _ragged_squeeze_v1, ['input']), (array_ops.squeeze_v2, ragged_squeeze_op.squeeze, ['input']), (array_ops.stack, ragged_concat_ops.stack, ['[values]']), (array_ops.tile, ragged_array_ops.tile, ['input']), (array_ops.where, ragged_where_op.where, ['condition', 'x', 'y']), (data_flow_ops.dynamic_partition, _ragged_dynamic_partition, ['data', 'partitions']), (math_ops.unsorted_segment_sum, ragged_math_ops.segment_sum, ['data', 'segment_ids']), (math_ops.unsorted_segment_prod, ragged_math_ops.segment_prod, ['data', 'segment_ids']), (math_ops.unsorted_segment_min, ragged_math_ops.segment_min, ['data', 'segment_ids']), (math_ops.unsorted_segment_max, ragged_math_ops.segment_max, ['data', 'segment_ids']), (math_ops.unsorted_segment_mean, ragged_math_ops.segment_mean, ['data', 'segment_ids']), (math_ops.unsorted_segment_sqrt_n, ragged_math_ops.segment_sqrt_n, ['data', 'segment_ids']), (string_ops.reduce_join_v2, ragged_string_ops.reduce_join, ['inputs']), (math_ops.reduce_sum, ragged_math_ops.reduce_sum, ['input_tensor']), (math_ops.reduce_prod, ragged_math_ops.reduce_prod, ['input_tensor']), (math_ops.reduce_min, ragged_math_ops.reduce_min, ['input_tensor']), (math_ops.reduce_max, ragged_math_ops.reduce_max, ['input_tensor']), (math_ops.reduce_mean, ragged_math_ops.reduce_mean, ['input_tensor']), (math_ops.reduce_any, ragged_math_ops.reduce_any, ['input_tensor']), (math_ops.reduce_all, ragged_math_ops.reduce_all, ['input_tensor']), ] def register_dispatchers(): """Constructs & registers OpDispatchers for ragged ops.""" op_list = ( _UNARY_ELEMENTWISE_OPS + _UNARY_LIST_ELEMENTWISE_OPS + _BINARY_ELEMENTWISE_OPS + [x[0] for x in _RAGGED_DISPATCH_OPS]) for op in op_list: _, undecorated_op = tf_decorator.unwrap(op) if not hasattr(undecorated_op, tf_export.API_ATTRS[tf_export.TENSORFLOW_API_NAME].names): raise AssertionError('Expected %s to be an exported symbol ' '(while adding a RaggedTensor dispatcher)') for op in _UNARY_ELEMENTWISE_OPS: UnaryRaggedElementwiseDispatcher(op).register(op) for op in _UNARY_LIST_ELEMENTWISE_OPS: UnaryRaggedElementwiseDispatcher(op, True).register(op) for op in _BINARY_ELEMENTWISE_OPS: BinaryRaggedElementwiseDispatcher(op).register(op) for (original_op, ragged_op, args) in _RAGGED_DISPATCH_OPS: RaggedDispatcher(original_op, ragged_op, args).register(original_op) def _ragged_op_signature(op, ragged_args): """Returns a signature for the given op, marking ragged args in bold.""" op_name = tf_export.get_canonical_name_for_symbol(op) argspec = tf_inspect.getfullargspec(op) arg_names = argspec.args # Mark ragged arguments in bold. for pos in ragged_args: arg_names[pos] = '**' + arg_names[pos] + '**' # Add argument defaults. 
for pos in range(-1, -len(argspec.defaults) - 1, -1): arg_names[pos] += '=`{!r}`'.format(argspec.defaults[pos]) # Add varargs and keyword args if argspec.varargs: arg_names.append('*' + argspec.varargs) if argspec.varkw: arg_names.append('**' + argspec.varkw) return '* `tf.{}`({})'.format(op_name, ', '.join(arg_names)) def _op_is_in_tf_version(op, version): if version == 1: return (tf_export.get_v1_names(tf_decorator.unwrap(op)[1]) or op in _V2_OPS_THAT_ARE_DELEGATED_TO_FROM_V1_OPS) elif version == 2: return tf_export.get_v2_names(tf_decorator.unwrap(op)[1]) else: raise ValueError('Expected version 1 or 2.') def ragged_op_list(tf_version=1): """Returns a string listing operators that have dispatchers registered.""" lines = [] for op in _UNARY_ELEMENTWISE_OPS + _UNARY_LIST_ELEMENTWISE_OPS: if _op_is_in_tf_version(op, tf_version): lines.append(_ragged_op_signature(op, [0])) for op in _BINARY_ELEMENTWISE_OPS: if _op_is_in_tf_version(op, tf_version): lines.append(_ragged_op_signature(op, [0, 1])) for op, _, ragged_args in _RAGGED_DISPATCH_OPS: if _op_is_in_tf_version(op, tf_version): arginfos = _get_arg_infos(op, ragged_args) ragged_args = [arginfo.position for arginfo in arginfos] lines.append(_ragged_op_signature(op, ragged_args)) return ('\n\n### Additional ops that support `RaggedTensor`\n\n' 'Arguments that accept `RaggedTensor`s are marked in **bold**.\n\n' + '\n'.join(sorted(lines)) + '\n') register_dispatchers()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_dispatch.py
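The dispatch registry in the file above is what lets many ordinary TensorFlow ops accept `RaggedTensor` inputs transparently. A minimal usage sketch of that effect, assuming eager execution (TF 2.x or TF 1.15 with v2 behavior enabled); the commented results are illustrative values, not exact printed reprs:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2, 3], [], [4, 5]])

# Binary elementwise ops listed in _BINARY_ELEMENTWISE_OPS dispatch to their
# ragged handlers and preserve the ragged row structure.
doubled = tf.multiply(rt, 2)          # values: [[2, 4, 6], [], [8, 10]]

# Reductions listed in _RAGGED_DISPATCH_OPS also accept RaggedTensors.
row_sums = tf.reduce_sum(rt, axis=1)  # values: [6, 0, 9]
```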
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Array operations for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_dispatch # pylint: disable=unused-import from tensorflow.python.ops.ragged import ragged_operators # pylint: disable=unused-import from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_tensor_shape from tensorflow.python.ops.ragged import ragged_where_op #=============================================================================== # ragged.batch_gather_with_default #=============================================================================== def batch_gather_with_default(params, indices, default_value='', name=None): """Same as `batch_gather` but inserts `default_value` for invalid indices. This operation is similar to `batch_gather` except that it will substitute the value for invalid indices with `default_value` as the contents. See `batch_gather` for more details. Args: params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`, `M>0`). indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`). default_value: A value to be inserted in places where `indices` are out of bounds. Must be the same dtype as params and either a scalar or rank 1. name: A name for the operation (optional). Returns: A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`. `result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`. 
#### Example: ```python >>> params = tf.ragged.constant([ ['a', 'b', 'c'], ['d'], [], ['e']]) >>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]]) >>> batch_gather_with_default(params, indices, 'FOO') [['b', 'c', 'FOO'], [], [], ['e', 'FOO']] ``` """ with ops.name_scope(name, 'RaggedBatchGatherWithDefault'): params = ragged_tensor.convert_to_tensor_or_ragged_tensor( params, name='params', ) indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( indices, name='indices', ) default_value = ragged_tensor.convert_to_tensor_or_ragged_tensor( default_value, name='default_value', ) row_splits_dtype, (params, indices, default_value) = ( ragged_tensor.match_row_splits_dtypes(params, indices, default_value, return_dtype=True)) # TODO(hterry): lift this restriction and support default_values of # of rank > 1 if default_value.shape.ndims not in (0, 1): raise ValueError('"default_value" must be a scalar or vector') upper_bounds = None if indices.shape.ndims is None: raise ValueError('Indices must have a known rank.') if params.shape.ndims is None: raise ValueError('Params must have a known rank.') num_batch_dimensions = indices.shape.ndims - 1 pad = None # The logic for this works as follows: # - create a padded params, where: # padded_params[b1...bn, 0] = default_value # padded_params[b1...bn, i] = params[b1...bn, i-1] (i>0) # - create an `upper_bounds` Tensor that contains the number of elements # in each innermost rank. Broadcast `upper_bounds` to be the same shape # as `indices`. # - check to see which index in `indices` are out of bounds and substitute # it with the index containing `default_value` (the first). # - call batch_gather with the indices adjusted. with ops.control_dependencies([ check_ops.assert_greater_equal(array_ops.rank(params), array_ops.rank(indices))]): if ragged_tensor.is_ragged(params): row_lengths = ragged_array_ops.expand_dims( params.row_lengths(axis=num_batch_dimensions), axis=-1) upper_bounds = math_ops.cast(row_lengths, indices.dtype) pad_shape = _get_pad_shape(params, indices, row_splits_dtype) pad = ragged_tensor_shape.broadcast_to( default_value, pad_shape) else: params_shape = array_ops.shape(params) pad_shape = array_ops.concat([ params_shape[:num_batch_dimensions], [1], params_shape[num_batch_dimensions + 1:params.shape.ndims] ], 0) upper_bounds = params_shape[num_batch_dimensions] pad = array_ops.broadcast_to(default_value, pad_shape) # Add `default_value` as the first value in the innermost (ragged) rank. pad = math_ops.cast(pad, params.dtype) padded_params = array_ops.concat( [pad, params], axis=num_batch_dimensions) # Adjust the indices by substituting out-of-bound indices to the # default-value index (which is the first element) shifted_indices = indices + 1 is_out_of_bounds = (indices < 0) | (indices > upper_bounds) adjusted_indices = ragged_where_op.where( is_out_of_bounds, x=array_ops.zeros_like(indices), y=shifted_indices, ) return array_ops.batch_gather( params=padded_params, indices=adjusted_indices, name=name) def _get_pad_shape(params, indices, row_splits_dtype): """Gets the RaggedTensorDynamicShape for the pad tensor.""" num_batch_dimensions = indices.shape.ndims - 1 params_shape = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor( params, dim_size_dtype=row_splits_dtype) # We want to create a pad tensor that can be concatenated with the params. 
if params.shape.ndims == indices.shape.ndims: # When params and indices are the same rank, the shape of the pad tensor is # almost identical to params, except the last dimension which has size = 1. if params_shape.num_inner_dimensions == 0: pad_dims = params_shape.partitioned_dim_sizes[:-1] + ( array_ops.ones_like(params_shape.partitioned_dim_sizes[-1]),) return ragged_tensor_shape.RaggedTensorDynamicShape( pad_dims, []) else: return ragged_tensor_shape.RaggedTensorDynamicShape( params_shape.partitioned_dim_sizes, array_ops.concat([params_shape.inner_dim_sizes[:-1], [1]], axis=0)) else: # When the rank of indices < params, the pad has the same dimension as # params up to the 'num_batch_dimensions' rank. Every dimension after that # has size 1. pad_dims = None if num_batch_dimensions == 0: pad_dims = (constant_op.constant(1, dtype=row_splits_dtype),) + ( constant_op.constant([1], dtype=row_splits_dtype),) * ( params_shape.num_partitioned_dimensions - num_batch_dimensions - 1) else: batch_dimensions = params_shape.partitioned_dim_sizes[ :num_batch_dimensions] gather_dimension = params_shape.partitioned_dim_sizes[ num_batch_dimensions] pad_dims = batch_dimensions + ( array_ops.ones_like(gather_dimension),) * ( params_shape.num_partitioned_dimensions - num_batch_dimensions) return ragged_tensor_shape.RaggedTensorDynamicShape( pad_dims, params_shape.inner_dim_sizes)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py
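A short usage sketch for the op defined above, mirroring its docstring example; the module import path follows this source tree and eager execution is assumed so results can be read off directly:

```python
import tensorflow as tf
from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op as bg

params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]])

# Out-of-bounds indices (-1 and 10 here) pick up the default value instead of
# raising an error; in-bounds indices gather from params as usual.
result = bg.batch_gather_with_default(params, indices, default_value='FOO')
# values: [[b'b', b'c', b'FOO'], [], [], [b'e', b'FOO']]
```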
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """where operation for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_concat_ops from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.ops.ragged import ragged_tensor def where(condition, x=None, y=None, name=None): """Return the elements, either from `x` or `y`, depending on the `condition`. : If both `x` and `y` are `None`: Returns the coordinates of true elements of `condition`. The coordinates are returned in a 2-D tensor with shape `[num_true_values, dim_size(condition)]`, where `result[i]` is the coordinates of the `i`th true value (in row-major order). : If both `x` and `y` are non-`None`: Returns a tensor formed by selecting values from `x` where condition is true, and from `y` when condition is false. In particular: : If `condition`, `x`, and `y` all have the same shape: * `result[i1...iN] = x[i1...iN]` if `condition[i1...iN]` is true. * `result[i1...iN] = y[i1...iN]` if `condition[i1...iN]` is false. : Otherwise: * `condition` must be a vector. * `x` and `y` must have the same number of dimensions. * The outermost dimensions of `condition`, `x`, and `y` must all have the same size. * `result[i] = x[i]` if `condition[i]` is true. * `result[i] = y[i]` if `condition[i]` is false. Args: condition: A potentially ragged tensor of type `bool` x: A potentially ragged tensor (optional). y: A potentially ragged tensor (optional). Must be specified if `x` is specified. Must have the same rank and type as `x`. name: A name of the operation (optional) Returns: : If both `x` and `y` are `None`: A `Tensor` with shape `(num_true, dim_size(condition))`. : Otherwise: A potentially ragged tensor with the same type, rank, and outermost dimension size as `x` and `y`. `result.ragged_rank = max(x.ragged_rank, y.ragged_rank)`. Raises: ValueError: When exactly one of `x` or `y` is non-`None`; or when `condition`, `x`, and `y` have incompatible shapes. #### Examples: ```python >>> # Coordinates where condition is true. >>> condition = tf.compat.v1.ragged.constant_value( ... [[True, False, True], [False, True]]) >>> ragged.where(condition) [[0, 0], [0, 2], [1, 1]] >>> # Elementwise selection between x and y, based on condition. >>> condition = tf.compat.v1.ragged.constant_value( ... [[True, False, True], [False, True]]) >>> x = tf.compat.v1.ragged.constant_value([['A', 'B', 'C'], ['D', 'E']]) >>> y = tf.compat.v1.ragged.constant_value([['a', 'b', 'c'], ['d', 'e']]) >>> ragged.where(condition, x, y) [['A', 'b', 'C'], ['d', 'E']] >>> # Row selection between x and y, based on condition. 
>>> condition = [True, False] >>> x = tf.compat.v1.ragged.constant_value([['A', 'B', 'C'], ['D', 'E']]) >>> y = tf.compat.v1.ragged.constant_value([['a', 'b', 'c'], ['d', 'e']]) >>> ragged.where(condition, x, y) [['A', 'B', 'C'], ['d', 'e']] ``` """ if (x is None) != (y is None): raise ValueError('x and y must be either both None or both non-None') with ops.name_scope('RaggedWhere', name, [condition, x, y]): condition = ragged_tensor.convert_to_tensor_or_ragged_tensor( condition, name='condition') if x is None: return _coordinate_where(condition) else: x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x') y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y') condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y) return _elementwise_where(condition, x, y) def _elementwise_where(condition, x, y): """Ragged version of tf.where(condition, x, y).""" condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor) x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor) y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor) if not (condition_is_ragged or x_is_ragged or y_is_ragged): return array_ops.where(condition, x, y) elif condition_is_ragged and x_is_ragged and y_is_ragged: return ragged_functional_ops.map_flat_values(array_ops.where, condition, x, y) elif not condition_is_ragged: # Concatenate x and y, and then use `gather` to assemble the selected rows. condition.shape.assert_has_rank(1) x_and_y = ragged_concat_ops.concat([x, y], axis=0) x_nrows = _nrows(x, out_type=x_and_y.row_splits.dtype) y_nrows = _nrows(y, out_type=x_and_y.row_splits.dtype) indices = array_ops.where(condition, math_ops.range(x_nrows), x_nrows + math_ops.range(y_nrows)) return ragged_gather_ops.gather(x_and_y, indices) else: raise ValueError('Input shapes do not match.') def _coordinate_where(condition): """Ragged version of tf.where(condition).""" if not isinstance(condition, ragged_tensor.RaggedTensor): return array_ops.where(condition) # The coordinate for each `true` value in condition.values. selected_coords = _coordinate_where(condition.values) # Convert the first index in each coordinate to a row index and column index. condition = condition.with_row_splits_dtype(selected_coords.dtype) first_index = selected_coords[:, 0] selected_rows = array_ops.gather(condition.value_rowids(), first_index) selected_row_starts = array_ops.gather(condition.row_splits, selected_rows) selected_cols = first_index - selected_row_starts # Assemble the row & column index with the indices for inner dimensions. return array_ops.concat([ array_ops.expand_dims(selected_rows, 1), array_ops.expand_dims(selected_cols, 1), selected_coords[:, 1:] ], axis=1) def _nrows(rt_input, out_type): if isinstance(rt_input, ragged_tensor.RaggedTensor): return rt_input.nrows(out_type=out_type) else: return array_ops.shape(rt_input, out_type=out_type)[0]
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_where_op.py
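The two code paths above (coordinate-where when only `condition` is given, elementwise-where when `x` and `y` are also given) can be exercised as in the docstring; a small sketch assuming eager execution:

```python
import tensorflow as tf
from tensorflow.python.ops.ragged import ragged_where_op

condition = tf.ragged.constant([[True, False, True], [False, True]])

# With only `condition`, the result is a dense tensor of [row, col]
# coordinates of every True element.
coords = ragged_where_op.where(condition)          # [[0, 0], [0, 2], [1, 1]]

# With x and y supplied, selection happens per element.
x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])
y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])
selected = ragged_where_op.where(condition, x, y)
# values: [['A', 'b', 'C'], ['d', 'E']]
```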
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_concat_ops.stack.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_concat_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedStackOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters( dict( descr='One rank-2 input (ragged_rank=1), axis=0', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21']],), # shape=(3, None) axis=0, expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']]]), dict( descr='One rank-2 input (ragged_rank=1), axis=1', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']],), # shape=(3, None) axis=1, expected=[ [[b'a00', b'a01']], [[]], [[b'a20', b'a21', b'a22']]]), dict( descr='One rank-2 input (ragged_rank=1), axis=2', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']],), # shape=(3, None) axis=2, expected=[ [[b'a00'], [b'a01']], [], [[b'a20'], [b'a21'], [b'a22']]]), dict( descr='One rank-2 input (ragged_rank=1), axis=-3', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21']],), # shape=(3, None) axis=-3, expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']]]), dict( descr='One rank-2 input (ragged_rank=1), axis=-2', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']],), # shape=(3, None) axis=-2, expected=[ [[b'a00', b'a01']], [[]], [[b'a20', b'a21', b'a22']]]), dict( descr='One rank-2 input (ragged_rank=1), axis=-1', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']],), # shape=(3, None) axis=-1, expected=[ [[b'a00'], [b'a01']], [], [[b'a20'], [b'a21'], [b'a22']]]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=0', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None) [['b00'], ['b10']]), # shape=(2, None) axis=0, expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']], [[b'b00'], [b'b10']]]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=1', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None) axis=1, expected=[ [[b'a00', b'a01'], [b'b00']], [[], [b'b10', b'b11', b'b12']], [[b'a20', b'a21', b'a22'], [b'b20']]]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=2', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00', 'b01'], [], ['b20', 'b21', 'b22']]), # shape=(3, None) axis=2, expected=[ [[b'a00', b'b00'], [b'a01', b'b01']], [], [[b'a20', b'b20'], [b'a21', b'b21'], [b'a22', b'b22']]]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=-3', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None) [['b00'], ['b10']]), # shape=(2, 
None) axis=-3, expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']], [[b'b00'], [b'b10']]]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=-2', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None) axis=-2, expected=[ [[b'a00', b'a01'], [b'b00']], [[], [b'b10', b'b11', b'b12']], [[b'a20', b'a21', b'a22'], [b'b20']]]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=-1', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00', 'b01'], [], ['b20', 'b21', 'b22']]), # shape=(3, None) axis=-1, expected=[ [[b'a00', b'b00'], [b'a01', b'b01']], [], [[b'a20', b'b20'], [b'a21', b'b21'], [b'a22', b'b22']]]), dict( descr='Three rank-2 inputs (ragged_rank=1), axis=0', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00'], ['b10']], # shape=(2, None) [['c00'], ['c10', 'c11'], ['c21']]), # shape=(3, None) axis=0, expected=[[[b'a00', b'a01'], [], [b'a20', b'a21', b'a22']], [[b'b00'], [b'b10']], [[b'c00'], [b'c10', b'c11'], [b'c21']]]), dict( descr='Three rank-2 inputs (ragged_rank=1), axis=1', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None) [[], ['c10', 'c11'], ['c20', 'c21']]), # shape=(3, None) axis=1, expected=[ [[b'a00', b'a01'], [b'b00'], []], [[], [b'b10', b'b11', b'b12'], [b'c10', b'c11']], [[b'a20', b'a21', b'a22'], [b'b20'], [b'c20', b'c21']]], expected_shape=[3, None, None]), dict( descr='Three rank-2 inputs (ragged_rank=1), axis=2', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00', 'b01'], [], ['b20', 'b21', 'b22']], # shape=(3, None) [['c00', 'c01'], [], ['c20', 'c21', 'c22']]), # shape=(3, None) axis=2, expected=[ [[b'a00', b'b00', b'c00'], [b'a01', b'b01', b'c01']], [], [[b'a20', b'b20', b'c20'], [b'a21', b'b21', b'c21'], [b'a22', b'b22', b'c22']]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=0', rt_inputs=( [[['a000', 'a001'], ['a010']], [['a100', 'a101', 'a102'], ['a110', 'a111']]], [[['b000']], [['b100', 'b101'], ['b110']]], [[], [['c100', 'c101', 'c102', 'c103']], [[], ['c210', 'c211']]]), axis=0, expected=[ [[[b'a000', b'a001'], [b'a010']], [[b'a100', b'a101', b'a102'], [b'a110', b'a111']]], [[[b'b000']], [[b'b100', b'b101'], [b'b110']]], [[], [[b'c100', b'c101', b'c102', b'c103']], [[], [b'c210', b'c211']]]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=1', rt_inputs=( [[['a000', 'a001'], ['a010']], [['a100', 'a101', 'a102'], ['a110', 'a111']]], [[['b000']], [['b100', 'b101'], ['b110']]], [[], [[], ['c110', 'c111']]]), axis=1, expected=[ [[[b'a000', b'a001'], [b'a010']], [[b'b000']], []], [[[b'a100', b'a101', b'a102'], [b'a110', b'a111']], [[b'b100', b'b101'], [b'b110']], [[], [b'c110', b'c111']]]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=2', rt_inputs=( [[['a000', 'a001'], ['a010']], [['a100', 'a101', 'a102'], ['a110', 'a111']]], [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]], [[['c000'], ['c010']], [[], ['c110', 'c111']]]), axis=2, expected=[ [[[b'a000', b'a001'], [], [b'c000']], [[b'a010'], [b'b010', b'b011'], [b'c010']]], [[[b'a100', b'a101', b'a102'], [b'b100', b'b101'], []], [[b'a110', b'a111'], [b'b110'], [b'c110', b'c111']]]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=3', rt_inputs=( [[['a000', 'a001'], ['a010']]], [[['b000', 'b001'], ['b010']]], [[['c000', 'c001'], ['c010']]]), axis=3, expected=[[ [[b'a000', b'b000', b'c000'], [b'a001', b'b001', 
b'c001']], [[b'a010', b'b010', b'c010']]]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=-2', rt_inputs=( [[['a000', 'a001'], ['a010']], [['a100', 'a101', 'a102'], ['a110', 'a111']]], [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]], [[['c000'], ['c010']], [[], ['c110', 'c111']]]), axis=-2, expected=[ [[[b'a000', b'a001'], [], [b'c000']], [[b'a010'], [b'b010', b'b011'], [b'c010']]], [[[b'a100', b'a101', b'a102'], [b'b100', b'b101'], []], [[b'a110', b'a111'], [b'b110'], [b'c110', b'c111']]]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=-1', rt_inputs=( [[['a000', 'a001'], ['a010']]], [[['b000', 'b001'], ['b010']]], [[['c000', 'c001'], ['c010']]]), axis=-1, expected=[[ [[b'a000', b'b000', b'c000'], [b'a001', b'b001', b'c001']], [[b'a010', b'b010', b'c010']]]]), dict( descr='ragged_stack([uniform, ragged, uniform], axis=1)', ragged_ranks=[0, 1, 0], rt_inputs=( [['0('], ['1('], ['2(']], # shape=(3, 1) [['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None) [[')0'], [')1'], [')2']]), # shape=(3, 1) axis=1, expected=[ [[b'0('], [b'b00'], [b')0']], [[b'1('], [b'b10', b'b11', b'b12'], [b')1']], [[b'2('], [b'b20'], [b')2']]]), dict( descr='ragged_stack([uniform, uniform], axis=0)', ragged_ranks=[0, 0], rt_inputs=( [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2) [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3) axis=0, expected=[ [[b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21']], [[b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]]), dict( descr='ragged_stack([uniform, ragged], axis=0)', ragged_ranks=[0, 1], rt_inputs=( [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2) [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3) axis=0, expected=[ [[b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21']], [[b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]]), dict( descr='ragged_stack([uniform, ragged], axis=0) with rank-3 inputs', ragged_ranks=[0, 2], rt_inputs=( [[[0, 1], [2, 3]], [[4, 5], [6, 7]]], # shape = (2, 2, 2) [[[8], [8, 8]]]), # shape = (2, None, None) axis=0, expected=[[[[0, 1], [2, 3]], [[4, 5], [6, 7]]], [[[8], [8, 8]]]]), dict( descr='Two rank-3 inputs with ragged_rank=1, axis=-1', ragged_ranks=[1, 1], rt_inputs=( [[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]], [[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]), axis=-1, expected=[ [[[0, 9], [1, 8]], [[2, 7], [3, 6]], [[4, 5], [5, 4]]], [], [[[6, 3], [7, 2]], [[8, 1], [9, 0]]]], expected_shape=[3, None, 2, 2]), dict( descr='Two rank-3 inputs with ragged_rank=1, axis=-2', ragged_ranks=[1, 1], rt_inputs=( [[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]], [[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]), axis=-2, expected=[ [[[0, 1], [9, 8]], [[2, 3], [7, 6]], [[4, 5], [5, 4]]], [], [[[6, 7], [3, 2]], [[8, 9], [1, 0]]]]), dict( descr='ragged_stack([vector, vector], axis=0)', ragged_ranks=[0, 0], rt_inputs=([1, 2, 3], [4, 5, 6]), axis=0, expected=[[1, 2, 3], [4, 5, 6]]), dict( descr='One input (so just adds an outer dimension)', rt_inputs=([['a00', 'a01'], [], ['a20', 'a21']],), axis=0, expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']]]), ) # pyformat: disable def testRaggedStack(self, descr, rt_inputs, axis, expected, ragged_ranks=None, expected_ragged_rank=None, expected_shape=None): if ragged_ranks is None: ragged_ranks = [None] * len(rt_inputs) rt_inputs = [ ragged_factory_ops.constant(rt_input, ragged_rank=rrank) # pylint: disable=g-long-ternary if rrank != 0 else constant_op.constant(rt_input) for (rt_input, rrank) in zip(rt_inputs, 
ragged_ranks) ] stacked = ragged_concat_ops.stack(rt_inputs, axis) if expected_ragged_rank is not None: self.assertEqual(stacked.ragged_rank, expected_ragged_rank) if expected_shape is not None: self.assertEqual(stacked.shape.as_list(), expected_shape) self.assertAllEqual(stacked, expected) @parameterized.parameters( dict( rt_inputs=(), axis=0, error=ValueError, message=r'rt_inputs may not be empty\.'), dict( rt_inputs=([[1, 2]], [[3, 4]]), axis=r'foo', error=TypeError, message='axis must be an int'), dict( rt_inputs=([[1, 2]], [[3, 4]]), axis=-4, error=ValueError, message='axis=-4 out of bounds: expected -3<=axis<3'), dict( rt_inputs=([[1, 2]], [[3, 4]]), axis=3, error=ValueError, message='axis=3 out of bounds: expected -3<=axis<3'), ) def testError(self, rt_inputs, axis, error, message): self.assertRaisesRegexp(error, message, ragged_concat_ops.stack, rt_inputs, axis) def testSingleTensorInput(self): """Tests ragged_stack with a single tensor input. Usually, we pass a list of values in for rt_inputs. However, you can also pass in a single value (as with tf.stack), in which case it is equivalent to expand_dims(axis=0). This test exercises that path. """ rt_inputs = ragged_factory_ops.constant([[1, 2], [3, 4]]) stacked = ragged_concat_ops.stack(rt_inputs, 0) self.assertAllEqual(stacked, [[[1, 2], [3, 4]]]) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_stack_op_test.py
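The behaviour exercised by the tests above is available through `ragged_concat_ops.stack`; a brief sketch assuming eager execution, with commented values matching the test expectations:

```python
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops

a = ragged_factory_ops.constant([['a00', 'a01'], [], ['a20', 'a21']])
b = ragged_factory_ops.constant([['b00'], ['b10']])

# axis=0 stacks the inputs as new outermost rows, even when row counts differ.
stacked = ragged_concat_ops.stack([a, b], axis=0)
# values: [[['a00', 'a01'], [], ['a20', 'a21']], [['b00'], ['b10']]]

# A single (non-list) input just gains an outer dimension, like tf.stack.
single = ragged_concat_ops.stack(a, axis=0)
# values: [[['a00', 'a01'], [], ['a20', 'a21']]]
```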
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged.to_tensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_conversion_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedTensorToTensorOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testDocStringExamples(self): """Example from ragged_to_tensor.__doc__.""" rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]]) dt = rt.to_tensor() self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]]) @parameterized.parameters( { 'rt_input': [], 'ragged_rank': 1, 'expected': [], 'expected_shape': [0, 0], }, { 'rt_input': [[1, 2, 3], [], [4], [5, 6]], 'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]] }, { 'rt_input': [[1, 2, 3], [], [4], [5, 6]], 'default': 9, 'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]] }, { 'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]], 'ragged_rank': 1, 'default': [9], 'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]], [[5], [6], [9]]] }, { 'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]], 'expected': [ [[1, 2], [0, 0], [3, 4]], # [[0, 0], [0, 0], [0, 0]], # [[5, 0], [0, 0], [0, 0]], # [[6, 7], [8, 0], [0, 0]], # ] }, { 'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]], 'default': 9, 'expected': [ [[1, 2], [9, 9], [3, 4]], # [[9, 9], [9, 9], [9, 9]], # [[5, 9], [9, 9], [9, 9]], # [[6, 7], [8, 9], [9, 9]], # ] }, { 'rt_input': [[[1], [2], [3]]], 'ragged_rank': 1, 'default': 0, 'expected': [[[1], [2], [3]]], }, { 'rt_input': [[[[1], [2]], [], [[3]]]], 'default': 9, 'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]], }, ) def testRaggedTensorToTensor(self, rt_input, expected, ragged_rank=None, default=None, expected_shape=None): rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank) dt = rt.to_tensor(default) self.assertIsInstance(dt, ops.Tensor) self.assertEqual(rt.dtype, dt.dtype) self.assertTrue(dt.shape.is_compatible_with(rt.shape)) if expected_shape is not None: expected = np.ndarray(expected_shape, buffer=np.array(expected)) self.assertAllEqual(dt, expected) @parameterized.parameters( { 'rt_input': [[1, 2, 3]], 'default': [0], 'error': (ValueError, r'Shape \(1,\) must have rank at most 0'), }, { 'rt_input': [[[1, 2], [3, 4]], [[5, 6]]], 
'ragged_rank': 1, 'default': [7, 8, 9], 'error': (ValueError, r'Shapes \(3,\) and \(2,\) are incompatible'), }, { 'rt_input': [[1, 2, 3]], 'default': 'a', 'error': (TypeError, '.*'), }, ) def testError(self, rt_input, default, error, ragged_rank=None): rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank) with self.assertRaisesRegexp(error[0], error[1]): rt.to_tensor(default) # This covers the tests above, but with the new implementation. @test_util.run_all_in_graph_and_eager_modes class RaggedTensorToTensorOpNewTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testDocStringExamples(self): """Example from ragged_to_tensor.__doc__.""" rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]]) dt = ragged_conversion_ops.ragged_to_dense(rt) self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]]) @parameterized.parameters( { 'rt_input': [], 'ragged_rank': 1, 'expected': [], 'expected_shape': [0, 0], }, { 'rt_input': [[1, 2, 3], [], [4], [5, 6]], 'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]] }, { 'rt_input': [[1, 2, 3], [], [4], [5, 6]], 'default': 9, 'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]] }, { 'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]], 'ragged_rank': 1, 'default': [9], 'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]], [[5], [6], [9]]] }, { 'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]], 'expected': [ [[1, 2], [0, 0], [3, 4]], # [[0, 0], [0, 0], [0, 0]], # [[5, 0], [0, 0], [0, 0]], # [[6, 7], [8, 0], [0, 0]], # ] }, { 'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]], 'default': 9, 'expected': [ [[1, 2], [9, 9], [3, 4]], # [[9, 9], [9, 9], [9, 9]], # [[5, 9], [9, 9], [9, 9]], # [[6, 7], [8, 9], [9, 9]], # ] }, { 'rt_input': [[[1], [2], [3]]], 'ragged_rank': 1, 'default': 0, 'expected': [[[1], [2], [3]]], }, { 'rt_input': [[[[1], [2]], [], [[3]]]], 'default': 9, 'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]], }, ) def testRaggedTensorToTensor(self, rt_input, expected, ragged_rank=None, default=None, expected_shape=None): rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank) dt = ragged_conversion_ops.ragged_to_dense(rt, default_value=default) self.assertIsInstance(dt, ops.Tensor) self.assertEqual(rt.dtype, dt.dtype) self.assertTrue(dt.shape.is_compatible_with(rt.shape)) if expected_shape is not None: expected = np.ndarray(expected_shape, buffer=np.array(expected)) self.assertAllEqual(dt, expected) @parameterized.parameters( { 'rt_input': [[1, 2, 3]], 'default': 'a', 'error': (TypeError, '.*'), }, { 'rt_input': [[1, 2, 3]], 'default': 'b', 'error': (TypeError, '.*'), }) def testError(self, rt_input, default, error, ragged_rank=None): rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank) with self.assertRaisesRegexp(error[0], error[1]): ragged_conversion_ops.ragged_to_dense(rt, default_value=default) @test_util.run_all_in_graph_and_eager_modes class RaggedToTensorOpAdditionalTests(test_util.TensorFlowTestCase): def _compare_to_reference(self, ragged_tensor, expected=None, default_value=None): treatment = ragged_conversion_ops.ragged_to_dense( ragged_tensor, default_value=default_value) control = ragged_tensor.to_tensor(default_value=default_value) self.assertAllEqual(control, treatment) if expected is not None: self.assertAllEqual(expected, treatment) def test_already_dense_simple(self): """This studies a tensor initialized with value_rowids and nrows.""" input_data = RaggedTensor.from_value_rowids( values=constant_op.constant([6, 7, 8, 9, 10, 
11], dtype=dtypes.int64), value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) self._compare_to_reference(input_data, [[6, 7, 8], [9, 10, 11]]) def test_already_dense_with_dense_values_and_default(self): """This studies a tensor initialized with value_rowids and nrows.""" input_data = RaggedTensor.from_value_rowids( values=constant_op.constant( [[6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]], dtype=dtypes.int64), value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) self._compare_to_reference( input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]]], default_value=constant_op.constant([31, 32], dtype=dtypes.int64)) def test_already_dense_with_dense_values(self): """This studies a tensor initialized with value_rowids and nrows.""" input_data = RaggedTensor.from_value_rowids( values=constant_op.constant( [[6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]], dtype=dtypes.int64), value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) self._compare_to_reference( input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]]]) def test_ragged_with_dense_values_and_default(self): """This studies a tensor initialized with value_rowids and nrows.""" input_data = RaggedTensor.from_value_rowids( values=constant_op.constant( [[6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], dtype=dtypes.int64), value_rowids=constant_op.constant([0, 0, 0, 1, 1], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) self._compare_to_reference( input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [2, 3]]], default_value=[2, 3]) def test_ragged_with_dense_values_and_small_default(self): """This studies a tensor initialized with value_rowids and nrows.""" input_data = RaggedTensor.from_value_rowids( values=constant_op.constant( [[6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], dtype=dtypes.int64), value_rowids=constant_op.constant([0, 0, 0, 1, 1], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) self._compare_to_reference( input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [2, 2]]], default_value=2) def test_already_dense_with_dense_values_string(self): """This studies a tensor initialized with value_rowids and nrows.""" input_data = RaggedTensor.from_value_rowids( values=constant_op.constant( [[b'a', b'b'], [b'c', b'd'], [b'e', b'f'], [b'g', b'jalapeno'], [b'kangaroo', b'llama'], [b'manzana', b'nectar']], dtype=dtypes.string), value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) self._compare_to_reference(input_data, [[[b'a', b'b'], [b'c', b'd'], [b'e', b'f']], [[b'g', b'jalapeno'], [b'kangaroo', b'llama'], [b'manzana', b'nectar']]]) def test_already_dense_with_string(self): """This studies a tensor initialized with value_rowids and nrows.""" input_data = RaggedTensor.from_value_rowids( values=constant_op.constant( ['a', 'b', 'c', 'd', 'e', 'antidisestablishmentarianism'], dtype=dtypes.string), value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) self._compare_to_reference( input_data, [[b'a', b'b', b'c'], [b'd', b'e', b'antidisestablishmentarianism']]) def 
test_already_dense(self): input_data = ragged_factory_ops.constant([[0, 1, 2], [3, 4, 5]]) self._compare_to_reference(input_data, [[0, 1, 2], [3, 4, 5]]) def test_true_ragged(self): input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]]) self._compare_to_reference(input_data, [[0, 1, 2], [0, 0, 0], [3, 0, 0]]) def test_true_ragged_default_3(self): input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]]) self._compare_to_reference( input_data, [[0, 1, 2], [3, 3, 3], [3, 3, 3]], default_value=3) def test_three_dimensional_ragged(self): input_data = ragged_factory_ops.constant([[[0, 1, 2], []], [], [[3]]]) self._compare_to_reference( input_data, [[[0, 1, 2], [3, 3, 3]], [[3, 3, 3], [3, 3, 3]], [[3, 3, 3], [3, 3, 3]]], default_value=3) def test_empty_tensor(self): input_data = RaggedTensor.from_value_rowids( values=constant_op.constant([], dtype=dtypes.int64), value_rowids=constant_op.constant([], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) self._compare_to_reference(input_data, [[], []], default_value=3) def test_empty_last(self): input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3], []]) self._compare_to_reference(input_data, [[0, 1, 2], [0, 0, 0], [3, 0, 0], [0, 0, 0]]) def test_shape_limit(self): input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []]) actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[2, 3]) self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]]) self.assertEqual(actual.shape.as_list(), [2, 3]) def test_shape_limit_tuple(self): input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []]) actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=(2, 3)) self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]]) self.assertEqual(actual.shape.as_list(), [2, 3]) def test_shape_limit_tensor_shape(self): input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []]) actual = ragged_conversion_ops.ragged_to_dense( input_data, shape=tensor_shape.TensorShape([2, 3])) self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]]) self.assertEqual(actual.shape.as_list(), [2, 3]) def test_shape_half_limit_tensor_shape(self): input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []]) actual = ragged_conversion_ops.ragged_to_dense( input_data, shape=tensor_shape.TensorShape([2, None])) self.assertAllEqual(actual, [[0, 1, 2, 3], [0, 0, 0, 0]]) def test_skip_eager_shape_half_limit_tensor_shape(self): # Eager would produce a shape of [2, 4] input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []]) actual = ragged_conversion_ops.ragged_to_dense( input_data, shape=tensor_shape.TensorShape([2, None])) result = actual.shape.as_list() # This is equal to [2, 4] in eager, or [2, None] in non-eager. 
self.assertEqual(result[0], 2) def test_shape_limit_shape_is_tensor_int64(self): input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []]) actual = ragged_conversion_ops.ragged_to_dense( input_data, shape=constant_op.constant([2, 3], dtype=dtypes.int64)) self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]]) self.assertEqual(actual.shape.as_list(), [2, 3]) def test_shape_limit_shape_is_tensor_int32(self): input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []]) actual = ragged_conversion_ops.ragged_to_dense( input_data, shape=constant_op.constant([2, 3], dtype=dtypes.int32)) self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]]) self.assertEqual(actual.shape.as_list(), [2, 3]) def test_shape_expand_first_dim(self): input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]]) actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[4, 4]) self.assertAllEqual( actual, [[0, 1, 2, 0], [0, 0, 0, 0], [3, 0, 0, 0], [0, 0, 0, 0]]) self.assertEqual(actual.shape.as_list(), [4, 4]) def test_value_transposed(self): # This test tries to get a tensor in columnar format, where I am uncertain # as to whether the underlying op, which copies data in the raw format, # could fail. my_value = array_ops.transpose( constant_op.constant([[0, 1, 2, 3], [4, 5, 6, 7]])) input_data = RaggedTensor.from_value_rowids( values=my_value, value_rowids=constant_op.constant([0, 1, 2, 3], dtype=dtypes.int64), nrows=constant_op.constant(4, dtype=dtypes.int64), validate=True) self._compare_to_reference(input_data, [[[0, 4]], [[1, 5]], [[2, 6]], [[3, 7]]]) # This fails on the older version of to_tensor. def test_broadcast_default(self): # This test is commented out. The functionality here is not supported. # The dense dimension here is 2 x 2 input_data = ragged_factory_ops.constant([[[[1, 2], [3, 4]]], []], ragged_rank=1) # This placeholder has a 2 x 1 dimension. default_value = array_ops.placeholder_with_default([[5], [6]], shape=None) actual = ragged_conversion_ops.ragged_to_dense( input_data, default_value=default_value) expected = [[[[1, 2], [3, 4]]], [[[5, 5], [6, 6]]]] self.assertAllEqual(actual, expected) # This fails on the older version of to_tensor. def test_broadcast_default_no_placeholder(self): # Again, this functionality is not supported. It fails more gracefully # when creating the op. input_data = ragged_factory_ops.constant([[[[1, 2], [3, 4]]], []], ragged_rank=1) # default_value has a 2 x 1 dimension. default_value = constant_op.constant([[5], [6]], shape=None) actual = ragged_conversion_ops.ragged_to_dense( input_data, default_value=default_value) expected = [[[[1, 2], [3, 4]]], [[[5, 5], [6, 6]]]] self.assertAllEqual(actual, expected) def test_shape_expand_second_dim(self): input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3], []]) actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[3, 4]) self.assertAllEqual(actual, [[0, 1, 2, 0], [0, 0, 0, 0], [3, 0, 0, 0]]) def test_empty_tensor_with_shape(self): input_data = RaggedTensor.from_value_rowids( values=constant_op.constant([], dtype=dtypes.int64), value_rowids=constant_op.constant([], dtype=dtypes.int64), nrows=constant_op.constant(2, dtype=dtypes.int64), validate=True) actual = ragged_conversion_ops.ragged_to_dense( input_data, default_value=3, shape=[2, 3]) self.assertAllEqual(actual, [[3, 3, 3], [3, 3, 3]]) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
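The conversions under test can be sketched as follows, assuming eager execution; `ragged_to_dense` is the newer kernel-backed path exercised by the second test class, and the commented outputs follow the test expectations:

```python
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]])

# RaggedTensor.to_tensor pads short rows with the default value (0 here).
dense = rt.to_tensor()
# [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]]

# The newer implementation also accepts an explicit output shape, cropping or
# padding rows and columns as needed and filling gaps with default_value.
cropped = ragged_conversion_ops.ragged_to_dense(rt, default_value=1, shape=[2, 2])
# [[9, 8], [1, 1]]
```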
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ragged operations for working with string Tensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_string_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.util import compat as util_compat from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export @tf_export("strings.bytes_split") def string_bytes_split(input, name=None): # pylint: disable=redefined-builtin """Split string elements of `input` into bytes. Examples: ```python >>> tf.strings.bytes_split('hello') ['h', 'e', 'l', 'l', 'o'] >>> tf.strings.bytes_split(['hello', '123']) <RaggedTensor [['h', 'e', 'l', 'l', 'o'], ['1', '2', '3']]> ``` Note that this op splits strings into bytes, not unicode characters. To split strings into unicode characters, use `tf.strings.unicode_split`. See also: `tf.io.decode_raw`, `tf.strings.split`, `tf.strings.unicode_split`. Args: input: A string `Tensor` or `RaggedTensor`: the strings to split. Must have a statically known rank (`N`). name: A name for the operation (optional). Returns: A `RaggedTensor` of rank `N+1`: the bytes that make up the source strings. """ with ops.name_scope(name, "StringsByteSplit", [input]): input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name="input") if isinstance(input, ragged_tensor.RaggedTensor): return input.with_flat_values(string_bytes_split(input.flat_values)) rank = input.shape.ndims if rank is None: raise ValueError("input must have a statically-known rank.") if rank == 0: return string_bytes_split(array_ops.stack([input]))[0] elif rank == 1: indices, values, shape = gen_string_ops.string_split( input, delimiter="", skip_empty=False) return ragged_tensor.RaggedTensor.from_value_rowids( values=values, value_rowids=indices[:, 0], nrows=shape[0], validate=False) else: return string_bytes_split(ragged_tensor.RaggedTensor.from_tensor(input)) # pylint: disable=redefined-builtin @tf_export("strings.unicode_encode") def unicode_encode(input, output_encoding, errors="replace", replacement_char=65533, name=None): r"""Encodes each sequence of Unicode code points in `input` into a string. `result[i1...iN]` is the string formed by concatenating the Unicode codepoints `input[1...iN, :]`, encoded using `output_encoding`. Args: input: An `N+1` dimensional potentially ragged integer tensor with shape `[D1...DN, num_chars]`. output_encoding: Unicode encoding that should be used to encode each codepoint sequence. 
Can be `"UTF-8"`, `"UTF-16-BE"`, or `"UTF-32-BE"`. errors: Specifies the response when an invalid codepoint is encountered (optional). One of: * `'replace'`: Replace invalid codepoint with the `replacement_char`. (default) * `'ignore'`: Skip invalid codepoints. * `'strict'`: Raise an exception for any invalid codepoint. replacement_char: The replacement character codepoint to be used in place of any invalid input when `errors='replace'`. Any valid unicode codepoint may be used. The default value is the default unicode replacement character which is 0xFFFD (U+65533). name: A name for the operation (optional). Returns: A `N` dimensional `string` tensor with shape `[D1...DN]`. #### Example: ```python >>> input = [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]] >>> unicode_encode(input, 'UTF-8') ['G\xc3\xb6\xc3\xb6dnight', '\xf0\x9f\x98\x8a'] ``` """ with ops.name_scope(name, "UnicodeEncode", [input]): input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input) if input_tensor.shape.ndims is None: raise ValueError("Rank of input_tensor must be statically known.") if ragged_tensor.is_ragged(input_tensor): if input_tensor.flat_values.shape.ndims > 1: # If the flat_values of our ragged tensor is multi-dimensional, we can # process it separately and our output will have the same nested splits # as our input. return input_tensor.with_flat_values( unicode_encode(input_tensor.flat_values, output_encoding, errors, replacement_char)) elif input_tensor.ragged_rank > 1: # Recursively process the values of the ragged tensor. return input_tensor.with_values( unicode_encode(input_tensor.values, output_encoding, errors, replacement_char)) else: # Our ragged tensor is of the correct shape (rank 1 flat_values tensor # with ragged_rank of 1) so we can process it as normal. return gen_string_ops.unicode_encode( input_values=input_tensor.values, input_splits=input_tensor.row_splits, output_encoding=output_encoding, errors=errors, replacement_char=replacement_char) else: if input_tensor.shape.ndims == 2: # The input tensor is of the correct 2-D shape, it's just not ragged. return unicode_encode( ragged_tensor.RaggedTensor.from_tensor(input_tensor), output_encoding, errors, replacement_char) elif input_tensor.shape.ndims > 2: # We need to initially flatten the input tensor to 2-D, and then can # reshape the output of our processed flattened tensor. flat_input_tensor = array_ops.reshape( input_tensor, array_ops.stack([-1, array_ops.shape(input_tensor)[-1]])) flat_output_tensor = unicode_encode(flat_input_tensor, output_encoding, errors, replacement_char) return array_ops.reshape(flat_output_tensor, input_tensor.shape[:-1]) elif input_tensor.shape.ndims == 0: raise ValueError("input_tensor's rank must be at least 1.") else: # Our input tensor is rank 1, so we create a ragged tensor with an added # dimension to create the correct input shape & type, and then remove # the additional dimension from the output and return the string scalar. 
ragged_input_tensor = ragged_tensor.RaggedTensor.from_row_splits( input_tensor, array_ops.stack( [0, array_ops.shape(input_tensor, out_type=dtypes.int32)[0]]), validate=False) output_tensor = unicode_encode(ragged_input_tensor, output_encoding, errors, replacement_char) return array_ops.reshape(output_tensor, []) # pylint: disable=redefined-builtin @tf_export("strings.unicode_decode") def unicode_decode(input, input_encoding, errors="replace", replacement_char=0xFFFD, replace_control_characters=False, name=None): r"""Decodes each string in `input` into a sequence of Unicode code points. `result[i1...iN, j]` is the Unicode codepoint for the `j`th character in `input[i1...iN]`, when decoded using `input_encoding`. Args: input: An `N` dimensional potentially ragged `string` tensor with shape `[D1...DN]`. `N` must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * `'strict'`: Raise an exception for any illegal substrings. * `'replace'`: Replace illegal substrings with `replacement_char`. * `'ignore'`: Skip illegal substrings. replacement_char: The replacement codepoint to be used in place of invalid substrings in `input` when `errors='replace'`; and in place of C0 control characters in `input` when `replace_control_characters=True`. replace_control_characters: Whether to replace the C0 control characters `(U+0000 - U+001F)` with the `replacement_char`. name: A name for the operation (optional). Returns: A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`. The returned tensor is a `tf.Tensor` if `input` is a scalar, or a `tf.RaggedTensor` otherwise. #### Example: ```python >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] >>> tf.strings.unicode_decode(input, 'UTF-8').tolist() [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]] ``` """ with ops.name_scope(name, "UnicodeDecode", [input]): return _unicode_decode(input, input_encoding, errors, replacement_char, replace_control_characters, with_offsets=False) @tf_export("strings.unicode_decode_with_offsets") def unicode_decode_with_offsets(input, input_encoding, errors="replace", replacement_char=0xFFFD, replace_control_characters=False, name=None): r"""Decodes each string into a sequence of code points with start offsets. This op is similar to `tf.strings.decode(...)`, but it also returns the start offset for each character in its respective string. This information can be used to align the characters with the original byte sequence. Returns a tuple `(codepoints, start_offsets)` where: * `codepoints[i1...iN, j]` is the Unicode codepoint for the `j`th character in `input[i1...iN]`, when decoded using `input_encoding`. * `start_offsets[i1...iN, j]` is the start byte offset for the `j`th character in `input[i1...iN]`, when decoded using `input_encoding`. Args: input: An `N` dimensional potentially ragged `string` tensor with shape `[D1...DN]`. `N` must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * `'strict'`: Raise an exception for any illegal substrings. * `'replace'`: Replace illegal substrings with `replacement_char`. * `'ignore'`: Skip illegal substrings. 
replacement_char: The replacement codepoint to be used in place of invalid substrings in `input` when `errors='replace'`; and in place of C0 control characters in `input` when `replace_control_characters=True`. replace_control_characters: Whether to replace the C0 control characters `(U+0000 - U+001F)` with the `replacement_char`. name: A name for the operation (optional). Returns: A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`. * `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`. * `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`. The returned tensors are `tf.Tensor`s if `input` is a scalar, or `tf.RaggedTensor`s otherwise. #### Example: ```python >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] >>> result = tf.strings.unicode_decode_with_offsets(input, 'UTF-8') >>> result[0].tolist() # codepoints [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]] >>> result[1].tolist() # offsets [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]] ``` """ with ops.name_scope(name, "UnicodeDecodeWithOffsets", [input]): return _unicode_decode(input, input_encoding, errors, replacement_char, replace_control_characters, with_offsets=True) @tf_export("strings.unicode_split") def unicode_split(input, input_encoding, errors="replace", replacement_char=0xFFFD, name=None): r"""Splits each string in `input` into a sequence of Unicode code points. `result[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its `j`th character, when decoded using `input_encoding`. Args: input: An `N` dimensional potentially ragged `string` tensor with shape `[D1...DN]`. `N` must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * `'strict'`: Raise an exception for any illegal substrings. * `'replace'`: Replace illegal substrings with `replacement_char`. * `'ignore'`: Skip illegal substrings. replacement_char: The replacement codepoint to be used in place of invalid substrings in `input` when `errors='replace'`. name: A name for the operation (optional). Returns: A `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`. The returned tensor is a `tf.Tensor` if `input` is a scalar, or a `tf.RaggedTensor` otherwise. #### Example: ```python >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] >>> tf.strings.unicode_split(input, 'UTF-8').tolist() [['G', '\xc3\xb6', '\xc3\xb6', 'd', 'n', 'i', 'g', 'h', 't'], ['\xf0\x9f\x98\x8a']] ``` """ with ops.name_scope(name, "UnicodeSplit", [input]): codepoints = _unicode_decode(input, input_encoding, errors, replacement_char, False, with_offsets=False) return unicode_encode( ragged_array_ops.expand_dims(codepoints, -1), output_encoding=input_encoding, errors=errors, replacement_char=replacement_char) @tf_export("strings.unicode_split_with_offsets") def unicode_split_with_offsets(input, input_encoding, errors="replace", replacement_char=0xFFFD, name=None): r"""Splits each string into a sequence of code points with start offsets. This op is similar to `tf.strings.decode(...)`, but it also returns the start offset for each character in its respective string. This information can be used to align the characters with the original byte sequence. 
Returns a tuple `(chars, start_offsets)` where: * `chars[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its `j`th character, when decoded using `input_encoding`. * `start_offsets[i1...iN, j]` is the start byte offset for the `j`th character in `input[i1...iN]`, when decoded using `input_encoding`. Args: input: An `N` dimensional potentially ragged `string` tensor with shape `[D1...DN]`. `N` must be statically known. input_encoding: String name for the unicode encoding that should be used to decode each string. errors: Specifies the response when an input string can't be converted using the indicated encoding. One of: * `'strict'`: Raise an exception for any illegal substrings. * `'replace'`: Replace illegal substrings with `replacement_char`. * `'ignore'`: Skip illegal substrings. replacement_char: The replacement codepoint to be used in place of invalid substrings in `input` when `errors='replace'`. name: A name for the operation (optional). Returns: A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`. * `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`. * `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`. The returned tensors are `tf.Tensor`s if `input` is a scalar, or `tf.RaggedTensor`s otherwise. #### Example: ```python >>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')] >>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8') >>> result[0].tolist() # character substrings [['G', '\xc3\xb6', '\xc3\xb6', 'd', 'n', 'i', 'g', 'h', 't'], ['\xf0\x9f\x98\x8a']] >>> result[1].tolist() # offsets [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]] ``` """ with ops.name_scope(name, "UnicodeSplitWithOffsets", [input]): codepoints, offsets = _unicode_decode(input, input_encoding, errors, replacement_char, False, with_offsets=True) chars = unicode_encode( ragged_array_ops.expand_dims(codepoints, -1), output_encoding=input_encoding, errors=errors, replacement_char=replacement_char) return chars, offsets def _unicode_decode(input, input_encoding, errors, replacement_char, replace_control_characters, with_offsets): """Decodes each string into a sequence of codepoints.""" input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name="input") input_ndims = input.shape.ndims if input_ndims is None: raise ValueError("Rank of `input` must be statically known.") if input_ndims > 1: # Convert to a ragged tensor with ragged_rank = input_ndims - 1. if not ragged_tensor.is_ragged(input): input = ragged_tensor.RaggedTensor.from_tensor( input, ragged_rank=input_ndims - 1) elif input.ragged_rank < input_ndims - 1: input = input.with_flat_values( ragged_tensor.RaggedTensor.from_tensor( input.flat_values, ragged_rank=input_ndims - input.ragged_rank + 1)) # Reshape the input to a flat vector, and apply the gen_string_ops op. 
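  # (Added illustrative note, not in the original source:) for example, a
  # dense [2, 3] string input was converted above into a RaggedTensor with
  # ragged_rank=1, so its flat_values is a [6] vector of strings; the decode
  # kernel below runs once over that flat vector, and the per-string
  # codepoints are then re-partitioned back into the original rows.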
if ragged_tensor.is_ragged(input): flat_input = array_ops.reshape(input.flat_values, [-1]) else: flat_input = array_ops.reshape(input, [-1]) if with_offsets: decode_op = gen_string_ops.unicode_decode_with_offsets else: decode_op = gen_string_ops.unicode_decode flat_result = decode_op( input=flat_input, input_encoding=input_encoding, errors=errors, replacement_char=replacement_char, replace_control_characters=replace_control_characters) if input_ndims == 0: codepoints = flat_result.char_values if with_offsets: offsets = flat_result.char_to_byte_starts else: codepoints = ragged_tensor.RaggedTensor.from_row_splits( flat_result.char_values, flat_result.row_splits, validate=False) if input_ndims > 1: codepoints = input.with_flat_values(codepoints) if with_offsets: offsets = ragged_tensor.RaggedTensor.from_row_splits( flat_result.char_to_byte_starts, flat_result.row_splits, validate=False) if input_ndims > 1: offsets = input.with_flat_values(offsets) if with_offsets: return codepoints, offsets else: return codepoints @tf_export("strings.split", v1=[]) def string_split_v2(input, sep=None, maxsplit=-1, name=None): # pylint: disable=redefined-builtin """Split elements of `input` based on `sep` into a `RaggedTensor`. Let N be the size of `input` (typically N will be the batch size). Split each element of `input` based on `sep` and return a `SparseTensor` or `RaggedTensor` containing the split tokens. Empty tokens are ignored. Example: ```python >>> tf.strings.split('hello world') <Tensor ['hello', 'world']> >>> tf.strings.split(['hello world', 'a b c']) <tf.RaggedTensor [['hello', 'world'], ['a', 'b', 'c']]> ``` If `sep` is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and `sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty string, consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Note that the above mentioned behavior matches python's str.split. Args: input: A string `Tensor` of rank `N`, the strings to split. If `rank(input)` is not known statically, then it is assumed to be `1`. sep: `0-D` string `Tensor`, the delimiter string. maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result. name: A name for the operation (optional). Raises: ValueError: If sep is not a string. Returns: A `RaggedTensor` of rank `N+1`, the strings split according to the delimiter. 
""" with ops.name_scope(name, "StringSplit", [input]): input = ragged_tensor.convert_to_tensor_or_ragged_tensor( input, dtype=dtypes.string, name="input") if isinstance(input, ragged_tensor.RaggedTensor): return input.with_flat_values( string_split_v2(input.flat_values, sep, maxsplit)) rank = input.shape.ndims if rank == 0: return string_split_v2(array_ops.stack([input]), sep, maxsplit)[0] elif rank == 1 or rank is None: sparse_result = string_ops.string_split_v2( input, sep=sep, maxsplit=maxsplit) return ragged_tensor.RaggedTensor.from_value_rowids( values=sparse_result.values, value_rowids=sparse_result.indices[:, 0], nrows=sparse_result.dense_shape[0], validate=False) else: return string_split_v2( ragged_tensor.RaggedTensor.from_tensor(input), sep, maxsplit) @tf_export(v1=["string_split"]) @deprecation.deprecated_args(None, "delimiter is deprecated, please use sep instead.", "delimiter") def string_split(source, sep=None, skip_empty=True, delimiter=None, result_type="SparseTensor", name=None): # pylint: disable=invalid-name """Split elements of `source` based on `delimiter`. Let N be the size of `source` (typically N will be the batch size). Split each element of `source` based on `delimiter` and return a `SparseTensor` or `RaggedTensor` containing the split tokens. Empty tokens are ignored. If `sep` is an empty string, each element of the `source` is split into individual strings, each containing one byte. (This includes splitting multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is treated as a set of delimiters with each considered a potential split point. Examples: ```python >>> tf.strings.split(['hello world', 'a b c']) tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]], values=['hello', 'world', 'a', 'b', 'c'] dense_shape=[2, 3]) >>> tf.strings.split(['hello world', 'a b c'], result_type="RaggedTensor") <tf.RaggedTensor [['hello', 'world'], ['a', 'b', 'c']]> ``` Args: source: `1-D` string `Tensor`, the strings to split. sep: `0-D` string `Tensor`, the delimiter character, the string should be length 0 or 1. Default is ' '. skip_empty: A `bool`. If `True`, skip the empty strings from the result. delimiter: deprecated alias for `sep`. result_type: The tensor type for the result: one of `"RaggedTensor"` or `"SparseTensor"`. name: A name for the operation (optional). Raises: ValueError: If delimiter is not a string. Returns: A `SparseTensor` or `RaggedTensor` of rank `2`, the strings split according to the delimiter. The first column of the indices corresponds to the row in `source` and the second column corresponds to the index of the split component in this row. """ with ops.name_scope(name, "StringSplit", [source]): sparse_result = string_ops.string_split( source, sep=sep, skip_empty=skip_empty, delimiter=delimiter) if result_type == "SparseTensor": return sparse_result elif result_type == "RaggedTensor": return ragged_tensor.RaggedTensor.from_value_rowids( values=sparse_result.values, value_rowids=sparse_result.indices[:, 0], nrows=sparse_result.dense_shape[0], validate=False) else: raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.") # In TensorFlow 1.x, "tf.strings.split" uses the new signature (with maxsplit), # but we need to add the result_type argument. @tf_export(v1=["strings.split"]) def strings_split_v1(input=None, sep=None, maxsplit=-1, # pylint: disable=redefined-builtin result_type="SparseTensor", source=None, name=None): """Split elements of `input` based on `sep`. 
Let N be the size of `input` (typically N will be the batch size). Split each element of `input` based on `sep` and return a `SparseTensor` or `RaggedTensor` containing the split tokens. Empty tokens are ignored. Examples: ```python >>> tf.strings.split(['hello world', 'a b c']) tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]], values=['hello', 'world', 'a', 'b', 'c'] dense_shape=[2, 3]) >>> tf.strings.split(['hello world', 'a b c'], result_type="RaggedTensor") <tf.RaggedTensor [['hello', 'world'], ['a', 'b', 'c']]> ``` If `sep` is given, consecutive delimiters are not grouped together and are deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and `sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty string, consecutive whitespace are regarded as a single separator, and the result will contain no empty strings at the start or end if the string has leading or trailing whitespace. Note that the above mentioned behavior matches python's str.split. Args: input: A string `Tensor` of rank `N`, the strings to split. If `rank(input)` is not known statically, then it is assumed to be `1`. sep: `0-D` string `Tensor`, the delimiter character. maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result. result_type: The tensor type for the result: one of `"RaggedTensor"` or `"SparseTensor"`. source: alias for "input" argument. name: A name for the operation (optional). Raises: ValueError: If sep is not a string. Returns: A `SparseTensor` or `RaggedTensor` of rank `N+1`, the strings split according to the delimiter. """ input = deprecation.deprecated_argument_lookup( "input", input, "source", source) with ops.name_scope(name, "StringSplit", [input]): input = ragged_tensor.convert_to_tensor_or_ragged_tensor( input, dtype=dtypes.string, name="input") if result_type == "SparseTensor" and input.shape.rank == 1: return string_ops.string_split_v2(input, sep=sep, maxsplit=maxsplit) ragged_result = string_split_v2(input, sep=sep, maxsplit=maxsplit) if result_type == "SparseTensor": return ragged_result.to_sparse() elif result_type == "RaggedTensor": return ragged_result else: raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.") def reduce_join(inputs, axis=None, keepdims=None, separator="", name=None): """For docs, see: _RAGGED_REDUCE_DOCSTRING.""" return ragged_math_ops.ragged_reduce_aggregate( string_ops.reduce_join, string_ops.unsorted_segment_join, inputs, axis, keepdims, separator, name or "RaggedSegmentJoin") @tf_export("strings.ngrams") def ngrams(data, ngram_width, separator=" ", pad_values=None, padding_width=None, preserve_short_sequences=False, name=None): """Create a tensor of n-grams based on `data`. Creates a tensor of n-grams based on `data`. The n-grams are created by joining windows of `width` adjacent strings from the inner axis of `data` using `separator`. The input data can be padded on both the start and end of the sequence, if desired, using the `pad_values` argument. If set, `pad_values` should contain either a tuple of strings or a single string; the 0th element of the tuple will be used to pad the left side of the sequence and the 1st element of the tuple will be used to pad the right side of the sequence. The `padding_width` arg controls how many padding values are added to each side; it defaults to `ngram_width-1`. 
If this op is configured to not have padding, or if it is configured to add padding with `padding_width` set to less than ngram_width-1, it is possible that a sequence, or a sequence plus padding, is smaller than the ngram width. In that case, no ngrams will be generated for that sequence. This can be prevented by setting `preserve_short_sequences`, which will cause the op to always generate at least one ngram per non-empty sequence. Args: data: A Tensor or RaggedTensor containing the source data for the ngrams. ngram_width: The width(s) of the ngrams to create. If this is a list or tuple, the op will return ngrams of all specified arities in list order. Values must be non-Tensor integers greater than 0. separator: The separator string used between ngram elements. Must be a string constant, not a Tensor. pad_values: A tuple of (left_pad_value, right_pad_value), a single string, or None. If None, no padding will be added; if a single string, then that string will be used for both left and right padding. Values must be Python strings. padding_width: If set, `padding_width` pad values will be added to both sides of each sequence. Defaults to `ngram_width`-1. Must be greater than 0. (Note that 1-grams are never padded, regardless of this value.) preserve_short_sequences: If true, then ensure that at least one ngram is generated for each input sequence. In particular, if an input sequence is shorter than `min(ngram_width) + 2*pad_width`, then generate a single ngram containing the entire sequence. If false, then no ngrams are generated for these short input sequences. name: The op name. Returns: A RaggedTensor of ngrams. If `data.shape=[D1...DN, S]`, then `output.shape=[D1...DN, NUM_NGRAMS]`, where `NUM_NGRAMS=S-ngram_width+1+2*padding_width`. Raises: TypeError: if `pad_values` is set to an invalid type. ValueError: if `pad_values`, `padding_width`, or `ngram_width` is set to an invalid value. 
""" with ops.name_scope(name, "StringNGrams", [data]): if pad_values is None: left_pad = "" right_pad = "" elif isinstance(pad_values, (list, tuple)): if (not isinstance(pad_values[0], util_compat.bytes_or_text_types) or not isinstance(pad_values[1], util_compat.bytes_or_text_types)): raise TypeError( "pad_values must be a string, tuple of strings, or None.") left_pad = pad_values[0] right_pad = pad_values[1] else: if not isinstance(pad_values, util_compat.bytes_or_text_types): raise TypeError( "pad_values must be a string, tuple of strings, or None.") left_pad = pad_values right_pad = pad_values if padding_width is not None and padding_width < 1: raise ValueError("padding_width must be greater than 0.") if padding_width is not None and pad_values is None: raise ValueError("pad_values must be provided if padding_width is set.") data = ragged_tensor.convert_to_tensor_or_ragged_tensor( data, name="data", dtype=dtypes.string) # preserve the shape of the data if it is a tensor to_tensor = False if isinstance(data, ops.Tensor): dense_shape = array_ops.concat([array_ops.shape(data)[:-1], [-1]], axis=0) to_tensor = True if not isinstance(data, ragged_tensor.RaggedTensor): if data.shape.ndims is None: raise ValueError("Rank of data must be known.") elif data.shape.ndims == 0: raise ValueError("Data must have rank>0") elif data.shape.ndims == 1: rt = ragged_tensor.RaggedTensor.from_row_starts( data, [0], validate=False) return ngrams(rt, ngram_width, separator, pad_values, padding_width, preserve_short_sequences, name)[0] else: data = ragged_tensor.RaggedTensor.from_tensor( data, ragged_rank=data.shape.ndims - 1) if data.ragged_rank > 1: output = data.with_values( ngrams(data.values, ngram_width, separator, pad_values, padding_width, preserve_short_sequences, name)) return array_ops.reshape(output.flat_values, dense_shape) if to_tensor else output if pad_values is None: padding_width = 0 if pad_values is not None and padding_width is None: padding_width = -1 if not isinstance(ngram_width, (list, tuple)): ngram_widths = [ngram_width] else: ngram_widths = ngram_width for width in ngram_widths: if width < 1: raise ValueError("All ngram_widths must be greater than 0. Got %s" % ngram_width) output, output_splits = gen_string_ops.string_n_grams( data=data.flat_values, data_splits=data.row_splits, separator=separator, ngram_widths=ngram_widths, left_pad=left_pad, right_pad=right_pad, pad_width=padding_width, preserve_short_sequences=preserve_short_sequences) # if the input is Dense tensor, the output should also be a dense tensor output = ragged_tensor.RaggedTensor.from_row_splits( values=output, row_splits=output_splits, validate=False) return array_ops.reshape(output.flat_values, dense_shape) if to_tensor else output
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_string_ops.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes for storing ragged tensors and their values.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python import tf2 from tensorflow.python.client import session from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import tensor_util from tensorflow.python.framework import type_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_ragged_conversion_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_config from tensorflow.python.ops.ragged import ragged_tensor_value from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.ops.ragged import segment_id_ops from tensorflow.python.util.tf_export import tf_export # pylint: disable=protected-access _eval_using_default_session = ops._eval_using_default_session # pylint: enable=protected-access #=============================================================================== # RaggedTensor #=============================================================================== @tf_export("RaggedTensor") class RaggedTensor(composite_tensor.CompositeTensor): """Represents a ragged tensor. A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are dimensions whose slices may have different lengths. For example, the inner (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths. Dimensions whose slices all have the same length are called *uniform dimensions*. The outermost dimension of a `RaggedTensor` is always uniform, since it consists of a single slice (and so there is no possibility for differing slice lengths). The total number of dimensions in a `RaggedTensor` is called its *rank*, and the number of ragged dimensions in a `RaggedTensor` is called its *ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation time: it can't depend on the runtime values of `Tensor`s, and can't vary dynamically for different session runs. ### Potentially Ragged Tensors Many ops support both `Tensor`s and `RaggedTensor`s. The term "potentially ragged tensor" may be used to refer to a tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank of a `Tensor` is zero. 
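  For example (an illustrative sketch added here, assuming TF's ragged
  elementwise dispatch and eager execution): many elementwise ops accept
  either kind of input.

  ```python
  >>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> print(tf.add(rt, 10))                            # ragged in, ragged out
  <tf.RaggedTensor [[11, 12], [13]]>
  >>> print(tf.add(tf.constant([[1, 2], [3, 4]]), 10)) # dense input works too
  ```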
### Documenting RaggedTensor Shapes When documenting the shape of a RaggedTensor, ragged dimensions can be indicated by enclosing them in parentheses. For example, the shape of a 3-D `RaggedTensor` that stores the fixed-size word embedding for each word in a sentence, for each sentence in a batch, could be written as `[num_sentences, (num_words), embedding_size]`. The parentheses around `(num_words)` indicate that dimension is ragged, and that the length of each element list in that dimension may vary for each item. ### Component Tensors Internally, a `RaggedTensor` consists of a concatenated list of values that are partitioned into variable-length rows. In particular, each `RaggedTensor` consists of: * A `values` tensor, which concatenates the variable-length rows into a flattened list. For example, the `values` tensor for `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`. * A `row_splits` vector, which indicates how those flattened values are divided into rows. In particular, the values for row `rt[i]` are stored in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`. Example: ```python >>> print(tf.RaggedTensor.from_row_splits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_splits=[0, 4, 4, 7, 8, 8])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` ### Alternative Row-Partitioning Schemes In addition to `row_splits`, ragged tensors provide support for four other row-partitioning schemes: * `row_lengths`: a vector with shape `[nrows]`, which specifies the length of each row. * `value_rowids` and `nrows`: `value_rowids` is a vector with shape `[nvals]`, corresponding one-to-one with `values`, which specifies each value's row index. In particular, the row `rt[row]` consists of the values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an integer scalar that specifies the number of rows in the `RaggedTensor`. (`nrows` is used to indicate trailing empty rows.) * `row_starts`: a vector with shape `[nrows]`, which specifies the start offset of each row. Equivalent to `row_splits[:-1]`. * `row_limits`: a vector with shape `[nrows]`, which specifies the stop offset of each row. Equivalent to `row_splits[1:]`. Example: The following ragged tensors are equivalent, and all represent the nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`. ```python >>> values = [3, 1, 4, 1, 5, 9, 2, 6] >>> rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8]) >>> rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0]) >>> rt3 = RaggedTensor.from_value_rowids( ... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5) >>> rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8]) >>> rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8]) ``` ### Multiple Ragged Dimensions `RaggedTensor`s with multiple ragged dimensions can be defined by using a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor` adds a single ragged dimension. ```python >>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above ... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) >>> outer_rt = RaggedTensor.from_row_splits( ... 
values=inner_rt, row_splits=[0, 3, 3, 5]) >>> print outer_rt.to_list() [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]] >>> print outer_rt.ragged_rank 2 ``` The factory function `RaggedTensor.from_nested_row_splits` may be used to construct a `RaggedTensor` with multiple ragged dimensions directly, by providing a list of `row_splits` tensors: ```python >>> RaggedTensor.from_nested_row_splits( ... flat_values=[3, 1, 4, 1, 5, 9, 2, 6], ... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list() [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]] ``` ### Uniform Inner Dimensions `RaggedTensor`s with uniform inner dimensions can be defined by using a multidimensional `Tensor` for `values`. ```python >>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3]), .. row_splits=[0, 2, 5]) >>> print rt.to_list() [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]] >>> print rt.shape (2, ?, 3) ``` ### RaggedTensor Shape Restrictions The shape of a RaggedTensor is currently restricted to have the following form: * A single uniform dimension * Followed by one or more ragged dimensions * Followed by zero or more uniform dimensions. This restriction follows from the fact that each nested `RaggedTensor` replaces the uniform outermost dimension of its `values` with a uniform dimension followed by a ragged dimension. """ #============================================================================= # Constructor (private) #============================================================================= def __init__(self, values, row_splits, cached_row_lengths=None, cached_value_rowids=None, cached_nrows=None, internal=False): """Creates a `RaggedTensor` with a specified partitioning for `values`. This constructor is private -- please use one of the following ops to build `RaggedTensor`s: * `tf.RaggedTensor.from_row_lengths` * `tf.RaggedTensor.from_value_rowids` * `tf.RaggedTensor.from_row_splits` * `tf.RaggedTensor.from_row_starts` * `tf.RaggedTensor.from_row_limits` * `tf.RaggedTensor.from_nested_row_splits` * `tf.RaggedTensor.from_nested_row_lengths` * `tf.RaggedTensor.from_nested_value_rowids` Args: values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`. row_splits: A 1-D integer tensor with shape `[nrows+1]`. cached_row_lengths: A 1-D integer tensor with shape `[nrows]` cached_value_rowids: A 1-D integer tensor with shape `[nvals]`. cached_nrows: A 1-D integer scalar tensor. internal: True if the constructor is being called by one of the factory methods. If false, an exception will be raised. Raises: TypeError: If a row partitioning tensor has an inappropriate dtype. TypeError: If exactly one row partitioning argument was not specified. ValueError: If a row partitioning tensor has an inappropriate shape. ValueError: If multiple partitioning arguments are specified. ValueError: If nrows is specified but value_rowids is not None. """ if not internal: raise ValueError("RaggedTensor constructor is private; please use one " "of the factory methods instead (e.g., " "RaggedTensor.from_row_lengths())") # Validate the arguments. if not isinstance(row_splits, ops.Tensor): raise TypeError("Row-partitioning argument must be a Tensor, got %r" % row_splits) if not isinstance(values, (RaggedTensor, ops.Tensor)): raise TypeError("values must be a Tensor or RaggedTensor, got %r" % values) if row_splits.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("Row-partitioning argument must be int32 or int64") # Validate shapes & dtypes. 
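    # (Added illustrative note, not in the original source:) e.g. for
    # values=[3, 1, 4, 1, 5, 9, 2, 6] and row_splits=[0, 4, 4, 7, 8, 8], the
    # checks here require a rank-1 int32/int64 row_splits and rank >= 1
    # values; when `values` is itself a RaggedTensor, its row_splits dtype
    # must already match (the public factory methods cast as needed before
    # reaching this constructor).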
row_splits.shape.assert_has_rank(1) values.shape.with_rank_at_least(1) row_splits.set_shape([None]) if isinstance(values, RaggedTensor): assert row_splits.dtype == values.row_splits.dtype self._values = values self._row_splits = row_splits # Store any cached tensors. These are used to avoid unnecessary # round-trip conversions when a RaggedTensor is constructed from # lengths or rowids, and we later want those lengths/rowids back. for tensor in [cached_row_lengths, cached_value_rowids, cached_nrows]: if tensor is not None: if not isinstance(tensor, ops.Tensor): raise TypeError("Cached value must be a Tensor or None.") elif tensor.dtype not in (dtypes.int32, dtypes.int64): raise TypeError("Cached value must be int32 or int64.") self._cached_row_lengths = cached_row_lengths self._cached_value_rowids = cached_value_rowids self._cached_nrows = cached_nrows #============================================================================= # Factory Methods #============================================================================= @classmethod def from_value_rowids(cls, values, value_rowids, nrows=None, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `value_rowids`. The returned `RaggedTensor` corresponds with the python list defined by: ```python result = [[values[i] for i in range(len(values)) if value_rowids[i] == row] for row in range(nrows)] ``` Args: values: A potentially ragged tensor with shape `[nvals, ...]`. value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds one-to-one with `values`, and specifies each value's row index. Must be nonnegative, and must be sorted in ascending order. nrows: An integer scalar specifying the number of rows. This should be specified if the `RaggedTensor` may containing empty training rows. Must be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty). Defaults to `value_rowids[-1]` (or zero if `value_rowids` is empty). name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. Raises: ValueError: If `nrows` is incompatible with `value_rowids`. #### Example: ```python >>> print(tf.RaggedTensor.from_value_rowids( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], ... 
nrows=5)) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(name, "RaggedFromValueRowIds", [values, value_rowids, nrows]): values, value_rowids = cls._convert_values_and_row_partition( values, value_rowids, "value_rowids") if nrows is None: const_rowids = tensor_util.constant_value(value_rowids) if const_rowids is None: nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1 const_nrows = None else: const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0 nrows = ops.convert_to_tensor(const_nrows, value_rowids.dtype, name="nrows") else: nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows") const_nrows = tensor_util.constant_value(nrows) if const_nrows is not None: if const_nrows < 0: raise ValueError("Expected nrows >= 0; got %d" % const_nrows) const_rowids = tensor_util.constant_value(value_rowids) if const_rowids is not None and const_rowids.size > 0: if not const_nrows >= const_rowids[-1] + 1: raise ValueError( "Expected nrows >= value_rowids[-1] + 1; got nrows=%d, " "value_rowids[-1]=%d" % (const_nrows, const_rowids[-1])) value_rowids.shape.assert_has_rank(1) nrows.shape.assert_has_rank(0) values.shape[:1].assert_is_compatible_with(value_rowids.shape) if validate: msg = "Arguments to from_value_rowids do not form a valid RaggedTensor" nvals1 = _nrows(values) nvals2 = _nrows(value_rowids) checks = [ check_ops.assert_rank(value_rowids, 1, message=msg), check_ops.assert_rank(nrows, 0, message=msg), check_ops.assert_equal(nvals1, nvals2, message=msg), check_ops.assert_non_negative(value_rowids[:1], message=msg), _assert_monotonic_increasing(value_rowids, message=msg), check_ops.assert_less(value_rowids[-1:], nrows, message=msg), ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) value_rowids = control_flow_ops.with_dependencies(checks, value_rowids) # Convert value_rowids & nrows to row_splits. # Note: we don't use segment_ids_to_row_splits() here because we want # to save the intermediate value `row_lengths`, so we can cache it. # TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the # cast. value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32) nrows_int32 = math_ops.cast(nrows, dtypes.int32) row_lengths = math_ops.bincount( value_rowids_int32, minlength=nrows_int32, maxlength=nrows_int32, dtype=value_rowids.dtype) row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0) if const_nrows is not None: row_lengths.set_shape([const_nrows]) row_splits.set_shape([const_nrows + 1]) return cls( values, row_splits, cached_row_lengths=row_lengths, cached_value_rowids=value_rowids, cached_nrows=nrows, internal=True) @classmethod def from_row_splits(cls, values, row_splits, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `row_splits`. The returned `RaggedTensor` corresponds with the python list defined by: ```python result = [values[row_splits[i]:row_splits[i + 1]] for i in range(len(row_splits) - 1)] ``` Args: values: A potentially ragged tensor with shape `[nvals, ...]`. row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be empty, and must be sorted in ascending order. `row_splits[0]` must be zero and `row_splits[-1]` must be `nvals`. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. 
`result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. Raises: ValueError: If `row_splits` is an empty list. #### Example: ```python >>> print(tf.RaggedTensor.from_row_splits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_splits=[0, 4, 4, 7, 8, 8])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(row_splits, (list, tuple)) and not row_splits: raise ValueError("row_splits tensor may not be empty.") if isinstance(row_splits, tensor_spec.TensorSpec): return cls(values=values, row_splits=row_splits, internal=True) with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]): values, row_splits = cls._convert_values_and_row_partition( values, row_splits, "row_splits") row_splits.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_splits do not form a valid RaggedTensor" nvals = _nrows(values, row_splits.dtype) checks = [ check_ops.assert_rank(row_splits, 1, message=msg), _assert_zero(row_splits[0], message=msg), _assert_monotonic_increasing(row_splits, message=msg), check_ops.assert_equal(row_splits[-1], nvals, message=msg), ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) row_splits = control_flow_ops.with_dependencies(checks, row_splits) return cls(values=values, row_splits=row_splits, internal=True) @classmethod def from_row_lengths(cls, values, row_lengths, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `row_lengths`. The returned `RaggedTensor` corresponds with the python list defined by: ```python result = [[values.pop(0) for i in range(length)] for length in row_lengths] ``` Args: values: A potentially ragged tensor with shape `[nvals, ...]`. row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be nonnegative. `sum(row_lengths)` must be `nvals`. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. #### Example: ```python >>> print(tf.RaggedTensor.from_row_lengths( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_lengths=[4, 0, 3, 1, 0])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []])> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]): values, row_lengths = cls._convert_values_and_row_partition( values, row_lengths, "row_lengths") row_lengths.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_lengths do not form a valid RaggedTensor" nvals1 = math_ops.reduce_sum(row_lengths) nvals2 = _nrows(values, row_lengths.dtype) checks = [ check_ops.assert_rank(row_lengths, 1, message=msg), check_ops.assert_non_negative(row_lengths, message=msg), check_ops.assert_equal(nvals1, nvals2, message=msg) ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) row_lengths = control_flow_ops.with_dependencies(checks, row_lengths) row_limits = math_ops.cumsum(row_lengths) row_splits = array_ops.concat([[0], row_limits], axis=0) return cls( values=values, row_splits=row_splits, cached_row_lengths=row_lengths, internal=True) @classmethod def from_row_starts(cls, values, row_starts, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `row_starts`. 
Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`. Args: values: A potentially ragged tensor with shape `[nvals, ...]`. row_starts: A 1-D integer tensor with shape `[nrows]`. Must be nonnegative and sorted in ascending order. If `nrows>0`, then `row_starts[0]` must be zero. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. #### Example: ```python >>> print(tf.RaggedTensor.from_row_starts( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... row_starts=[0, 4, 4, 7, 8])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]): values, row_starts = cls._convert_values_and_row_partition( values, row_starts, "row_starts") row_starts.shape.assert_has_rank(1) nvals = _nrows(values, row_starts.dtype) if validate: msg = "Arguments to from_row_starts do not form a valid RaggedTensor" checks = [ check_ops.assert_rank(row_starts, 1, message=msg), _assert_zero(row_starts[:1], message=msg), _assert_monotonic_increasing(row_starts, message=msg), check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg), ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) row_starts = control_flow_ops.with_dependencies(checks, row_starts) row_splits = array_ops.concat([row_starts, [nvals]], axis=0) return cls(values=values, row_splits=row_splits, internal=True) @classmethod def from_row_limits(cls, values, row_limits, name=None, validate=True): """Creates a `RaggedTensor` with rows partitioned by `row_limits`. Equivalent to: `from_row_splits(values, concat([0, row_limits]))`. Args: values: A potentially ragged tensor with shape `[nvals, ...]`. row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. #### Example: ```python >>> print(tf.RaggedTensor.from_row_limits( ... values=[3, 1, 4, 1, 5, 9, 2, 6], ... 
row_limits=[4, 4, 7, 8, 8])) <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]> ``` """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]): values, row_limits = cls._convert_values_and_row_partition( values, row_limits, "row_limits") row_limits.shape.assert_has_rank(1) if validate: msg = "Arguments to from_row_limits do not form a valid RaggedTensor" nvals = _nrows(values, row_limits.dtype) checks = [ check_ops.assert_rank(row_limits, 1, message=msg), check_ops.assert_non_negative(row_limits[:1], message=msg), _assert_monotonic_increasing(row_limits, message=msg), check_ops.assert_equal(row_limits[-1:], nvals, message=msg) ] if not isinstance(values, RaggedTensor): checks.append(check_ops.assert_rank_at_least(values, 1)) row_limits = control_flow_ops.with_dependencies(checks, row_limits) zero = array_ops.zeros([1], row_limits.dtype) row_splits = array_ops.concat([zero, row_limits], axis=0) return cls(values=values, row_splits=row_splits, internal=True) @classmethod def from_nested_value_rowids(cls, flat_values, nested_value_rowids, nested_nrows=None, name=None, validate=True): """Creates a `RaggedTensor` from a nested list of `value_rowids` tensors. Equivalent to: ```python result = flat_values for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)): result = from_value_rowids(result, rowids, nrows) ``` Args: flat_values: A potentially ragged tensor. nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is used as the `value_rowids` for the `i`th ragged dimension. nested_nrows: A list of integer scalars. The `i`th scalar is used as the `nrows` for the `i`th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty). Raises: ValueError: If `len(nested_values_rowids) != len(nested_nrows)`. """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(nested_value_rowids, ops.Tensor): raise TypeError("nested_value_rowids must be a list of Tensors") if nested_nrows is None: nested_nrows = [None] * len(nested_value_rowids) else: if isinstance(nested_nrows, ops.Tensor): raise TypeError("nested_nrows must be a list of Tensors") if len(nested_nrows) != len(nested_value_rowids): raise ValueError("nested_nrows must have the same length as " "nested_value_rowids") with ops.name_scope( name, "RaggedFromNestedValueRowIds", [flat_values] + list(nested_value_rowids) + list(nested_nrows)): result = flat_values for value_rowids, nrows in reversed( list(zip(nested_value_rowids, nested_nrows))): result = cls.from_value_rowids(result, value_rowids, nrows, validate=validate) return result @classmethod def from_nested_row_splits(cls, flat_values, nested_row_splits, name=None, validate=True): """Creates a `RaggedTensor` from a nested list of `row_splits` tensors. Equivalent to: ```python result = flat_values for row_splits in reversed(nested_row_splits): result = from_row_splits(result, row_splits) ``` Args: flat_values: A potentially ragged tensor. nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is used as the `row_splits` for the `i`th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. 
Returns: A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty). """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(nested_row_splits, ops.Tensor): raise TypeError("nested_row_splits must be a list of Tensors") with ops.name_scope(name, "RaggedFromNestedRowSplits", [flat_values] + list(nested_row_splits)): result = flat_values for splits in reversed(nested_row_splits): result = cls.from_row_splits(result, splits, validate=validate) return result @classmethod def from_nested_row_lengths(cls, flat_values, nested_row_lengths, name=None, validate=True): """Creates a `RaggedTensor` from a nested list of `row_lengths` tensors. Equivalent to: ```python result = flat_values for row_lengths in reversed(nested_row_lengths): result = from_row_lengths(result, row_lengths) ``` Args: flat_values: A potentially ragged tensor. nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is used as the `row_lengths` for the `i`th ragged dimension. name: A name prefix for the RaggedTensor (optional). validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty). """ if not isinstance(validate, bool): raise TypeError("validate must have type bool") if isinstance(nested_row_lengths, ops.Tensor): raise TypeError("nested_row_lengths must be a list of Tensors") with ops.name_scope(name, "RaggedFromNestedRowlengths", [flat_values] + list(nested_row_lengths)): result = flat_values for lengths in reversed(nested_row_lengths): result = cls.from_row_lengths(result, lengths, validate=validate) return result @classmethod def _convert_values_and_row_partition(cls, values, partition, name): """Converts `values` and `partition` to Tensors. If `values` is a `RaggedTensor`, then converts `values` and `partition` to have compatible row-partitioning dtypes. In particular, if any of the row partitioning tensors are `int64`, then all of the other row partitioning tensors wil be cast to `int64` (if auto_cast_partition_dtype() is true) or an error will be raised (if auto_cast_partition_dtype() is false). Args: values: The `values` for the `RaggedTensor` being constructed. partition: A row-partitioning tensor for the `RaggedTensor` being constructed. I.e., one of: row_splits, row_lengths, row_starts, row_limits, value_rowids. name: The name of the row-partitioning tensor. Returns: A tuple (values, partition). 
""" if isinstance(values, RaggedTensor): if isinstance(partition, ops.Tensor): if partition.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("%s must have dtype int32 or int64" % name) if values.row_splits.dtype != partition.dtype: if not ragged_config.auto_cast_partition_dtype(): raise ValueError("dtype mismatch: %s (%s) vs values.row_splits (%s)" % (name, partition.dtype, values.row_splits.dtype)) partition = math_ops.cast(partition, dtypes.int64) values = values.with_row_splits_dtype(dtypes.int64) else: partition = ops.convert_to_tensor(partition, values.row_splits.dtype, name=name) else: values = ops.convert_to_tensor(values, name="values") if isinstance(partition, np.ndarray) and partition.dtype == np.int32: partition = ops.convert_to_tensor(partition, name=name) else: partition = ops.convert_to_tensor( partition, preferred_dtype=dtypes.int64, name=name) if partition.dtype not in (dtypes.int32, dtypes.int64): raise ValueError("%s must have dtype int32 or int64" % name) return (values, partition) #============================================================================= # Accessors #============================================================================= @property def dtype(self): """The `DType` of values in this tensor.""" return self._values.dtype @property def shape(self): """The statically known shape of this ragged tensor. Returns: A `TensorShape` containing the statically known shape of this ragged tensor. Ragged dimensions have a size of `None`. Examples: ```python >>> ragged.constant([[0], [1, 2]]).shape TensorShape([Dimension(2), Dimension(None)]) >>> ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape TensorShape([Dimension(2), Dimension(None), Dimension(2) ``` """ nrows = tensor_shape.dimension_at_index(self._row_splits.shape, 0) - 1 values_shape = self._values.shape value_shape = values_shape[1:] return tensor_shape.TensorShape([nrows, None]).concatenate(value_shape) @property def ragged_rank(self): """The number of ragged dimensions in this ragged tensor. Returns: A Python `int` indicating the number of ragged dimensions in this ragged tensor. The outermost dimension is not considered ragged. """ values_is_ragged = isinstance(self._values, RaggedTensor) return self._values.ragged_rank + 1 if values_is_ragged else 1 @property def values(self): """The concatenated rows for this ragged tensor. `rt.values` is a potentially ragged tensor formed by flattening the two outermost dimensions of `rt` into a single dimension. `rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the number of items in the outer two dimensions of `rt`). `rt.ragged_rank = self.ragged_rank - 1` Returns: A potentially ragged tensor. #### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print rt.values tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6]) ``` """ return self._values @property def row_splits(self): """The row-split indices for this ragged tensor's `values`. `rt.row_splits` specifies where the values for each row begin and end in `rt.values`. In particular, the values for row `rt[i]` are stored in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`. Returns: A 1-D integer `Tensor` with shape `[self.nrows+1]`. The returned tensor is non-empty, and is sorted in ascending order. `self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to `self.values.shape[0]`. 
#### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print rt.row_splits # indices of row splits in rt.values tf.Tensor([0, 4, 4, 7, 8, 8]) ``` """ return self._row_splits @property def flat_values(self): """The innermost `values` tensor for this ragged tensor. Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is `rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`. Conceptually, `flat_values` is the tensor formed by flattening the outermost dimension and all of the ragged dimensions into a single dimension. `rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]` (where `nvals` is the number of items in the flattened dimensions). Returns: A `Tensor`. #### Example: ```python >>> rt = ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]) >>> print rt.flat_values() tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6]) ``` """ rt_values = self.values while isinstance(rt_values, RaggedTensor): rt_values = rt_values.values return rt_values @property def nested_row_splits(self): """A tuple containing the row_splits for all ragged dimensions. `rt.nested_row_splits` is a tuple containing the `row_splits` tensors for all ragged dimensions in `rt`, ordered from outermost to innermost. In particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where: * `value_splits = ()` if `rt.values` is a `Tensor`. * `value_splits = rt.values.nested_row_splits` otherwise. Returns: A `tuple` of 1-D integer `Tensor`s. #### Example: ```python >>> rt = ragged.constant([[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]]) >>> for i, splits in enumerate(rt.nested_row_splits()): ... print('Splits for dimension %d: %s' % (i+1, splits)) Splits for dimension 1: [0, 1] Splits for dimension 2: [0, 3, 3, 5] Splits for dimension 3: [0, 4, 4, 7, 8, 8] ``` """ rt_nested_splits = [self.row_splits] rt_values = self.values while isinstance(rt_values, RaggedTensor): rt_nested_splits.append(rt_values.row_splits) rt_values = rt_values.values return tuple(rt_nested_splits) def value_rowids(self, name=None): """Returns the row indices for the `values` in this ragged tensor. `rt.value_rowids()` corresponds one-to-one with the outermost dimension of `rt.values`, and specifies the row containing each value. In particular, the row `rt[row]` consists of the values `rt.values[j]` where `rt.value_rowids()[j] == row`. Args: name: A name prefix for the returned tensor (optional). Returns: A 1-D integer `Tensor` with shape `self.values.shape[:1]`. The returned tensor is nonnegative, and is sorted in ascending order. #### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> rt.values tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6]) >>> rt.value_rowids() tf.Tensor([0, 0, 0, 0, 2, 2, 2, 3]) # corresponds 1:1 with rt.values ``` """ if self._cached_value_rowids is not None: return self._cached_value_rowids with ops.name_scope(name, "RaggedValueRowIds", [self]): return segment_id_ops.row_splits_to_segment_ids(self.row_splits) def nested_value_rowids(self, name=None): """Returns a tuple containing the value_rowids for all ragged dimensions. `rt.nested_value_rowids` is a tuple containing the `value_rowids` tensors for all ragged dimensions in `rt`, ordered from outermost to innermost. In particular, `rt.nested_value_rowids = (rt.value_rowids(),) + value_ids` where: * `value_ids = ()` if `rt.values` is a `Tensor`. * `value_ids = rt.values.nested_value_rowids` otherwise. Args: name: A name prefix for the returned tensors (optional). 
Returns: A `tuple` of 1-D integer `Tensor`s. #### Example: ```python >>> rt = ragged.constant([[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]]) >>> for i, ids in enumerate(rt.nested_value_rowids()): ... print('row ids for dimension %d: %s' % (i+1, ids)) row ids for dimension 1: [0] row ids for dimension 2: [0, 0, 0, 2, 2] row ids for dimension 3: [0, 0, 0, 0, 2, 2, 2, 3] ``` """ with ops.name_scope(name, "RaggedNestedValueRowIds", [self]): rt_nested_ids = [self.value_rowids()] rt_values = self.values while isinstance(rt_values, RaggedTensor): rt_nested_ids.append(rt_values.value_rowids()) rt_values = rt_values.values return tuple(rt_nested_ids) def nrows(self, out_type=None, name=None): """Returns the number of rows in this ragged tensor. I.e., the size of the outermost dimension of the tensor. Args: out_type: `dtype` for the returned tensor. Defaults to `self.row_splits.dtype`. name: A name prefix for the returned tensor (optional). Returns: A scalar `Tensor` with dtype `out_type`. #### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> rt.nrows() # rt has 5 rows. 5 ``` """ if out_type is None: out_type = self._row_splits.dtype else: out_type = dtypes.as_dtype(out_type) if self._cached_nrows is not None: return math_ops.cast(self._cached_nrows, out_type) with ops.name_scope(name, "RaggedNRows", [self]): return array_ops.shape(self.row_splits, out_type=out_type)[0] - 1 def row_starts(self, name=None): """Returns the start indices for rows in this ragged tensor. These indices specify where the values for each row begin in `self.values`. `rt.row_starts()` is equal to `rt.row_splits[:-1]`. Args: name: A name prefix for the returned tensor (optional). Returns: A 1-D integer Tensor with shape `[nrows]`. The returned tensor is nonnegative, and is sorted in ascending order. #### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> rt.values tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6]) >>> rt.row_starts() # indices of row starts in rt.values tf.Tensor([0, 4, 4, 7, 8]) ``` """ with ops.name_scope(name, "RaggedRowStarts", [self]): return self.row_splits[:-1] def row_limits(self, name=None): """Returns the limit indices for rows in this ragged tensor. These indices specify where the values for each row end in `self.values`. `rt.row_limits(self)` is equal to `rt.row_splits[:-1]`. Args: name: A name prefix for the returned tensor (optional). Returns: A 1-D integer Tensor with shape `[nrows]`. The returned tensor is nonnegative, and is sorted in ascending order. #### Example: ```python >>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> rt.values tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6]) >>> rt.row_limits() # indices of row limits in rt.values tf.Tensor([4, 4, 7, 8, 8]) ``` """ with ops.name_scope(name, "RaggedRowLimits", [self]): return self.row_splits[1:] def row_lengths(self, axis=1, name=None): """Returns the lengths of the rows in this ragged tensor. `rt.row_lengths()[i]` indicates the number of values in the `i`th row of `rt`. Args: axis: An integer constant indicating the axis whose row lengths should be returned. name: A name prefix for the returned tensor (optional). Returns: A potentially ragged integer Tensor with shape `self.shape[:axis]`. Raises: ValueError: If `axis` is out of bounds. #### Example: ```python >>> rt = ragged.constant([[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []]) >>> rt.row_lengths(rt) # lengths of rows in rt tf.Tensor([2, 0, 2, 1, 0]) >>> rt.row_lengths(axis=2) # lengths of axis=2 rows. 
<tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]> ``` """ if self._cached_row_lengths is not None: return self._cached_row_lengths with ops.name_scope(name, "RaggedRowLengths", [self]): axis = ragged_util.get_positive_axis(axis, self.shape.ndims) if axis == 0: return self.nrows() elif axis == 1: splits = self.row_splits return splits[1:] - splits[:-1] elif isinstance(self.values, RaggedTensor): return self.with_values(self.values.row_lengths(axis - 1)) else: shape = array_ops.shape(self.values, out_type=self._row_splits.dtype) return self.with_values( array_ops.ones(shape[:axis - 1], self._row_splits.dtype) * shape[axis - 1]) def nested_row_lengths(self, name=None): """Returns a tuple containing the row_lengths for all ragged dimensions. `rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors for all ragged dimensions in `rt`, ordered from outermost to innermost. Args: name: A name prefix for the returned tensors (optional). Returns: A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to `self.ragged_rank`. """ with ops.name_scope(name, "RaggedNestedRowLengths", [self]): rt_nested_row_lengths = [] rt = self while isinstance(rt, RaggedTensor): rt_nested_row_lengths.append(rt.row_lengths()) rt = rt.values return tuple(rt_nested_row_lengths) def bounding_shape(self, axis=None, name=None, out_type=None): """Returns the tight bounding box shape for this `RaggedTensor`. Args: axis: An integer scalar or vector indicating which axes to return the bounding box for. If not specified, then the full bounding box is returned. name: A name prefix for the returned tensor (optional). out_type: `dtype` for the returned tensor. Defaults to `self.row_splits.dtype`. Returns: An integer `Tensor` (`dtype=self.row_splits.dtype`). If `axis` is not specified, then `output` is a vector with `output.shape=[self.shape.ndims]`. If `axis` is a scalar, then the `output` is a scalar. If `axis` is a vector, then `output` is a vector, where `output[i]` is the bounding size for dimension `axis[i]`. #### Example: ```python >>> rt = ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]]) >>> rt.bounding_shape() [5, 4] ``` """ if out_type is None: out_type = self._row_splits.dtype else: out_type = dtypes.as_dtype(out_type) with ops.name_scope(name, "RaggedBoundingBox", [self, axis]): nested_splits = self.nested_row_splits rt_flat_values = self.flat_values # Optimized special cases for when axis=0 or axis=1: if isinstance(axis, int): if axis == 0: return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1 elif axis == 1: return math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0) splits_shape = array_ops.shape(self.row_splits, out_type=out_type) flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type) ragged_dimensions = array_ops.stack([splits_shape[0] - 1] + [ math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0) for splits in nested_splits ]) inner_dimensions = flat_values_shape[1:] bbox = array_ops.concat([ragged_dimensions, inner_dimensions], axis=0) return bbox if axis is None else array_ops.gather(bbox, axis) #============================================================================= # Transformation #============================================================================= def with_values(self, new_values): """Returns a copy of `self` with `values` replaced by `new_value`. Preserves cached row-partitioning tensors such as `self.cached_nrows` and `self.cached_value_rowids` if they have values. 
Args: new_values: Potentially ragged tensor to use as the `values` for the returned `RaggedTensor`. Must have `rank > 0`, and must have the same number of rows as `self.values`. Returns: A `RaggedTensor`. `result.rank = 1 + new_values.rank`. `result.ragged_rank = 1 + new_values.ragged_rank` """ new_values.shape.with_rank_at_least(1) self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1]) if (isinstance(new_values, RaggedTensor) and self._row_splits.dtype != new_values.row_splits.dtype): if not ragged_config.auto_cast_partition_dtype(): raise ValueError("self and new_values have mismatched row_splits " "dtypes; use RaggedTensor.with_row_splits_dtype() to " "convert them to compatible dtypes.") new_values = new_values.with_row_splits_dtype(dtypes.int64) return self.with_row_splits_dtype(dtypes.int64).with_values(new_values) return RaggedTensor( new_values, self._row_splits, self._cached_row_lengths, self._cached_value_rowids, self._cached_nrows, internal=True) def with_flat_values(self, new_values): """Returns a copy of `self` with `flat_values` replaced by `new_value`. Preserves cached row-partitioning tensors such as `self.cached_nrows` and `self.cached_value_rowids` if they have values. Args: new_values: Potentially ragged tensor that should replace `self.flat_values`. Must have `rank > 0`, and must have the same number of rows as `self.flat_values`. Returns: A `RaggedTensor`. `result.rank = self.ragged_rank + new_values.rank`. `result.ragged_rank = self.ragged_rank + new_values.ragged_rank`. """ if isinstance(self._values, ops.Tensor): return self.with_values(new_values) else: return self.with_values(self.values.with_flat_values(new_values)) def with_row_splits_dtype(self, dtype): """Returns a copy of this RaggedTensor with the given `row_splits` dtype. For RaggedTensors with multiple ragged dimensions, the `row_splits` for all nested `RaggedTensor` objects are cast to the given dtype. Args: dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`. Returns: A copy of this RaggedTensor, with the `row_splits` cast to the given type. """ dtype = dtypes.as_dtype(dtype) if dtype not in (dtypes.int32, dtypes.int64): raise ValueError("dtype must be int32 or int64") if self._row_splits.dtype == dtype: return self row_splits = math_ops.cast(self._row_splits, dtype) values = self._values if isinstance(values, RaggedTensor): values = values.with_row_splits_dtype(dtype) cached_row_lengths = self._cached_row_lengths if cached_row_lengths is not None: cached_row_lengths = math_ops.cast(cached_row_lengths, dtype) cached_value_rowids = self._cached_value_rowids if cached_value_rowids is not None: cached_value_rowids = math_ops.cast(cached_value_rowids, dtype) cached_nrows = self._cached_nrows if cached_value_rowids is not None: cached_value_rowids = math_ops.cast(cached_value_rowids, dtype) return RaggedTensor(values, row_splits, cached_row_lengths, cached_value_rowids, cached_nrows, internal=True) #============================================================================= # Tensor Type Conversions #============================================================================= @classmethod def from_tensor(cls, tensor, lengths=None, padding=None, ragged_rank=1, name=None, row_splits_dtype=dtypes.int64): """Converts a `tf.Tensor` into a `RaggedTensor`. The set of absent/default values may be specified using a vector of lengths or a padding value (but not both). If `lengths` is specified, then the output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. 
If 'lengths' is a list of lists or tuple of lists, those lists will be used as nested row lengths. If `padding` is specified, then any row *suffix* consisting entirely of `padding` will be excluded from the returned `RaggedTensor`. If neither `lengths` nor `padding` is specified, then the returned `RaggedTensor` will have no absent/default values. Examples: ```python >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]]) >>> tf.RaggedTensor.from_tensor(dt) <tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]> >>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3]) <tf.RaggedTensor [[5], [], [6, 0, 0]]> >>> tf.RaggedTensor.from_tensor(dt, padding=0) <tf.RaggedTensor [[5, 7], [0, 3], [6]]> >>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]], [[0, 0], [3, 0], [0, 0]], [[6, 0], [0, 0], [0, 0]]]) >>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1])) <tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]> ``` Args: tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or higher. lengths: An optional set of row lengths, specified using a 1-D integer `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows in `tensor`). If specified, then `output[row]` will contain `tensor[row][:lengths[row]]`. Negative lengths are treated as zero. You may optionally pass a list or tuple of lengths to this argument, which will be used as nested row lengths to construct a ragged tensor with multiple ragged dimensions. padding: An optional padding value. If specified, then any row suffix consisting entirely of `padding` will be excluded from the returned RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor` and with `shape=tensor.shape[ragged_rank + 1:]`. ragged_rank: Integer specifying the ragged rank for the returned `RaggedTensor`. Must be greater than zero. name: A name prefix for the returned tensors (optional). row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` tensor. One of `tf.int32` or `tf.int64`. Returns: A `RaggedTensor` with the specified `ragged_rank`. The shape of the returned ragged tensor is compatible with the shape of `tensor`. Raises: ValueError: If both `lengths` and `padding` are specified. """ row_splits_dtype = dtypes.as_dtype(row_splits_dtype) if lengths is not None and padding is not None: raise ValueError("Specify lengths or padding, but not both") if not isinstance(ragged_rank, int): raise TypeError("ragged_rank expected int, got %r" % ragged_rank) if ragged_rank <= 0: raise ValueError( "ragged_rank must be greater than 0; got %s" % ragged_rank) with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]): tensor = ops.convert_to_tensor(tensor, name="tensor") tensor.shape.with_rank_at_least(ragged_rank + 1) input_shape = array_ops.shape(tensor, out_type=row_splits_dtype) ncols = input_shape[1] # Handle ragged_rank>1 via recursion: # If the output should have multiple ragged dimensions, then first # flatten the tensor to eliminate all but the last ragged dimension, # and recursively convert that flattened tensor. Then add on the splits # for the dimensions that we flattened out. if ragged_rank > 1: # Flatten `tensor` to eliminate all but the last ragged dimension. new_shape = array_ops.concat([ constant_op.constant([-1], row_splits_dtype), input_shape[ragged_rank:] ], axis=0) flattened = array_ops.reshape(tensor, new_shape) # Recursively convert the flattened tensor. 
values = cls.from_tensor(flattened, lengths, padding, row_splits_dtype=row_splits_dtype) # The total number of elements in each dimension. E.g., if # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total. dim_size = math_ops.cumprod(input_shape) # Construct splits tensors for the dimensions that were flattened. new_splits = [ math_ops.range(0, dim_size[dim - 1] + 1) * input_shape[dim] for dim in range(1, ragged_rank) ] return cls.from_nested_row_splits(values, new_splits, validate=False) # If padding was specified, then use it to find row lengths. if padding is not None: padding = ops.convert_to_tensor( padding, name="padding", dtype=tensor.dtype) padding.shape.assert_is_compatible_with(tensor.shape[2:]) # Find places where the padding is equal to the tensor. (This will # broadcast `padding` across the outermost 2 dimensions of `tensor`, # so `has_default_value.shape = tensor.shape`.) has_default_value = math_ops.equal(padding, tensor) # If the padding isn't a scalar, then require that all values in the # padding match each item in the tensor. After this block of code, # `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just # use reduce_all for both cases, becaue when you pass an empty `axis` # list to reduce_all, it reduces all axes; but we want it to reduce no # axes -- i.e., to be a no-op.) tensor_rank = array_ops.rank(tensor) reduce_axis = math_ops.range(2, tensor_rank) has_default = control_flow_ops.cond( tensor_rank > 2, lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis), lambda: has_default_value) has_default.set_shape(tensor_shape.TensorShape([None, None])) has_default.set_shape(tensor.shape[:2]) # Use has_default to find the length of each row: for each # non-default item in a row, calculate the length that the row needs to # have to include that item; and then take the max of those values # (across each row). has_nondefault = math_ops.logical_not(has_default) has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype) length_for_nondefault_value = ( has_nondefault * array_ops.expand_dims( math_ops.range(1, ncols + 1), 0)) lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1) if lengths is not None: if isinstance(lengths, (list, tuple)) and len(lengths) and not isinstance( lengths[0], (int, float)): # In this case, we've been given nested row lengths. Rather than # reconstructing the tensor mask directly, we can recreate it as # a boolean RaggedTensor, then densify that and use that as the # mask to clear out the unused data in the passed tensor. tensor.shape.with_rank_at_least(len(lengths) + 1) num_tokens = math_ops.reduce_sum(lengths[-1]) ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool) ragged_mask = cls.from_nested_row_lengths( ones_mask, lengths, validate=False) dense_ragged_mask = ragged_mask.to_tensor(default_value=False) masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask) return cls.from_nested_row_lengths( masked_data, lengths, validate=False) else: # If we have lengths (either directly supplied, or computed from # paddings), then use those to construct splits; and then use masking # to get the corresponding values. 
lengths = ragged_util.convert_to_int_tensor(lengths, "lengths", row_splits_dtype) lengths.shape.assert_has_rank(1) lengths = math_ops.minimum(lengths, ncols) lengths = math_ops.maximum(lengths, 0) limits = math_ops.cumsum(lengths) splits = array_ops.concat( [array_ops.zeros([1], row_splits_dtype), limits], axis=0) mask = array_ops.sequence_mask(lengths, maxlen=ncols) values = array_ops.boolean_mask(tensor, mask) return cls.from_row_splits(values, splits, validate=False) # If neither padding nor lengths were specified, then create a splits # vector that contains no default values, and reshape the input tensor # to form the values for the RaggedTensor. nrows = input_shape[0] nvals = nrows * ncols splits = math_ops.range(nrows + 1) * ncols values_shape = array_ops.concat([[nvals], input_shape[2:]], axis=0) values = array_ops.reshape(tensor, values_shape) return cls.from_row_splits(values, splits, validate=False) def to_tensor(self, default_value=None, name=None): """Converts this `RaggedTensor` into a `tf.Tensor`. Example: ```python >>> rt = ragged.constant([[9, 8, 7], [], [6, 5], [4]]) >>> print rt.to_tensor() [[9 8 7] [0 0 0] [6 5 0] [4 0 0]] ``` Args: default_value: Value to set for indices not specified in `self`. Defaults to zero. `default_value` must be broadcastable to `self.shape[self.ragged_rank + 1:]`. name: A name prefix for the returned tensors (optional). Returns: A `Tensor` with shape `ragged.bounding_shape(self)` and the values specified by the non-empty values in `self`. Empty values are assigned `default_value`. """ with ops.name_scope(name, "RaggedToTensor", [self, default_value]): if default_value is not None: default_value = ops.convert_to_tensor( default_value, name="default_value", dtype=self.dtype) # If ragged_rank > 1, then recursively convert the ragged values into a # `Tensor` before we proceed. values = self.values if is_ragged(values): values = values.to_tensor(default_value) # Tile the default value, if necessary. if default_value is not None: if values.shape.ndims is not None: default_value.shape.with_rank_at_most(values.shape.ndims - 1) if (values.shape.ndims is None or default_value.shape.ndims is None or values.shape.ndims != default_value.shape.ndims + 1): value_shape = array_ops.shape(values)[1:] default_value = array_ops.broadcast_to(default_value, value_shape) default_value.shape.assert_is_compatible_with(values.shape[1:]) # Get the expected dense shape ([nrows, ncols] + value_shape). rt_row_lengths = [self.row_splits[1:] - self.row_splits[:-1]] nrows = array_ops.shape(self.row_splits, out_type=self._row_splits.dtype)[0] - 1 ncols = math_ops.maximum(math_ops.reduce_max(rt_row_lengths), 0) values_shape = array_ops.shape(values, out_type=self._row_splits.dtype) value_shape = values_shape[1:] nvals = values_shape[0] # Build a default value if none was supplied. if default_value is None: default_value = array_ops.zeros(value_shape, dtype=values.dtype) default_value.shape.assert_is_compatible_with(values.shape[1:]) default_value.set_shape(values.shape[1:]) # Get the row start indices, and expand to shape=[nrows, 1]. starts = array_ops.expand_dims(self.row_splits[:-1], 1) # Get the row limit indices, and expand to shape=[nrows, 1]. limits = array_ops.expand_dims(self.row_splits[1:], 1) # Get the column indices, and expand to shape=[1, ncols]. columns = array_ops.expand_dims(math_ops.range(0, ncols), 0) # Build a list containing the values plus the default value. 
We will use # tf.gather to collect values from this list for the `Tensor` (using # nvals as the index for the default value). values_and_default = array_ops.concat( [values, array_ops.stack([default_value])], axis=0) # Construct a matrix "indices" pointing into values_and_default. I.e., # output[r, c] = values_and_default[indices[r, c]. nondefault_index = starts + columns has_value = nondefault_index < limits default_index = array_ops.fill(array_ops.stack([nrows, ncols]), nvals) indices = array_ops.where(has_value, nondefault_index, default_index) # Gather the results into a `Tensor`. return array_ops.gather(values_and_default, indices) @classmethod def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64): """Converts a 2D `tf.SparseTensor` to a `RaggedTensor`. Each row of the `output` `RaggedTensor` will contain the explicit values from the same row in `st_input`. `st_input` must be ragged-right. If not it is not ragged-right, then an error will be generated. Example: ```python >>> st = SparseTensor(indices=[[0, 1], [0, 2], [0, 3], [1, 0], [3, 0]], ... values=[1, 2, 3, 4, 5], ... dense_shape=[4, 3]) >>> rt.RaggedTensor.from_sparse(st).eval().tolist() [[1, 2, 3], [4], [], [5]] ``` Currently, only two-dimensional `SparseTensors` are supported. Args: st_input: The sparse tensor to convert. Must have rank 2. name: A name prefix for the returned tensors (optional). row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits` tensor. One of `tf.int32` or `tf.int64`. Returns: A `RaggedTensor` with the same values as `st_input`. `output.ragged_rank = rank(st_input) - 1`. `output.shape = [st_input.dense_shape[0], None]`. Raises: ValueError: If the number of dimensions in `st_input` is not known statically, or is not two. """ row_splits_dtype = dtypes.as_dtype(row_splits_dtype) if not sparse_tensor.is_sparse(st_input): raise TypeError("Expected SparseTensor, got %s" % type(st_input).__name__) with ops.name_scope(name, "RaggedFromSparse", [st_input]): st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor( st_input, name="st_input") if st_input.dense_shape.shape.ndims is None: static_rank_from_dense_shape = None else: static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value if st_input.indices.shape.ndims is None: static_rank_from_indices = None else: static_rank_from_indices = st_input.indices.shape.dims[1].value if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2: raise ValueError("rank(st_input) must be 2") with ops.control_dependencies( _assert_sparse_indices_are_ragged_right(st_input.indices)): # Treat sparse row indices as segment ids to generate a splits tensor # thta we can pair with the sparse tensor values. (Ignore sparse column # indices.) segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype) num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype) return cls.from_value_rowids( st_input.values, segment_ids, num_segments, validate=False) def to_sparse(self, name=None): """Converts this `RaggedTensor` into a `tf.SparseTensor`. Example: ```python >>> rt = ragged.constant([[1, 2, 3], [4], [], [5, 6]]) >>> rt.to_sparse().eval() SparseTensorValue(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]], values=[1, 2, 3, 4, 5, 6], dense_shape=[4, 3]) ``` Args: name: A name prefix for the returned tensors (optional). Returns: A SparseTensor with the same values as `self`. 
""" with ops.name_scope(name, "RaggedToSparse", [self]): result = gen_ragged_conversion_ops.ragged_tensor_to_sparse( self.nested_row_splits, self.flat_values, name=name) return sparse_tensor.SparseTensor(result.sparse_indices, result.sparse_values, result.sparse_dense_shape) @classmethod def _from_variant(cls, variant, dtype, output_ragged_rank, input_ragged_rank=None, name=None): """Converts a `variant` Tensor into a `RaggedTensor`. The input `variant` could be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could have an arbitrary rank, in which case each element is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank` and these are then stacked according to the input shape to output a single `RaggedTensor` with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not provided, it is inferred dynamically as `output_ragged_rank` - `rank(variant)`. If `input_ragged_rank` is provided, the following must be true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`. Example: ```python >>> rt = ragged.constant([[0], [1, 2]]) >>> et = rt._to_variant() >>> stacked_et = ragged.stack([et, et]) >>> ragged.RaggedTensor._from_variant( # scalar input. et, dtype=tf.int32, output_ragged_rank=1).eval().tolist() [[0], [1, 2]] >>> ragged.RaggedTensor._from_variant( # batched input. stacked_et, dtype=tf.int32, output_ragged_rank=2).eval().tolist() [[[0], [1, 2]], [[0], [1, 2]]] ``` Args: variant: A `variant` Tensor representing an encoded (possibly nested-batched) `RaggedTensor`. dtype: The dtype of the encoded `RaggedTensor`. output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This is optional and inferred dynamically if not provided. name: A name prefix for the returned tensors (optional). Returns: A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`. Raises: ValueError: If the input rank is known, `input_ragged_rank` is provided and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does not hold. """ variant = ops.convert_to_tensor( variant, name="variant", dtype=dtypes.variant) if (variant.shape.ndims is not None and input_ragged_rank is not None and output_ragged_rank != input_ragged_rank + variant.shape.ndims): raise ValueError( "output_ragged_rank must be equal to input_ragged_rank +" "variant.shape.ndims, found variant.shape.ndims: %d, " "input_ragged_rank: %d, output_ragged_rank: %d" % (variant.shape.ndims, input_ragged_rank, output_ragged_rank)) input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank with ops.name_scope( name, "RaggedFromVariant", [variant, dtype, input_ragged_rank, output_ragged_rank]): result = gen_ragged_conversion_ops.ragged_tensor_from_variant( variant, input_ragged_rank, output_ragged_rank, dtype, dtypes.int64, name) return cls.from_nested_row_splits( result.output_dense_values, result.output_nested_splits, validate=False) def _to_variant(self, batched_input=False, name=None): """Converts this `RaggedTensor` into a `variant` Tensor. If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the zero-th dimension, each component `RaggedTensor` is encoded into a scalar `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor. If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and a scalar `variant` Tensor is returned. 
Example: >>> rt = ragged.constant([[[0]], [[1]], [[2]]]) >>> rt._to_variant().shape.as_list() [] >>> rt._to_variant(batched_input=True).shape.as_list() [3] Args: batched_input: If `True`, the `RaggedTensor` is unbatched and converted to a `variant` vector. Set to `False` by default. name: A name prefix for the returned tensors (optional). Returns: A `variant` Tensor that encodes this `RaggedTensor`. """ with ops.name_scope(name, "RaggedToVariant", [self, batched_input]): return gen_ragged_conversion_ops.ragged_tensor_to_variant( self.nested_row_splits, self.flat_values, batched_input, name) #============================================================================= # String Encoding #============================================================================= def __repr__(self): if self._is_eager(): return "<tf.RaggedTensor %s>" % self.to_list() else: return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self._values, self._row_splits) #============================================================================= # Eager Execution Mode #============================================================================= def to_list(self): """Returns a nested Python `list` with the values for this `RaggedTensor`. Requires that `rt` was constructed in eager execution mode. Returns: A nested Python `list`. """ if self._is_eager(): return self._eager_value().to_list() else: raise ValueError("RaggedTensor.to_list() is only supported in eager " "mode; in graph mode, evaluate the RaggedTensor first " "and then use RaggedTensorValue.to_list().") def _eager_value(self): """Returns a RaggedTensorValue for self. Requires self._is_eager()=true.""" value = self.flat_values.numpy() for row_splits in reversed(self.nested_row_splits): value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy()) return value def _is_eager(self): """Returns True if values & row_splits Tensors are all `EagerTensor`s.""" rt = self while isinstance(rt, RaggedTensor): if not isinstance(rt.row_splits, ops.EagerTensor): return False rt = rt.values return isinstance(rt, ops.EagerTensor) #============================================================================= # Indexing & Slicing #============================================================================= def __getitem__(self, key): """Returns the specified piece of this RaggedTensor.""" # See ragged_getitem.py for the documentation and implementation of this # method. # # Note: the imports in ragged/__init__.py ensure that this method always # gets overridden before it is called. #============================================================================= # Name Scope #============================================================================= # This private function is used by ops.name_scope to ensure that all of the # input tensors for the scope belong to the same graph. Defining this means # that you may include `RaggedTensor` objects in the name_scope `values` # list. 
def _as_graph_element(self): """Convert `self` to a graph element.""" values = self.values while isinstance(values, RaggedTensor): values = values.values return values #============================================================================= # Composite Tensor #============================================================================= @property def _type_spec(self): return RaggedTensorSpec( shape=self.shape, dtype=self.dtype, ragged_rank=self.ragged_rank, row_splits_dtype=self._row_splits.dtype) def _shape_invariant_to_type_spec(self, shape): return RaggedTensorSpec(shape, self.dtype, self.ragged_rank, self.row_splits.dtype) def consumers(self): return self._consumers() def is_ragged(value): """Returns true if `value` is a ragged tensor or ragged tensor value.""" return isinstance(value, (RaggedTensor, ragged_tensor_value.RaggedTensorValue)) def match_row_splits_dtypes(*tensors, **kwargs): """Return a copy of `tensors` with row_splits all having the same dtype. Args: *tensors: A list of Tensors or RaggedTensors. **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors), where `dtype` is the data type used by row-splits, and `tensors` is the converted list of `Tensors` and `RaggedTensors`. Returns: The converted list of `Tensors` and `RaggedTensors`. """ return_dtype = kwargs.pop("return_dtype", False) if kwargs: raise ValueError("Unexpected keyword args %r" % kwargs) has_int32 = False has_int64 = False for tensor in tensors: if isinstance(tensor, RaggedTensor): if tensor.row_splits.dtype == dtypes.int32: has_int32 = True else: has_int64 = True if has_int32 and has_int64: if not ragged_config.auto_cast_partition_dtype(): raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; " "use RaggedTensor.with_row_splits_dtype() to convert " "them to compatible dtypes.") dtype = dtypes.int64 tensors = tuple(t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor) else t for t in tensors) elif has_int32: dtype = dtypes.int32 else: dtype = dtypes.int64 if return_dtype: return (dtype, tensors) else: return tensors #=============================================================================== # RaggedTensorSpec #=============================================================================== @tf_export("RaggedTensorSpec") class RaggedTensorSpec(type_spec.BatchableTypeSpec): """Type specification for a `tf.RaggedTensor`.""" __slots__ = ["_shape", "_dtype", "_ragged_rank", "_row_splits_dtype"] @property def value_type(self): return RaggedTensor if self._ragged_rank > 0 else ops.Tensor def __init__(self, shape=None, dtype=dtypes.float32, ragged_rank=None, row_splits_dtype=dtypes.int64): """Constructs a type specification for a `tf.RaggedTensor`. Args: shape: The shape of the RaggedTensor, or `None` to allow any shape. If a shape is specified, then all ragged dimensions must have size `None`. dtype: `tf.DType` of values in the RaggedTensor. ragged_rank: Python integer, the ragged rank of the RaggedTensor to be described. Defaults to `shape.ndims - 1`. row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One of `tf.int32` or `tf.int64`. 
""" self._shape = tensor_shape.as_shape(shape) self._dtype = dtypes.as_dtype(dtype) self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype) rank = self._shape.ndims if ragged_rank is None: if rank is None: raise ValueError("Must specify ragged_rank or " "a shape with a known rank.") ragged_rank = rank - 1 self._ragged_rank = ragged_rank if not isinstance(self._ragged_rank, int): raise TypeError("ragged_rank must be an int") if rank is not None: if ragged_rank >= rank: raise ValueError("ragged_rank must be less than rank.") def _serialize(self): return (self._shape, self._dtype, self._ragged_rank, self._row_splits_dtype) @property def _component_specs(self): if self._ragged_rank == 0: return [tensor_spec.TensorSpec(self._shape, self._dtype)] flat_values_shape = tensor_shape.TensorShape([None]).concatenate( self._shape[self._ragged_rank + 1:]) outer_dim = tensor_shape.dimension_at_index(self._shape, 0) outer_splits_shape = [None if outer_dim is None else outer_dim + 1] inner_splits_spec = tensor_spec.TensorSpec([None], self._row_splits_dtype) specs = ( [tensor_spec.TensorSpec(flat_values_shape, self._dtype), tensor_spec.TensorSpec(outer_splits_shape, self._row_splits_dtype)] + [inner_splits_spec for _ in range(self._ragged_rank - 1)]) return specs def _to_components(self, value): if is_ragged(value): return [value.flat_values] + list(value.nested_row_splits) else: return [value] def _from_components(self, tensor_list): result = tensor_list[0] if (all(isinstance(t, np.ndarray) for t in tensor_list) and not tf2.enabled()): for row_splits in reversed(tensor_list[1:]): result = ragged_tensor_value.RaggedTensorValue(result, row_splits) else: if isinstance(tensor_list[0], np.ndarray): tensor_list = [ops.convert_to_tensor(t) for t in tensor_list] result = tensor_list[0] for row_splits in reversed(tensor_list[1:]): result = RaggedTensor(result, row_splits, internal=True) return result # The RaggedTensorSpec tensor_list encoding uses to/from_variant ops # to (un)box the component tensors in a way that allows for batching & # unbatching. @property def _flat_tensor_specs(self): # NOTE(mishragaurav): The default flat shape of a boxed `RaggedTensor` is # `[]` (scalar), but a `RaggedTensorSpec` can also represent a batch of # boxed `RaggedTensor` objects with shape `(...)` (and batches of batches, # etc.), so the flat shape must be unknown. return [tensor_spec.TensorSpec(None, dtypes.variant)] def _to_tensor_list(self, value): # pylint: disable=protected-access return [value._to_variant(batched_input=False)] def _to_batched_tensor_list(self, value): # pylint: disable=protected-access return [value._to_variant(batched_input=True)] def _from_compatible_tensor_list(self, tensor_list): if self._ragged_rank <= 0: raise ValueError( "ragged_rank must be non-negative; got %s." % self._ragged_rank) result = RaggedTensor._from_variant( # pylint: disable=protected-access tensor_list[0], dtype=self._dtype, output_ragged_rank=self._ragged_rank) if self._shape.ndims is not None: outer_dim = tensor_shape.dimension_value(self._shape[0]) if outer_dim is not None: result.row_splits.set_shape([outer_dim + 1]) result.flat_values.set_shape( tensor_shape.TensorShape([None]).concatenate( self._shape[1 + self._ragged_rank:])) return result def _batch(self, batch_size): return RaggedTensorSpec( tensor_shape.TensorShape([batch_size]).concatenate(self._shape), self._dtype, self._ragged_rank + 1) def _unbatch(self): # Note: Negative ragged_rank is allowed here because the dataset could # be subsequently batched again. 
Errors are handled in # RaggedTensorSpec._from_compatible_tensor_list() return RaggedTensorSpec(self._shape[1:], self._dtype, self._ragged_rank - 1) def _to_legacy_output_types(self): return self._dtype def _to_legacy_output_shapes(self): return self._shape def _to_legacy_output_classes(self): return self @classmethod def from_value(cls, value): return cls(shape=value.shape, dtype=value.values.dtype, ragged_rank=value.ragged_rank, row_splits_dtype=value.row_splits.dtype) type_spec.register_type_spec_from_value_converter( ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value) #=============================================================================== # Convert value -> tensor #=============================================================================== def convert_to_tensor_or_ragged_tensor(value, dtype=None, preferred_dtype=None, name=None): """Converts value to a `RaggedTensor` or `Tensor`. * If `value` is a `RaggedTensor`, then return it as-is. * If `value` is a `RaggedTensorValue`, return a corresponding constant `RaggedTensor`. * Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`. Args: value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing the type is inferred from the type of `value`. preferred_dtype: Optional element type for the returned tensor, used when dtype is None. This argument has no effect if `value` is already a tensor, or when conversion is not possible. name: Optional name to use if a new `Tensor` is created. Returns: A `Tensor` or `RaggedTensor`. """ if isinstance(value, RaggedTensor): if dtype and not dtype.is_compatible_with(value.dtype): raise ValueError("Tensor conversion requested dtype %s for " "RaggedTensor with dtype %s: %r" % (dtype.name, value.dtype.name, value)) return value elif isinstance(value, ragged_tensor_value.RaggedTensorValue): with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []): flat_values = ops.convert_to_tensor( value=value.flat_values, dtype=dtype, preferred_dtype=preferred_dtype, name="flat_values") return RaggedTensor.from_nested_row_splits( flat_values, value.nested_row_splits, validate=False) else: return ops.convert_to_tensor( value=value, dtype=dtype, preferred_dtype=preferred_dtype, name=name) #=============================================================================== # Register RaggedTensor for use with session.run. 
#=============================================================================== def _ragged_tensor_value_from_components(components): components = list(components) value = components.pop() while components: value = ragged_tensor_value.RaggedTensorValue(value, components.pop()) return value def _ragged_tensor_session_fetch(rt): components = rt.nested_row_splits + (rt.flat_values,) return (components, _ragged_tensor_value_from_components) def _ragged_tensor_session_feed(feed_key, feed_val): key_components = feed_key.nested_row_splits + (feed_key.flat_values,) val_components = feed_val.nested_row_splits + (feed_val.flat_values,) return zip(key_components, val_components) def _ragged_tensor_session_feed_for_partial_run(feed_key): return feed_key.nested_row_splits + (feed_key.flat_values,) session.register_session_run_conversion_functions( RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed, _ragged_tensor_session_feed_for_partial_run) #=============================================================================== # RaggedTensorType #=============================================================================== class RaggedTensorType(object): """Encoding of a static type for a `RaggedTensor`. Use this type to express/declare that an output must have the type of `RaggedTensor`. """ def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64): """Initializes a RaggedTensorType object. Args: dtype: data type of the `RaggedTensor`'s inner values. ragged_rank: ragged_rank of the declared `RaggedTensor`. row_splits_dtype: data type for the `RaggedTensor`'s row splits. One of: `tf.int32` or `tf.int64`. """ row_splits_dtype = dtypes.as_dtype(row_splits_dtype) self._dtype = dtype self._ragged_rank = ragged_rank self._row_splits_dtype = row_splits_dtype dtype = property(lambda self: self._dtype) ragged_rank = property(lambda self: self._ragged_rank) row_splits_dtype = property(lambda self: self._row_splits_dtype) #=============================================================================== # Helper Functions #=============================================================================== def _assert_sparse_indices_are_ragged_right(indices): """Checks that the given SparseTensor.indices tensor is ragged-right. Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right because the entry `[3, 1]` skips a cell. Args: indices: The SparseTensor indices to check. Returns: A list of control dependency op tensors. """ index_prefix = indices[:, :-1] index_suffix = indices[:, -1] # Check whether each index is starting a new row in the innermost dimension # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]). # (Note: this skips the first index; we will check that separately below.) index_prefix_changed = math_ops.reduce_any( math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1) # Check two cases: # * For indices that start a new row: index_suffix[i] must be zero. # * For indices that continue a row: index_suffix[i] must be equal to # index_suffix[i-1]+1. index_ok = array_ops.where( index_prefix_changed, math_ops.equal(index_suffix[1:], 0), math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1)) # Also check that the very first index didn't skip any cells. The first # index starts a new row (by definition), so its suffix should be zero. 
sparse_indices_are_ragged_right = math_ops.logical_and( math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)), math_ops.reduce_all(index_ok)) message = [ "SparseTensor is not right-ragged", "SparseTensor.indices =", indices ] return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)] @ops.RegisterGradient("RaggedTensorToSparse") def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad, sparse_values_grad, unused_sparse_shape_grad): """Gradient for RaggedTensorToSparse.""" op_inputs_nested_row_splits = op.inputs[:-1] op_inputs_flat_values = op.inputs[-1] # No gradient for the RaggedTensor's nested_row_splits. nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits) # Gradient for the RaggedTensor's flat_values is formed by reshaping # the gradient for the SparseTensor's values. flat_values_shape = array_ops.shape(op_inputs_flat_values) flat_values_gradient = array_ops.reshape(sparse_values_grad, flat_values_shape) return nested_row_splits_gradient + [flat_values_gradient] def _assert_monotonic_increasing(tensor, message=None): return check_ops.assert_non_negative( tensor[1:] - tensor[:-1], message=message) def _assert_zero(tensor, message=None): return check_ops.assert_equal( tensor, constant_op.constant(0, dtype=tensor.dtype), message=message) def _nrows(tensor, out_type=dtypes.int32): if isinstance(tensor, RaggedTensor): return tensor.nrows(out_type=out_type) else: return array_ops.shape(tensor, out_type=out_type)[0] ops.no_gradient("RaggedTensorToVariant")
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_tensor.py
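A minimal usage sketch of the conversion and row-partitioning methods defined in `ragged_tensor.py` above (`from_tensor`, `to_tensor`, `to_sparse`, `from_sparse`, `row_splits`, `row_lengths`, `nrows`). The expected outputs in the comments follow the docstring examples; the sketch assumes a TensorFlow build that exports `tf.RaggedTensor` and that eager execution is enabled (the default in TF 2.x, or via `tf.enable_eager_execution()` in 1.x).

```python
import tensorflow as tf

# Build a ragged tensor from a padded dense tensor: any row suffix made up
# entirely of the padding value is dropped (see RaggedTensor.from_tensor).
dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
rt = tf.RaggedTensor.from_tensor(dt, padding=0)    # [[5, 7], [0, 3], [6]]

# Row-partitioning accessors documented above.
print(rt.row_splits)     # [0, 2, 4, 5]
print(rt.row_lengths())  # [2, 2, 1]
print(rt.nrows())        # 3

# Convert back to a dense Tensor, filling absent cells with a default value.
print(rt.to_tensor(default_value=-1))  # [[5, 7], [0, 3], [6, -1]]

# Round-trip through a 2-D SparseTensor.
st = rt.to_sparse()
rt2 = tf.RaggedTensor.from_sparse(st)  # same rows as `rt`
```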
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Array operations for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sort_ops from tensorflow.python.ops.ragged import ragged_functional_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.ops.ragged import segment_id_ops from tensorflow.python.util.tf_export import tf_export #=============================================================================== # Masking #=============================================================================== @tf_export('ragged.boolean_mask') def boolean_mask(data, mask, name=None): """Applies a boolean mask to `data` without flattening the mask dimensions. Returns a potentially ragged tensor that is formed by retaining the elements in `data` where the corresponding value in `mask` is `True`. * `output[a1...aA, i, b1...bB] = data[a1...aA, j, b1...bB]` Where `j` is the `i`th `True` entry of `mask[a1...aA]`. Note that `output` preserves the mask dimensions `a1...aA`; this differs from `tf.boolean_mask`, which flattens those dimensions. Args: data: A potentially ragged tensor. mask: A potentially ragged boolean tensor. `mask`'s shape must be a prefix of `data`'s shape. `rank(mask)` must be known statically. name: A name prefix for the returned tensor (optional). Returns: A potentially ragged tensor that is formed by retaining the elements in `data` where the corresponding value in `mask` is `True`. * `rank(output) = rank(data)`. * `output.ragged_rank = max(data.ragged_rank, rank(mask) - 1)`. Raises: ValueError: if `rank(mask)` is not known statically; or if `mask.shape` is not a prefix of `data.shape`. #### Examples: ```python >>> # Aliases for True & False so data and mask line up. >>> T, F = (True, False) >>> tf.ragged.boolean_mask( # Mask a 2D Tensor. ... data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], ... mask=[[T, F, T], [F, F, F], [T, F, F]]).tolist() [[1, 3], [], [7]] >>> tf.ragged.boolean_mask( # Mask a 2D RaggedTensor. ... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]), ... tf.ragged.constant([[F, F, T], [F], [T, T]])).tolist() [[3], [], [5, 6]] >>> tf.ragged.boolean_mask( # Mask rows of a 2D RaggedTensor. ... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]), ... tf.ragged.constant([True, False, True])).tolist() [[1, 2, 3], [5, 6]] ``` """ with ops.name_scope(name, 'RaggedMask', [data, mask]): # Convert inputs to tensors. 
data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data') mask = ragged_tensor.convert_to_tensor_or_ragged_tensor( mask, dtypes.bool, name='mask') row_splits_dtype, (data, mask) = ragged_tensor.match_row_splits_dtypes( data, mask, return_dtype=True) # Get static rank of mask. if mask.shape.ndims is None: raise ValueError('mask.shape.ndims must be known statically.') elif mask.shape.ndims == 0: raise ValueError('mask cannot be scalar.') # If mask is ragged, then recurse with a non-ragged mask. if ragged_tensor.is_ragged(mask): if not ragged_tensor.is_ragged(data): data = ragged_tensor.RaggedTensor.from_tensor( data, ragged_rank=mask.ragged_rank, row_splits_dtype=mask.row_splits.dtype) # Check that mask.nested_row_splits is a prefix of # data.nested_row_splits. splits_list = [ mask.nested_row_splits, data.nested_row_splits[:mask.ragged_rank] ] with ops.control_dependencies( ragged_util.assert_splits_match(splits_list)): # Strip off ragged `splits` until `mask` is non-ragged. Keep the splits # that we strip off in `splits`, so we can add them back on after # we recursively mask the non-ragged data. splits = [] while ragged_tensor.is_ragged(mask): if mask.shape.ndims > 2: splits.append(mask.row_splits) else: # Count the number of True mask values in each row to find the # lengths of the filtered rows; then convert to splits. int_mask = ragged_functional_ops.map_flat_values( math_ops.cast, mask, dtype=row_splits_dtype) masked_row_lengths = ragged_math_ops.reduce_sum(int_mask, axis=1) splits.append(ragged_util.lengths_to_splits(masked_row_lengths)) mask = mask.values data = data.values # Recursively apply the nested non-ragged mask to the nested data. masked_values = boolean_mask(data, mask) # Add the ragged `splits` back to the result. masked_values = ragged_tensor.RaggedTensor.from_nested_row_splits( masked_values, splits, validate=False) return masked_values # If mask is non-ragged and has rank 1, and data is ragged, then build a # ragged tensor with the indicated rows. elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1: # Get the masked splits: first get the length of each row, then filter # out the rows that we are deleting, and convert that filtered set of # masks back to a splits tensor. lengths = data.row_lengths() masked_lengths = array_ops.boolean_mask(lengths, mask) masked_splits = ragged_util.lengths_to_splits(masked_lengths) # Get the masked values: first get row ids corresponding to each # value, then use tf.gather to build a boolean mask that's false for # values that come from rows that we are deleting, and use that mask to # construct the masked values tensor. segment_ids = segment_id_ops.row_splits_to_segment_ids(data.row_splits) segment_mask = array_ops.gather(mask, segment_ids) masked_values = boolean_mask(data.values, segment_mask) return ragged_tensor.RaggedTensor.from_row_splits(masked_values, masked_splits, validate=False) # If mask is non-ragged and has rank>1, then convert it to be ragged, # with a ragged rank matching data. if ragged_tensor.is_ragged(data): mask = ragged_tensor.RaggedTensor.from_tensor( mask, ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1), row_splits_dtype=data.row_splits.dtype) return boolean_mask(data, mask) # Otherwise, data and mask are both `Tensor`s. else: # Apply `boolean_mask` to get the masked values. masked_values = array_ops.boolean_mask(data, mask) if mask.shape.ndims >= 2: # Add the innermost ragged dimension. For each innermost cell, get the # number of values it contains. 
Then flatten that to get a list of # cell lengths, and convert it to splits. Finally, combine the splits # and values to get the innermost ragged tensor. masked_lengths = math_ops.count_nonzero(mask, axis=-1, dtype=row_splits_dtype) flattened_masked_lengths = array_ops.reshape(masked_lengths, [-1]) masked_values = ragged_tensor.RaggedTensor.from_row_lengths( masked_values, flattened_masked_lengths, validate=False) # Wrap remaining ragged dimensions. if mask.shape.ndims > 2: mask_shape = array_ops.shape(mask, out_type=row_splits_dtype) split_size = math_ops.cumprod(mask_shape) + 1 for dim in range(mask.shape.ndims - 3, -1, -1): elt_size = mask_shape[dim + 1] masked_splits = math_ops.range(split_size[dim]) * elt_size masked_values = ragged_tensor.RaggedTensor.from_row_splits( masked_values, masked_splits, validate=False) return masked_values #=============================================================================== # Tiling #=============================================================================== def tile(input, multiples, name=None): # pylint: disable=redefined-builtin """Constructs a `RaggedTensor` by tiling a given `RaggedTensor`. The values of `input` are replicated `multiples[i]` times along the `i`th dimension (for each dimension `i`). For every dimension `axis` in `input`, the length of each output element in that dimension is the length of corresponding input element multiplied by `multiples[axis]`. Args: input: A `RaggedTensor`. multiples: A 1-D integer `Tensor`. Length must be the same as the number of dimensions in `input`. name: A name for the operation (optional). Returns: A `RaggedTensor` with the same type, rank, and ragged_rank as `input`. #### Example: ```python >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> ragged.tile(rt, [3, 2]) [[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]] ``` """ with ops.name_scope(name, 'RaggedTile', [input, multiples]): input = ragged_tensor.convert_to_tensor_or_ragged_tensor( input, name='input') if not ragged_tensor.is_ragged(input): return array_ops.tile(input, multiples, name) multiples = ragged_util.convert_to_int_tensor( multiples, name='multiples', dtype=input.row_splits.dtype) multiples.shape.assert_has_rank(1) # If the constant value of `multiples` is available, then we can use it # to skip tiling dimensions where `multiples=1`. const_multiples = tensor_util.constant_value(multiples) return ragged_tensor.RaggedTensor.from_nested_row_splits( _tile_ragged_values(input, multiples, const_multiples), _tile_ragged_splits(input, multiples, const_multiples), validate=False) def _tile_ragged_values(rt_input, multiples, const_multiples=None): """Builds flat_values tensor for a tiled `RaggedTensor`. Returns a tensor that repeats the values in `rt_input.flat_values` in the appropriate pattern to construct a `RaggedTensor` that tiles `rt_input` as specified by `multiples`. Args: rt_input: The `RaggedTensor` whose values should be repeated. multiples: A 1-D integer `tensor`, indicating how many times each dimension should be repeated. const_multiples: Optional constant value for multiples. Used to skip tiling dimensions where `multiples=1`. Returns: A `Tensor` with the same type and rank as `rt_input.flat_values`. #### Example: ```python >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> _tile_ragged_values(rt, [3, 2]) [1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3] ``` """ ragged_rank = rt_input.ragged_rank nested_splits = rt_input.nested_row_splits # Pointers to the values in `rt_input.flat_values`. 
inner_value_ids = math_ops.range(nested_splits[-1][-1]) # For each ragged dimension (working from the innermost to outermost), # expand `inner_value_ids` as necessary to tile that dimension. prev_splits = None for axis in range(ragged_rank, 0, -1): # Ragged splits for this dimension. splits = nested_splits[axis - 1] # Adjust splits so they point into `inner_value_ids` (instead of just # pointing into the next dimension's values). if prev_splits is not None: # Not the first pass through the loop. splits = array_ops.gather(prev_splits * multiples[axis + 1], splits) # Repeat each element in this ragged dimension `multiples[axis]` times. if const_multiples is None or const_multiples[axis] != 1: inner_value_ids = ragged_util.repeat_ranges(inner_value_ids, splits, multiples[axis]) prev_splits = splits # Gather the tiled inner values. ragged_tiled_values = array_ops.gather(rt_input.flat_values, inner_value_ids) # Tile the flat_values for the uniform dimensions (i.e., for `axis=0` plus # `axis=range(ragged_rank, rank)`). inner_repeats = array_ops.concat([multiples[:1], multiples[ragged_rank + 1:]], axis=0) return array_ops.tile(ragged_tiled_values, inner_repeats) def _tile_ragged_splits(rt_input, multiples, const_multiples=None): """Builds nested_split tensors for a tiled `RaggedTensor`. Returns a list of split tensors that can be used to construct the `RaggedTensor` that tiles `rt_input` as specified by `multiples`. Args: rt_input: The `RaggedTensor` that is being tiled. multiples: A 1-D integer `tensor`, indicating how many times each dimension should be repeated. const_multiples: Optional constant value for multiples. Used to skip tiling dimensions where `multiples=1`. Returns: A list of 1-D integer `Tensor`s (one for each ragged dimension in `rt_input`). #### Example: ```python >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> _tile_ragged_splits(rt, [3, 2]) [0, 4, 6, 10, 12, 16, 18] ``` """ ragged_rank = rt_input.ragged_rank nested_splits = rt_input.nested_row_splits # projected_splits[src_axis, dst_axis] contains the split points that divide # the rows from src_axis in the list of dst_axis values. E.g., # projected_splits[i, i] = nested_splits[i], and # projected_splits[i, i+1] = gather(nested_splits[i+1], nested_splits[i]). projected_splits = [{i: nested_splits[i]} for i in range(ragged_rank)] for src_axis in range(ragged_rank): for dst_axis in range(src_axis + 1, ragged_rank - 1): projected_splits[src_axis][dst_axis] = array_ops.gather( nested_splits[dst_axis], projected_splits[src_axis][dst_axis - 1]) # For each ragged dimension: nested_splits[axis] -> result_splits[axis]. result_splits = [] for axis in range(ragged_rank): # Get the length of each row for the input tensor for this dimension. input_lengths = nested_splits[axis][1:] - nested_splits[axis][:-1] # Multiply those lengths by the `multiples` of dimension axis+1, since # each value will be repeated that number of times. output_lengths = input_lengths * multiples[axis + 1] # Repeat ranges of the row lengths as necessary for them to be tiled in # each ragged dimension `d < axis`. (Start with dimension d=axis-1, and # work our way up to dimension d=0.) repeats = 1 for d in range(axis - 1, -1, -1): if const_multiples is None or const_multiples[d + 1] != 1: splits = projected_splits[d][axis - 1] * repeats output_lengths = ragged_util.repeat_ranges(output_lengths, splits, multiples[d + 1]) repeats *= multiples[d + 1] # Tile splits for the outermost (uniform) dimension. 
output_lengths = array_ops.tile(output_lengths, multiples[:1]) # Convert to splits. result_splits.append(ragged_util.lengths_to_splits(output_lengths)) return result_splits #=============================================================================== # Reshaping #=============================================================================== def expand_dims(input, axis, name=None): # pylint: disable=redefined-builtin """Inserts a dimension with shape 1 into a potentially ragged tensor's shape. Given a potentially ragged tenor `input`, this operation inserts a dimension with size 1 at the dimension `axis` of `input`'s shape. * If `input` is a `Tensor`, then this is equivalent to `tf.expand_dims`. * If `input` is ragged, and `axis=0`, then the new dimension will be uniform; but the previously outermost dimension will become ragged. * If `input` is ragged, and `0 < axis < input.ragged_rank`, then the new dimension will be ragged. * If `input` is ragged, and axis >= input.ragged_rank`, then the new dimension will be uniform. The following table gives some examples showing how `ragged.expand_dims` impacts the shapes of different input tensors. Ragged dimensions are indicated by enclosing them in parentheses. input.shape | axis | result.shape ----------------------- | ---- | ----------------------------- `[D1, D2]` | `0` | `[1, D1, D2]` `[D1, D2]` | `1` | `[D1, 1, D2]` `[D1, D2]` | `2` | `[D1, D2, 1]` `[D1, (D2), (D3), D4]` | `0` | `[1, (D1), (D2), (D3), D4]` `[D1, (D2), (D3), D4]` | `1` | `[D1, (1), (D2), (D3), D4]` `[D1, (D2), (D3), D4]` | `2` | `[D1, (D2), (1), (D3), D4]` `[D1, (D2), (D3), D4]` | `3` | `[D1, (D2), (D3), 1, D4]` `[D1, (D2), (D3), D4]` | `4` | `[D1, (D2), (D3), D4, 1]` Args: input: The potentially tensor that should be expanded with a new dimension. axis: An integer constant indicating where the new dimension should be inserted. name: A name for the operation (optional). Returns: A tensor with the same values as `input`, with an added dimension of size 1 at `axis`. #### Examples: ```python >>> rt = tf.ragged.constant([[1, 2], [3]]) >>> print rt.shape TensorShape([2, None]) >>> expanded = ragged.expand_dims(rt, axis=0) >>> print(expanded.shape, expanded) TensorShape([1, None, None]) [[[1, 2], [3]]] >>> expanded = ragged.expand_dims(rt, axis=1) >>> print(expanded.shape, expanded) TensorShape([2, None, None]) [[[1, 2]], [[3]]] >>> expanded = ragged.expand_dims(rt, axis=2) >>> print(expanded.shape, expanded) TensorShape([2, None, 1]) [[[1], [2]], [[3]]] ``` """ with ops.name_scope(name, 'RaggedExpandDims', [input]): input = ragged_tensor.convert_to_tensor_or_ragged_tensor( input, name='input') if not ragged_tensor.is_ragged(input): return array_ops.expand_dims(input, axis) ndims = None if input.shape.ndims is None else input.shape.ndims + 1 axis = ragged_util.get_positive_axis(axis, ndims) if axis == 0: values = input splits = array_ops.stack([0, input.nrows()]) elif axis == 1: values = input splits = math_ops.range(input.nrows() + 1) else: values = expand_dims(input.values, axis - 1) splits = input.row_splits return ragged_tensor.RaggedTensor.from_row_splits(values, splits, validate=False) #=============================================================================== # RaggedTensor Size #=============================================================================== def size(input, out_type=dtypes.int32, name=None): # pylint: disable=redefined-builtin """Returns the size of a potentially ragged tensor. The size of a ragged tensor is the size of its inner values. 
Args: input: A potentially ragged `Tensor`. out_type: The numeric output type for the operation. name: A name for the operation (optional). Returns: A Tensor of type `out_type`. #### Example: ```python >>> tf.size(tf.ragged.constant([[1, 2], [3]])) 3 ``` """ if ragged_tensor.is_ragged(input): return array_ops.size(input.flat_values, out_type=out_type, name=name) else: return array_ops.size(input, out_type=out_type, name=name) #=============================================================================== # ragged.rank #=============================================================================== def rank(input, name=None): # pylint: disable=redefined-builtin """Returns the rank of a RaggedTensor. Returns a 0-D `int32` `Tensor` representing the rank of `input`. For example: ```python # shape of tensor 't' is [2, None, None] t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]]) tf.rank(t) # 3 ``` Args: input: A `RaggedTensor` name: A name for the operation (optional). Returns: A `Tensor` of type `int32`. """ with ops.name_scope(name, 'RaggedRank', [input]) as name: if not ragged_tensor.is_ragged(input): return array_ops.rank(input, name) return input.ragged_rank + array_ops.rank(input.flat_values) #=============================================================================== # ragged.one_hot #=============================================================================== def ragged_one_hot(indices, depth, on_value=None, off_value=None, axis=None, dtype=None, name=None): """Applies tf.one_hot along the values of a RaggedTensor.""" with ops.name_scope(name, 'RaggedOneHot', [indices]): indices = ragged_tensor.convert_to_tensor_or_ragged_tensor( indices, name='indices') if axis is not None: axis = ragged_util.get_positive_axis(axis, indices.shape.ndims) if axis < indices.ragged_rank: raise ValueError('axis may not be less than indices.ragged_rank.') return indices.with_flat_values( array_ops.one_hot(indices.flat_values, depth, on_value, off_value, axis, dtype, name)) #=============================================================================== # ragged.stack_dynamic_partitions #=============================================================================== @tf_export('ragged.stack_dynamic_partitions') def stack_dynamic_partitions(data, partitions, num_partitions, name=None): """Stacks dynamic partitions of a Tensor or RaggedTensor. Returns a RaggedTensor `output` with `num_partitions` rows, where the row `output[i]` is formed by stacking all slices `data[j1...jN]` such that `partitions[j1...jN] = i`. Slices of `data` are stacked in row-major order. If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`. ####Example: ```python >>> data = ['a', 'b', 'c', 'd', 'e'] >>> partitions = [ 3, 0, 2, 2, 3] >>> num_partitions = 5 >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions) <RaggedTensor [['b'], [], ['c', 'd'], ['a', 'e'], []]> ``` Args: data: A `Tensor` or `RaggedTensor` containing the values to stack. partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the partition that each slice of `data` should be added to. `partitions.shape` must be a prefix of `data.shape`. Values must be greater than or equal to zero, and less than `num_partitions`. `partitions` is not required to be sorted. num_partitions: An `int32` or `int64` scalar specifying the number of partitions to output. This determines the number of rows in `output`. 
name: A name prefix for the returned tensor (optional). Returns: A `RaggedTensor` containing the stacked partitions. The returned tensor has the same dtype as `data`, and its shape is `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a ragged dimension whose length is the number of data slices stacked for each `partition`. """ with ops.name_scope(name, 'SegmentStack', [data, partitions, num_partitions]): # Convert inputs to tensors. data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data') row_splits_dtype = ( data.row_splits.dtype if isinstance(data, ragged_tensor.RaggedTensor) else None) partitions = ragged_tensor.convert_to_tensor_or_ragged_tensor( partitions, name='partitions', preferred_dtype=row_splits_dtype) num_partitions = ops.convert_to_tensor( num_partitions, name='num_partitions', preferred_dtype=partitions.dtype) if row_splits_dtype is not None: partitions = math_ops.cast(partitions, row_splits_dtype) num_partitions = math_ops.cast(num_partitions, partitions.dtype) # Sanity-checks for shapes. partitions_rank = partitions.shape.ndims if partitions_rank is None: raise ValueError('partitions must have known rank.') num_partitions.shape.assert_has_rank(0) partitions.shape.assert_is_compatible_with(data.shape[:partitions_rank]) if partitions_rank == 0: # If partitions is a scalar, then just create a RaggedTensor containing # that single the complete `data` value in the specified row. return ragged_tensor.RaggedTensor.from_value_rowids( values=array_ops.stack([data]), value_rowids=array_ops.stack([partitions]), nrows=num_partitions, validate=False) elif partitions_rank == 1: # If partitions is a vector (the typical case): we can just use data and # partitions as the `values` and `value_rowids` for `from_value_rowids`, # as long as we sort them first. permutation = sort_ops.argsort(partitions, stable=True) value_rowids = array_ops.gather(partitions, permutation) values = array_ops.gather(data, permutation) check = check_ops.assert_less( value_rowids[-1:], num_partitions, message='partitions must be less than num_partitions') with ops.control_dependencies([check]): return ragged_tensor.RaggedTensor.from_value_rowids( values, value_rowids, nrows=num_partitions, validate=False) else: # Handle higher-dimensional partitions via recursion. if not isinstance(data, ragged_tensor.RaggedTensor): data = ragged_tensor.RaggedTensor.from_tensor( data, row_splits_dtype=partitions.dtype, ragged_rank=1) if not isinstance(partitions, ragged_tensor.RaggedTensor): partitions = ragged_tensor.RaggedTensor.from_tensor( partitions, row_splits_dtype=partitions.dtype, ragged_rank=max(data.ragged_rank, partitions_rank - 1)) check = check_ops.assert_equal( data.row_splits, partitions.row_splits, message='data and partitions have incompatible ragged shapes') with ops.control_dependencies([check]): return stack_dynamic_partitions(data.values, partitions.values, num_partitions)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_array_ops.py
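A short, hedged usage sketch for the ops defined in `ragged_array_ops.py` above (`size`, `expand_dims`, `stack_dynamic_partitions`); it assumes TensorFlow 1.15 with eager execution enabled, and the commented values follow the docstring examples in the file.

```python
import tensorflow as tf
from tensorflow.python.ops.ragged import ragged_array_ops

tf.compat.v1.enable_eager_execution()  # TF 1.x: run ops immediately

rt = tf.ragged.constant([[1, 2], [3]])
print(tf.size(rt))                               # => 3 (size of the flat values)
print(ragged_array_ops.expand_dims(rt, axis=1))  # => [[[1, 2]], [[3]]]

# stack_dynamic_partitions is exported as tf.ragged.stack_dynamic_partitions.
print(tf.ragged.stack_dynamic_partitions(
    data=['a', 'b', 'c', 'd', 'e'],
    partitions=[3, 0, 2, 2, 3],
    num_partitions=5))
# => [['b'], [], ['c', 'd'], ['a', 'e'], []]  (strings print as bytes in TF 1.x)
```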
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Shapes & broadcasting for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_config from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged import ragged_util class RaggedTensorDynamicShape(object): """A collection of tensors encoding the shape of a potentially ragged tensor. Each `RaggedTensorDynamicShape` consists of an ordered list of dimension sizes. There are two dimension types: * "Uniform dimensions" are dimenisons where all slices have the same length. `RaggedTensorDynamicShape` records the size of each uniform dimension using a single scalar integer. * "Ragged dimensions" are dimensions whose slices may have different lengths. `RaggedTensorDynamicShape` records the size of each ragged dimension using an integer vector containing the slice lengths for all the slices across that dimension. Furthermore, there are two ways a dimension might be encoded: * "Partitioned dimensions" are dimensions that are encoded using a `RaggedTensor`'s `nested_row_splits`. The outermostmost partitioned dimension must be uniform, and the innermost partitioned dimension must be ragged. * "Inner dimensions" are dimensions that are encoded using a `RaggedTensor`'s `flat_values`. Inner dimensions are always uniform. The sizes of partitioned dimensions are recorded using `partitioned_dim_sizes` and `inner_dim_sizes`: * `paritioned_dim_sizes` is a list of tensors (one for each partitioned dimension). * For uniform dimensions, the tensor is an integer scalar specifying the size of all slices across that dimension. * For ragged dimensions, the tensor is an integer vector specifying the size of each slice across that dimension. * `inner_dim_sizes` is a single integer vector, where each element specifies the size of a single inner dimension. Examples: Tensor | Ragged | Partitioned Dim Sizes | Inner Dim : Rank : : Sizes ------------------------------ | ------ | ---------------------- | ---------- `[[1, 2, 3], [4, 5, 6]]` | 0 | | `2, 3` `[[1, 2], [], [3, 4, 5]]` | 1 | `3, (2, 0, 3)` | `[[[1, 2], [3, 4]], [[5, 6]]]` | 1 | `2, (2, 1)` | 2 `[[[1, 2], [3]], [[4, 5]]]` | 2 | `2, (2, 1), (2, 1, 2)` | """ def __init__(self, partitioned_dim_sizes, inner_dim_sizes, dim_size_dtype=None): """Creates a RaggedTensorDynamicShape. 
Args: partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for each partitioned dimension. If dimension `d` is uniform, then `partitioned_dim_sizes[d]` must be an integer scalar, specifying the size of all slices across dimension `d`. If dimension `d` is ragged, then `partitioned_dim_sizes[d]` must be an integer vector, specifying the size of each slice across dimension `d`. inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the number of inner dimensions. `inner_dim_sizes[n]` is the size of all slices across the `n`th inner dimension (which is the `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor. dim_size_dtype: dtype for dimension sizes. If not specified, then it is chosen based on the dtypes of `partitioned_dim_sizes` and `inner_dim_sizes`. """ assert isinstance(partitioned_dim_sizes, (list, tuple)) with ops.name_scope(None, 'RaggedTensorDynamicShape', (partitioned_dim_sizes, inner_dim_sizes)): partitioned_dim_sizes = tuple( ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i) for (i, size) in enumerate(partitioned_dim_sizes)) inner_dim_sizes = ops.convert_to_tensor( inner_dim_sizes, name='inner_dim_sizes') # Validate shapes. if partitioned_dim_sizes: for axis, dimension_size in enumerate(partitioned_dim_sizes): if dimension_size.shape.ndims is None: raise ValueError( 'rank of partitioned_dim_sizes[%d] is unknown' % axis) dimension_size.shape.with_rank_at_most(1) if partitioned_dim_sizes[0].shape.ndims == 1: raise ValueError('outermost partitioned dimension must be uniform') if partitioned_dim_sizes[-1].shape.ndims == 0: raise ValueError('innermost partitioned dimension must be ragged') inner_dim_sizes.shape.assert_has_rank(1) # Convert dimension size tensors to a single dtype. if dim_size_dtype is None: dim_size_dtypes = set([p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1]) if not dim_size_dtypes: dim_size_dtype = dtypes.int64 elif len(dim_size_dtypes) == 1: dim_size_dtype = dim_size_dtypes.pop() else: if not ragged_config.auto_cast_partition_dtype(): raise ValueError('partitioned_dim_sizes must have matching dtypes') dim_size_dtype = dtypes.int64 partitioned_dim_sizes = tuple(math_ops.cast(p, dim_size_dtype) for p in partitioned_dim_sizes) inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype) self._partitioned_dim_sizes = partitioned_dim_sizes self._inner_dim_sizes = inner_dim_sizes def __repr__(self): return ('RaggedTensorDynamicShape' '(partitioned_dim_sizes=%r, inner_dim_sizes=%r)' % (self._partitioned_dim_sizes, self._inner_dim_sizes)) @staticmethod def from_dim_sizes(dim_sizes): """Constructs a ragged shape from a list of dimension sizes. This list contains a single tensor for each dimension, where the tensor is a scalar if the dimension is uniform, or a vector if the dimension is ragged. Args: dim_sizes: List of int32 or int64 scalars or vectors. Returns: A RaggedTensorDynamicShape. """ with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes', [dim_sizes]): dim_sizes = tuple( ops.convert_to_tensor(size, preferred_dtype=dtypes.int64, name='dim_sizes') for size in dim_sizes) # Split the dimensions into partitioned & inner dimensions. 
inner_split = 0 for dim, dim_size in enumerate(dim_sizes): if dim_size.shape.ndims == 1: inner_split = dim + 1 elif dim_size.shape.ndims != 0: raise ValueError('Each dim_size must be a scalar or a vector') return RaggedTensorDynamicShape(dim_sizes[:inner_split], dim_sizes[inner_split:]) @classmethod def from_tensor(cls, rt_input, dim_size_dtype=None): """Constructs a ragged shape for a potentially ragged tensor.""" with ops.name_scope(None, 'RaggedTensorDynamicShapeFromTensor', [rt_input]): rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input) if not ragged_tensor.is_ragged(rt_input): return cls([], array_ops.shape(rt_input)) else: partitioned_dim_sizes = ( (rt_input.nrows(),) + rt_input.nested_row_lengths()) return RaggedTensorDynamicShape( partitioned_dim_sizes, array_ops.shape(rt_input.flat_values)[1:], dim_size_dtype=dim_size_dtype) def dimension_size(self, axis): """Returns the size of slices across the specified dimension.""" if not isinstance(axis, int): raise TypeError('axis must be an integer') partitioned_ndims = len(self._partitioned_dim_sizes) if axis < partitioned_ndims: return self._partitioned_dim_sizes[axis] else: return self._inner_dim_sizes[axis - partitioned_ndims] def is_ragged(self, axis): """Returns true if the indicated dimension is ragged.""" if not isinstance(axis, int): raise TypeError('axis must be an integer') rank = self.rank if axis < 0: raise ValueError('Negative axis values are not supported') elif rank is not None and axis >= rank: raise ValueError('Expected axis=%s < rank=%s' % (axis, rank)) else: return (axis > 0 and axis < len(self._partitioned_dim_sizes) and self._partitioned_dim_sizes[axis].shape.ndims == 1) @property def rank(self): """The number of dimensions in this shape, or None if unknown.""" inner_ndims = tensor_shape.dimension_value(self._inner_dim_sizes.shape[0]) if inner_ndims is None: return None else: return len(self._partitioned_dim_sizes) + inner_ndims @property def partitioned_dim_sizes(self): """The partitioned dimension sizes for this shape. Returns: A `list` of 0-D or 1-D integer `Tensor`. """ return self._partitioned_dim_sizes @property def inner_dim_sizes(self): """The inner dimension sizes for this shape. Returns: A 1-D integer `Tensor`. """ return self._inner_dim_sizes @property def num_partitioned_dimensions(self): """The number of partitioned dimensions in this shape.""" return len(self._partitioned_dim_sizes) @property def num_inner_dimensions(self): """The number of inner dimensions, or `None` if not statically known.""" return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0]) @property def dim_size_dtype(self): """DType used by this shape for dimension sizes.""" return self._inner_dim_sizes.dtype def broadcast_to_rank(self, rank): """Adds leading size-1 dimensions to broadcast `self` to the given rank. E.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)` is `[1, 1, 3, (D2), 4]`. Args: rank: The rank for the returned shape. Returns: A RaggedTensorDynamicShape with `rank` dimensions, whose inner dimensions have the same size as `self` and whose outer dimensions have size `1`. Raises: ValueError: If `self.rank` is unknown or greater than `rank`. """ if self.rank is None: raise ValueError('Unable to broadcast: self.rank is unknown') dims_to_add = rank - self.rank if dims_to_add < 0: raise ValueError('Unable to broadcast: rank=%d must be greater than ' 'self.rank=%d.' 
% (rank, self.rank)) elif dims_to_add == 0: return self elif self._partitioned_dim_sizes: partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes return RaggedTensorDynamicShape(partitioned_dims, self._inner_dim_sizes) else: inner_dims = array_ops.concat( [array_ops.ones([dims_to_add], self.dim_size_dtype), self.inner_dim_sizes], axis=0) return RaggedTensorDynamicShape([], inner_dims) def broadcast_dimension(self, axis, lengths): """Returns a shape that is broadcast-compatible with self & lengths. * If dimension[axis] is uniform and lengths is a scalar, the check that either lengths==1 or axis==1 or lengths==axis, and tile dimension[axis] with tf.where(lengths==axis, 1, axis) repeats. * If dimension[axis] is uniform and lengths is a vector, then check that dimension[axis]==1, and raggedly tile dimension[axis] with lengths repeats. (we can skip tiling if we statically know that slice_lengths == 1??) * If dimension[axis] is ragged and lengths is a scalar, then check that lengths==1. * If dimension[axis] is ragged and lengths is a vector, then check that self.dimension_size(axis) == lengths. Args: axis: `int`. The dimension to broadcast. lengths: 0-D or 1-D integer `Tensor`. Returns: A `RaggedTensorDynamicShape`. """ lengths = ragged_util.convert_to_int_tensor( lengths, name='lengths', dtype=self.dim_size_dtype) # Check whether lengths is a scalar (for uniform dimensions) or # vector (for ragged dimensions). if lengths.shape.ndims is None: raise ValueError('lengths must have a known rank.') elif lengths.shape.ndims > 1: raise ValueError('lengths must be a scalar or vector') else: lengths_is_scalar = (lengths.shape.ndims == 0) # Verify that the shapes are compatible. if self.is_ragged(axis): if lengths_is_scalar: condition = math_ops.equal(lengths, 1) else: condition = math_ops.reduce_all( math_ops.equal(lengths, self.dimension_size(axis))) else: axis_dim_size = self.dimension_size(axis) if lengths_is_scalar: condition = ( math_ops.equal(lengths, 1) | math_ops.equal(axis_dim_size, 1) | math_ops.equal(axis_dim_size, lengths)) else: condition = math_ops.equal(axis_dim_size, 1) broadcast_err = [ 'Unable to broadcast: dimension size mismatch in dimension', axis, 'lengths=', lengths, 'dim_size=', self.dimension_size(axis) ] broadcast_check = control_flow_ops.Assert( condition, data=broadcast_err, summarize=10) with ops.control_dependencies([broadcast_check]): # Partitioned dimensions: if axis < self.num_partitioned_dimensions: if self.is_ragged(axis): # Use an identity op to make sure the check actually gets run. 
return RaggedTensorDynamicShape( self._partitioned_dim_sizes, array_ops.identity(self.inner_dim_sizes)) else: return self._broadcast_uniform_partitioned_dimension(axis, lengths) # Inner dimensions: else: if lengths_is_scalar: return self._broadcast_inner_dimension_to_uniform(axis, lengths) else: if axis == 0: raise ValueError('Unable to broadcast: ' 'outermost dimension must be uniform.') return self._broadcast_inner_dimension_to_ragged(axis, lengths) def num_slices_in_dimension(self, axis): """Returns the total number of slices across the indicated dimension.""" if axis < 0: return constant_op.constant(1, dtype=self.dim_size_dtype) elif self.is_ragged(axis): return math_ops.reduce_sum(self._partitioned_dim_sizes[axis]) else: return self.dimension_size(axis) * self.num_slices_in_dimension(axis - 1) def _broadcast_uniform_partitioned_dimension(self, axis, lengths): """Broadcasts the partitioned dimension `axis` to match `lengths`.""" axis_dim_size = self.dimension_size(axis) partitioned_sizes = list(self._partitioned_dim_sizes[:axis]) if lengths.shape.ndims == 0: lengths = array_ops.where( math_ops.equal(axis_dim_size, 1), lengths, axis_dim_size) repeats = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, 1) splits = array_ops.stack([0, self.num_slices_in_dimension(axis)]) else: splits = math_ops.range( array_ops.size(lengths, out_type=self.dim_size_dtype) + 1) repeats = lengths partitioned_sizes.append(lengths) for dim_size in self._partitioned_dim_sizes[axis + 1:]: if dim_size.shape.ndims == 0: partitioned_sizes.append(dim_size) splits *= dim_size else: partitioned_sizes.append( ragged_util.repeat_ranges(dim_size, splits, repeats)) splits = array_ops.gather( ragged_util.lengths_to_splits(dim_size), splits) inner_sizes = self._inner_dim_sizes return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes) def _broadcast_inner_dimension_to_uniform(self, axis, length): """Broadcasts the inner dimension `axis` to match `lengths`.""" dim_size = self.dimension_size(axis) axis_in_inner_dims = axis - self.num_partitioned_dimensions partitioned_sizes = self._partitioned_dim_sizes inner_sizes = array_ops.concat([ self._inner_dim_sizes[:axis_in_inner_dims], [array_ops.where(math_ops.equal(dim_size, 1), length, dim_size)], self._inner_dim_sizes[axis_in_inner_dims + 1:] ], axis=0) return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes) def _broadcast_inner_dimension_to_ragged(self, axis, lengths): axis_in_inner_dims = axis - self.num_partitioned_dimensions partitioned_sizes = ( self._partitioned_dim_sizes + tuple([ self._inner_dim_sizes[i] for i in range(axis_in_inner_dims) ]) + (lengths,)) inner_sizes = self._inner_dim_sizes[axis_in_inner_dims + 1:] return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes) def with_dim_size_dtype(self, dtype): if dtype not in (dtypes.int32, dtypes.int64): raise ValueError('dtype must be int32 or int64') if self.dim_size_dtype == dtype: return self return RaggedTensorDynamicShape( [math_ops.cast(p, dtype) for p in self._partitioned_dim_sizes], math_ops.cast(self._inner_dim_sizes, dtype)) def broadcast_dynamic_shape(shape_x, shape_y): """Returns the shape formed by broadcasting two shapes to be compatible. Args: shape_x: A `RaggedTensorDynamicShape` shape_y: A `RaggedTensorDynamicShape` Returns: A `RaggedTensorDynamicShape`. Raises: ValueError: If `shape_x` and `shape_y` are not broadcast-compatible. 
""" if not isinstance(shape_x, RaggedTensorDynamicShape): raise TypeError('shape_x must be a RaggedTensorDynamicShape') if not isinstance(shape_y, RaggedTensorDynamicShape): raise TypeError('shape_y must be a RaggedTensorDynamicShape') # Broadcast both shapes to have the same rank. if shape_x.rank is None or shape_y.rank is None: raise ValueError('Unable to broadcast: unknown rank') broadcast_rank = max(shape_x.rank, shape_y.rank) shape_x = shape_x.broadcast_to_rank(broadcast_rank) shape_y = shape_y.broadcast_to_rank(broadcast_rank) # Broadcast dimensions one at a time, starting from the outermost dimension. for axis in range(broadcast_rank): shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis)) shape_y = shape_y.broadcast_dimension(axis, shape_x.dimension_size(axis)) return shape_x def broadcast_to(rt_input, shape, broadcast_inner_dimensions=True): """Broadcasts a potentially ragged tensor to a ragged shape. Tiles `rt_input` as necessary to match the given shape. Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`. Args: rt_input: The potentially ragged tensor to broadcast. shape: A `RaggedTensorDynamicShape` broadcast_inner_dimensions: If false, then inner dimensions will not be tiled. Returns: A potentially ragged tensor whose values are taken from `rt_input`, and whose shape matches `shape`. """ if not isinstance(shape, RaggedTensorDynamicShape): raise TypeError('shape must be a RaggedTensorDynamicShape') rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input) # Broadcasting to a uniform shape. if shape.num_partitioned_dimensions == 0: return _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions) else: return _broadcast_to_ragged_shape(rt_input, shape, broadcast_inner_dimensions) def _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions): """Broadcasts rt_input to the uniform shape `shape`.""" if isinstance(rt_input, ragged_tensor.RaggedTensor): raise ValueError('Incompatible with shape: ragged rank mismatch') if broadcast_inner_dimensions: return array_ops.broadcast_to(rt_input, shape.inner_dim_sizes) else: return rt_input def _broadcast_to_ragged_shape(rt_input, dst_shape, broadcast_inner_dimensions): """Broadcasts rt_input to the ragged shape `dst_shape`.""" # Check that rt_input and dst_shape have the same row_splits dtype. if (isinstance(rt_input, ragged_tensor.RaggedTensor) and rt_input.row_splits.dtype != dst_shape.dim_size_dtype): if not ragged_config.auto_cast_partition_dtype(): raise ValueError('rt_input and dst_shape have different row_split ' 'dtypes; use RaggedTensor.with_row_splits_dtype() or ' 'RaggedTensorDynamicShape.with_dim_size_dtype() to ' 'convert to a compatible dtype.') rt_input = rt_input.with_row_splits_dtype(dtypes.int64) dst_shape = dst_shape.with_dim_size_dtype(dtypes.int64) # dst_shape's rank and ragged_rank must be greater than or equal to rt_input's if rt_input.shape.ndims is None or dst_shape.rank is None: raise ValueError('Unable to broadcast: unknown rank') if rt_input.shape.ndims > dst_shape.rank: raise ValueError('Incompatible with shape: rank mismatch') if (isinstance(rt_input, ragged_tensor.RaggedTensor) and rt_input.ragged_rank >= dst_shape.num_partitioned_dimensions): raise ValueError('Incompatible with shape: ragged rank mismatch') src_shape = RaggedTensorDynamicShape.from_tensor(rt_input) src_shape = src_shape.broadcast_to_rank(dst_shape.rank) # Add dimensions to rt_input so its rank and ragged_rank matches dst_shape. 
if dst_shape.rank > rt_input.shape.ndims: if rt_input.shape.ndims < dst_shape.num_inner_dimensions + 1: rt_input = array_ops.reshape( rt_input, array_ops.concat([[-1], dst_shape.inner_dim_sizes], axis=0)) for _ in range(dst_shape.rank - rt_input.shape.ndims): if ragged_tensor.is_ragged(rt_input): nrows = rt_input.nrows() else: nrows = array_ops.shape(rt_input, out_type=dst_shape.dim_size_dtype)[0] rt_input = ragged_tensor.RaggedTensor.from_row_lengths(rt_input, [nrows], validate=False) # Add ragged dimensions to match dst_shape. if ragged_tensor.is_ragged(rt_input): inner_rank_diff = ( rt_input.flat_values.shape.ndims - 1 - dst_shape.num_inner_dimensions) if inner_rank_diff > 0: rt_input = rt_input.with_flat_values( ragged_tensor.RaggedTensor.from_tensor( rt_input.flat_values, ragged_rank=inner_rank_diff, row_splits_dtype=dst_shape.dim_size_dtype)) else: rt_input = ragged_tensor.RaggedTensor.from_tensor( rt_input, ragged_rank=dst_shape.num_partitioned_dimensions - 1, row_splits_dtype=dst_shape.dim_size_dtype) # Do broadcasting for any dimensions that will remain uniform. We can do # these all at once, since they're independent of one another. multiples = [1] * dst_shape.rank for axis in range(dst_shape.num_partitioned_dimensions): if not src_shape.is_ragged(axis) and not dst_shape.is_ragged(axis): src_size = src_shape.dimension_size(axis) dst_size = dst_shape.dimension_size(axis) if ((tensor_util.constant_value(src_size) in (1, None)) and (tensor_util.constant_value(dst_size) != 1)): multiples[axis] = array_ops.where( math_ops.equal(src_size, 1), dst_size, 1) if not all(isinstance(v, int) and v == 1 for v in multiples): multiples = array_ops.stack(multiples, axis=0) rt_input = ragged_array_ops.tile(rt_input, multiples) if broadcast_inner_dimensions: rt_input = rt_input.with_flat_values( array_ops.reshape( rt_input.flat_values, array_ops.concat([[-1], dst_shape.inner_dim_sizes], axis=0))) # Do broadcasting for dimensions that become ragged. We must do these from # outermost to innermost. for axis in range(dst_shape.num_partitioned_dimensions): if not src_shape.is_ragged(axis) and dst_shape.is_ragged(axis): dst_size = dst_shape.dimension_size(axis) rt_input = _ragged_tile_axis(rt_input, axis, dst_size, dst_shape.dim_size_dtype) return rt_input def _ragged_tile_axis(rt_input, axis, repeats, row_splits_dtype): """Tile a dimension of a RaggedTensor to match a ragged shape.""" assert axis > 0 # Outermost dimension may not be ragged. if not ragged_tensor.is_ragged(rt_input): rt_input = ragged_tensor.RaggedTensor.from_tensor( rt_input, ragged_rank=1, row_splits_dtype=row_splits_dtype) if axis > 1: return rt_input.with_values( _ragged_tile_axis(rt_input.values, axis - 1, repeats, row_splits_dtype)) else: src_row_splits = rt_input.nested_row_splits src_row_lengths = rt_input.nested_row_lengths() splits = src_row_splits[0] dst_row_lengths = [repeats] for i in range(1, len(src_row_lengths)): dst_row_lengths.append( ragged_util.repeat_ranges(src_row_lengths[i], splits, repeats)) splits = array_ops.gather(src_row_splits[i], splits) dst_values = ragged_util.repeat_ranges(rt_input.flat_values, splits, repeats) return ragged_tensor.RaggedTensor.from_nested_row_lengths( dst_values, dst_row_lengths, validate=False)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_tensor_shape.py
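The broadcasting helpers in `ragged_tensor_shape.py` above are internal (not exported under `tf.ragged`), so the imports below assume the TF 1.15 source layout; this is a minimal sketch of broadcasting a dense `[2, 1]` operand against a ragged shape.

```python
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor_shape

x = ragged_factory_ops.constant([[1, 2, 3], [4]])   # ragged; row lengths (3, 1)
y = [[10], [20]]                                     # dense; shape [2, 1]

x_shape = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x)
y_shape = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y)
bcast_shape = ragged_tensor_shape.broadcast_dynamic_shape(x_shape, y_shape)

result = ragged_tensor_shape.broadcast_to(y, bcast_shape)
# result is roughly [[10, 10, 10], [20]]: each row of y is tiled out to the
# corresponding (ragged) row length of x.
```

This is the same machinery that ragged elementwise ops use to line up operands before handing the flat values to the underlying dense kernels.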
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_range op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest def prod(values): val = 1 for v in values: val *= v return val # return reduce(lambda x, y: x * y, values, 1) def mean(values): return 1.0 * sum(values) / len(values) def sqrt_n(values): return 1.0 * sum(values) / math.sqrt(len(values)) @test_util.run_all_in_graph_and_eager_modes class RaggedSegmentOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase): def expected_value(self, data, segment_ids, num_segments, combiner): """Find the expected value for a call to ragged_segment_<aggregate>. Args: data: The input RaggedTensor, expressed as a nested python list. segment_ids: The segment ids, as a python list of ints. num_segments: The number of segments, as a python int. combiner: The Python function used to combine values. Returns: The expected value, as a nested Python list. """ self.assertLen(data, len(segment_ids)) # Build an empty (num_segments x ncols) "grouped" matrix ncols = max(len(row) for row in data) grouped = [[[] for _ in range(ncols)] for row in range(num_segments)] # Append values from data[row] to grouped[segment_ids[row]] for row in range(len(data)): for col in range(len(data[row])): grouped[segment_ids[row]][col].append(data[row][col]) # Combine the values. 
return [[combiner(values) for values in grouped_row if values] for grouped_row in grouped] @parameterized.parameters( (ragged_math_ops.segment_sum, sum, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_sum, sum, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_sum, sum, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_sum, sum, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_prod, prod, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_prod, prod, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_prod, prod, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_prod, prod, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_min, min, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_min, min, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_min, min, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_min, min, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_max, max, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_max, max, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_max, max, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_max, max, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_mean, mean, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_mean, mean, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_mean, mean, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_mean, mean, [0, 0, 0, 10, 10, 10]), ) def testRaggedSegment_Int(self, segment_op, combiner, segment_ids): rt_as_list = [[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]] rt = ragged_factory_ops.constant(rt_as_list) num_segments = max(segment_ids) + 1 expected = self.expected_value(rt_as_list, segment_ids, num_segments, combiner) segmented = segment_op(rt, segment_ids, num_segments) self.assertAllEqual(segmented, expected) @parameterized.parameters( (ragged_math_ops.segment_sum, sum, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_sum, sum, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_sum, sum, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_sum, sum, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_prod, prod, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_prod, prod, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_prod, prod, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_prod, prod, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_min, min, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_min, min, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_min, min, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_min, min, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_max, max, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_max, max, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_max, max, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_max, max, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_mean, mean, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_mean, mean, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_mean, mean, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_mean, mean, [0, 0, 0, 10, 10, 10]), (ragged_math_ops.segment_sqrt_n, sqrt_n, [0, 0, 1, 1, 2, 2]), (ragged_math_ops.segment_sqrt_n, sqrt_n, [0, 0, 0, 1, 1, 1]), (ragged_math_ops.segment_sqrt_n, sqrt_n, [5, 4, 3, 2, 1, 0]), (ragged_math_ops.segment_sqrt_n, sqrt_n, [0, 0, 0, 10, 10, 10]), ) def testRaggedSegment_Float(self, segment_op, combiner, segment_ids): rt_as_list = [[0., 1., 2., 3.], [4.], [], [5., 6.], [7.], [8., 9.]] rt = ragged_factory_ops.constant(rt_as_list) num_segments = max(segment_ids) + 1 expected = self.expected_value(rt_as_list, segment_ids, num_segments, combiner) segmented = segment_op(rt, segment_ids, num_segments) self.assertAllClose(segmented, expected) def testRaggedRankTwo(self): rt = ragged_factory_ops.constant([ [[111, 112, 
113, 114], [121],], # row 0 [], # row 1 [[], [321, 322], [331]], # row 2 [[411, 412]] # row 3 ]) # pyformat: disable segment_ids1 = [0, 2, 2, 2] segmented1 = ragged_math_ops.segment_sum(rt, segment_ids1, 3) expected1 = [[[111, 112, 113, 114], [121]], # row 0 [], # row 1 [[411, 412], [321, 322], [331]] # row 2 ] # pyformat: disable self.assertAllEqual(segmented1, expected1) segment_ids2 = [1, 2, 1, 1] segmented2 = ragged_math_ops.segment_sum(rt, segment_ids2, 3) expected2 = [[], [[111+411, 112+412, 113, 114], [121+321, 322], [331]], []] # pyformat: disable self.assertAllEqual(segmented2, expected2) def testRaggedSegmentIds(self): rt = ragged_factory_ops.constant([ [[111, 112, 113, 114], [121],], # row 0 [], # row 1 [[], [321, 322], [331]], # row 2 [[411, 412]] # row 3 ]) # pyformat: disable segment_ids = ragged_factory_ops.constant([[1, 2], [], [1, 1, 2], [2]]) segmented = ragged_math_ops.segment_sum(rt, segment_ids, 3) expected = [[], [111+321, 112+322, 113, 114], [121+331+411, 412]] # pyformat: disable self.assertAllEqual(segmented, expected) def testShapeMismatchError1(self): dt = constant_op.constant([1, 2, 3, 4, 5, 6]) segment_ids = ragged_factory_ops.constant([[1, 2], []]) self.assertRaisesRegexp( ValueError, 'segment_ids.shape must be a prefix of data.shape, ' 'but segment_ids is ragged and data is not.', ragged_math_ops.segment_sum, dt, segment_ids, 3) def testShapeMismatchError2(self): rt = ragged_factory_ops.constant([ [[111, 112, 113, 114], [121]], # row 0 [], # row 1 [[], [321, 322], [331]], # row 2 [[411, 412]] # row 3 ]) # pyformat: disable segment_ids = ragged_factory_ops.constant([[1, 2], [1], [1, 1, 2], [2]]) # Error is raised at graph-building time if we can detect it then. self.assertRaisesRegexp( errors.InvalidArgumentError, 'segment_ids.shape must be a prefix of data.shape.*', ragged_math_ops.segment_sum, rt, segment_ids, 3) # Otherwise, error is raised when we run the graph. segment_ids2 = ragged_tensor.RaggedTensor.from_row_splits( array_ops.placeholder_with_default(segment_ids.values, None), array_ops.placeholder_with_default(segment_ids.row_splits, None)) with self.assertRaisesRegexp( errors.InvalidArgumentError, 'segment_ids.shape must be a prefix of data.shape.*'): self.evaluate(ragged_math_ops.segment_sum(rt, segment_ids2, 3)) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_segment_op_test.py
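A small sketch of the positional ragged segment ops exercised by the tests above; `ragged_math_ops` is an internal module, so the import path assumes the TF 1.15 layout, and the commented result assumes eager execution.

```python
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops

rt = ragged_factory_ops.constant([[1, 2, 3], [4], [], [5, 6]])

# Rows 0 and 1 are combined into segment 0, rows 2 and 3 into segment 1.
# Values are combined column-wise, and columns with no values are dropped.
summed = ragged_math_ops.segment_sum(rt, segment_ids=[0, 0, 1, 1], num_segments=2)
# summed is roughly [[1 + 4, 2, 3], [5, 6]] == [[5, 2, 3], [5, 6]]
```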
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional operations for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops.ragged import ragged_config from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest def map_fn(fn, elems, dtype=None, parallel_iterations=None, back_prop=True, swap_memory=False, infer_shape=True, name=None): """map on the list of tensors unpacked from `elems` on dimension 0. The simplest version of `map_fn` repeatedly applies the callable `fn` to a sequence of elements from first to last. The elements are made of the tensors unpacked from `elems`. `dtype` is the data type of the return value of `fn`. Users must provide `dtype` if it is different from the data type of `elems`. Suppose that `elems` is unpacked into `values`, a list of tensors. The shape of the result tensor is `[values.shape[0]] + fn(values[0]).shape`. This method also allows multi-arity `elems` and output of `fn`. If `elems` is a (possibly nested) list or tuple of tensors, then each of these tensors must have a matching first (unpack) dimension. The signature of `fn` may match the structure of `elems`. That is, if `elems` is `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is: `fn = lambda (t1, [t2, t3, [t4, t5]]):`. Furthermore, `fn` may emit a different structure than its input. For example, `fn` may look like: `fn = lambda t1: return (t1 + 1, t1 - 1)`. In this case, the `dtype` parameter is not optional: `dtype` must be a type or (possibly nested) tuple of types matching the output of `fn`. To apply a functional operation to the nonzero elements of a SparseTensor one of the following methods is recommended. First, if the function is expressible as TensorFlow ops, use ```python result = SparseTensor(input.indices, fn(input.values), input.dense_shape) ``` If, however, the function is not expressible as a TensorFlow op, then use ```python result = SparseTensor( input.indices, map_fn(fn, input.values), input.dense_shape) ``` instead. When executing eagerly, map_fn does not execute in parallel even if `parallel_iterations` is set to a value > 1. 
You can still get the performance benefits of running a function in parallel by using the `tf.contrib.eager.defun` decorator, ```python # Assume the function being used in map_fn is fn. # To ensure map_fn calls fn in parallel, use the defun decorator. @tf.contrib.eager.defun def func(tensor): return tf.map_fn(fn, tensor) ``` Note that if you use the defun decorator, any non-TensorFlow Python code that you may have written in your function won't get executed. See `tf.contrib.eager.defun` for more details. The recommendation would be to debug without defun but switch to defun to get performance benefits of running map_fn in parallel. Args: fn: The callable to be performed. It accepts one argument, which will have the same (possibly nested) structure as `elems`. Its output must have the same structure as `dtype` if one is provided, otherwise it must have the same structure as `elems`. elems: A tensor or (possibly nested) sequence of tensors, each of which will be unpacked along their first dimension. The nested sequence of the resulting slices will be applied to `fn`. dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure of Tensors differing from the structure of `elems`, then `dtype` is not optional and must have the same structure as the output of `fn`. Use `RaggedTensorType` to declare an output of type `RaggedTensor`. parallel_iterations: (optional) The number of iterations allowed to run in parallel. When graph building, the default value is 10. While executing eagerly, the default value is set to 1. back_prop: (optional) True enables support for back propagation. swap_memory: (optional) True enables GPU-CPU memory swapping. infer_shape: (optional) False disables tests for consistent output shapes. name: (optional) Name prefix for the returned tensors. Returns: A possibly nested sequence of potentially ragged tensors. Each tensor packs the results of applying `fn` to tensors unpacked from `elems` along the first dimension, from first to last. Raises: TypeError: if `fn` is not callable or the structure of the output of `fn` and `dtype` do not match, or if elems is a SparseTensor. ValueError: if the lengths of the output of `fn` and `dtype` do not match. #### Examples: ```python elems = np.array([1, 2, 3, 4, 5, 6]) squares = map_fn(lambda x: x * x, elems) # squares == [1, 4, 9, 16, 25, 36] ``` ```python elems = (np.array([1, 2, 3]), np.array([-1, 1, -1])) alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64) # alternate == [-1, 2, -3] ``` ```python elems = np.array([1, 2, 3]) alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64)) # alternates[0] == [1, 2, 3] # alternates[1] == [-1, -2, -3] ``` ```python elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]]) mean = map_fn(tf.reduce_mean, elems) # mean == [2, 4, 6] ``` ```python elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]], dtype=tf.int64) out = map_fn(fn=lambda x: x+1, elems, dtype=ragged.RaggedTensorType(type=tf.int64, ragged_rank=0)) # out = ragged.constant([[2, 3, 4], [5, 6], [7, 8]]) ``` """ if not callable(fn): raise TypeError("fn must be callable.") if isinstance(elems, sparse_tensor.SparseTensor): raise TypeError( "To perform a map on the values of a sparse tensor use either " " SparseTensor(input.indices, fn(input.values), input.dense_shape) or " " SparseTensor(input.indices, map_fn(fn, input.values), " "input.dense_shape)") in_graph_mode = not context.executing_eagerly() # Set the default number of parallel_iterations depending on graph/eager mode. 
if in_graph_mode and not parallel_iterations: parallel_iterations = 10 elif not in_graph_mode and not parallel_iterations: parallel_iterations = 1 if not in_graph_mode and parallel_iterations > 1: logging.log_first_n(logging.WARN, "Setting parallel_iterations > 1 has no " "effect when executing eagerly. Consider calling map_fn" " with tf.contrib.eager.defun to execute fn in " "parallel.", 1) parallel_iterations = 1 input_is_sequence = nest.is_sequence(elems) input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x] def input_pack(x): return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0] elems_flat = input_flatten(elems) elems_flat = ragged_tensor.match_row_splits_dtypes(*elems_flat) with ops.name_scope(name, "map", elems_flat): # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode: # Any get_variable calls in fn will cache the first call locally # and not issue repeated network I/O requests for each iteration. varscope = vs.get_variable_scope() varscope_caching_device_was_none = False if varscope.caching_device is None: # TODO(ebrevdo): Change to using colocate_with here and in other # methods. varscope.set_caching_device(lambda op: op.device) varscope_caching_device_was_none = True elems_flat = [ ragged_tensor.convert_to_tensor_or_ragged_tensor(elem, name="elem") for elem in elems_flat ] # We can either infer the output, or we can assume that it will be the same # as the input structure. dtype = dtype or input_pack([elem.dtype for elem in elems_flat]) # Find the number of iterations, n may be known statically. if isinstance(elems_flat[0], ragged_tensor.RaggedTensor): n = elems_flat[0].nrows(out_type=dtypes.int32) else: static_shape = elems_flat[0].shape if static_shape.ndims is not None and static_shape.ndims < 1: if len(elems_flat) == 1: raise ValueError( "elems must be a 1+ dimensional Tensor, not a scalar") else: raise ValueError( "elements in elems must be 1+ dimensional Tensors, not scalars") n = (tensor_shape.dimension_value(static_shape[0]) or array_ops.shape(elems_flat[0])[0]) n = math_ops.cast(n, dtype=dtypes.int32) # Create a flat list of TAs. # Flatten the dtype structure to a list. dtype_flat = nest.flatten(dtype) # decompose to components dtype_components = [_maybe_decompose_dtype(d) for d in dtype_flat] dtype_components_flat = nest.flatten(dtype_components) # Create TensorArrays. accs_ta = [ tensor_array_ops.TensorArray( dtype=t, dynamic_size=False, infer_shape=infer_shape, size=n) for t in dtype_components_flat ] i = constant_op.constant(0, dtype=dtypes.int32) def compute(i, tas): """The loop body of map_fn. Args: i: the loop counter tas: the flat TensorArray accumulator list Returns: (i + 1, tas): the updated counter + updated TensorArrays Raises: TypeError: if dtype and packed_fn_values structure do not match ValueType: if dtype and packed_fn_values lengths do not match """ # Get Tensors or RaggedTensors sliced at i, then pack it back to the # original structure. packed_values = input_pack([elem_flat[i] for elem_flat in elems_flat]) packed_fn_values = fn(packed_values) # Check that the structure of the output matches what was declared or # inferred. # nest.assert_same_structure(dtype or elems, packed_fn_values) # Flatten and decompose to a list of Tensors flat_fn_values = nest.flatten(packed_fn_values) # If we declared that we are expecting a RaggedTensor output, but we get a # Tensor output. We should try to convert it to a RaggedTensor. 
flat_fn_composite_tensors = list( _convert_declared(flat_fn_values, dtype_flat)) flat_fn_components = [ _maybe_decompose_tensor(t) for t in flat_fn_composite_tensors ] flat_fn_tensors = nest.flatten(flat_fn_components) # Write to TAs. tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_tensors)] return (i + 1, tas) _, r_a = control_flow_ops.while_loop( lambda i, _: i < n, compute, (i, accs_ta), parallel_iterations=parallel_iterations, back_prop=back_prop, swap_memory=swap_memory, maximum_iterations=n) # TODO(akshayka): Remove the in_graph_mode check once caching devices are # supported in Eager if in_graph_mode and varscope_caching_device_was_none: varscope.set_caching_device(None) # Pack back into a list of components results_as_components = nest.pack_sequence_as(dtype_components, r_a) # Stack TensorArrays for Tensor outputs, and concat RaggedTensor outputs. def _stack_or_concat(e): if isinstance(e, _RaggedTensorComponents): return _concat_ragged_tensor_components(e) else: result = e.stack() return result results_flat_components = [ _stack_or_concat(e) for e in results_as_components ] results_packed = [ _maybe_recompose_tensor(c) for c in results_flat_components ] results_packed = nest.pack_sequence_as(dtype, results_packed) return results_packed class _RaggedTensorComponents( collections.namedtuple( "_RaggedTensorComponents", ["flat_values", "nested_row_lengths", "outer_row_length"])): """A namedtuple of components which represent a `RaggedTensor`. _RaggedTensorComponents is a list of components which can be used to create a `RaggedTensor`. Use this class to represent a `RaggedTensor` in situations where nest.flatten and nest.pack_sequence_as should decompose ragged tensors into their components.. The following are a list of components for a `RaggedTensor`: flat_values: The flat and inner values of a RaggedTensor. This could be a `Tensor`, a `TensorArray`, or a data type. nested_row_lengths: a tuple containing the row lengths of each rank. The elements of the tuple could be `Tensor`s or `TensorArray`s. outer_row_length: a `Tensor` or `TensorArray` containing the row length of the `RaggedTensor`'s outermost dimension. See `RaggedTensor` for more details of the use of each component. 
""" __slots__ = () def _concat_ragged_tensor_components(rt_ta): flat_values = rt_ta.flat_values.concat() nested_row_lengths = tuple( row_lengths_ta.concat() for row_lengths_ta in rt_ta.nested_row_lengths) outer_row_length = rt_ta.outer_row_length.concat() return _RaggedTensorComponents( flat_values=flat_values, nested_row_lengths=nested_row_lengths, outer_row_length=outer_row_length) def _maybe_decompose_tensor(rt): """Decompose tensors to their composite tensors.""" if not isinstance(rt, ragged_tensor.RaggedTensor): return rt # The three component pieces we need: # - inner values flat_values = rt.flat_values # - row_splits of the RT splits = rt.nested_row_splits nested_row_lengths = tuple(split[1:] - split[:-1] for split in splits) # - outer row length outer_row_length = array_ops.expand_dims(rt.nrows(), axis=0) return _RaggedTensorComponents( flat_values=flat_values, nested_row_lengths=nested_row_lengths, outer_row_length=outer_row_length, ) def _maybe_recompose_tensor(t): """Reconstructs a _RaggedTensorComponents into a RaggedTensor.""" if not isinstance(t, _RaggedTensorComponents): return t values = t.flat_values nested_row_lengths = tuple(t.nested_row_lengths) for nested_row_length in reversed(nested_row_lengths): values = ragged_tensor.RaggedTensor.from_row_lengths( values, nested_row_length, validate=False) return ragged_tensor.RaggedTensor.from_row_lengths(values, t.outer_row_length, validate=False) def _maybe_decompose_dtype(d): """Decompose dtypes into composite tensors (if necessary).""" if not isinstance(d, ragged_tensor.RaggedTensorType): return d result = _RaggedTensorComponents( flat_values=d.dtype, nested_row_lengths=tuple( d.row_splits_dtype for i in range(d.ragged_rank - 1)), outer_row_length=d.row_splits_dtype, ) return result def _convert_declared(fn_output_flat, output_declared): """Convert outputs which are `Tensor`s into `_RaggedTensorComponents`.""" for current, declared in zip(fn_output_flat, output_declared): if isinstance(declared, ragged_tensor.RaggedTensorType): yield _convert_declared_ragged(current, declared) else: yield current def _convert_declared_ragged(current, declared): """Converts an output with RaggedTensorType into a _RaggedTensorComponents.""" # Check that the ragged ranks match up. # + 1 to account for the rank of the outermost dimension. current_ragged_rank = getattr(current, "ragged_rank", 0) if declared.ragged_rank != current_ragged_rank + 1: raise ValueError( "The declared ragged rank (%d) mismatches the result (%d)" % (declared.ragged_rank, current_ragged_rank + 1)) # Check that dtypes match up. if declared.dtype != current.dtype: raise ValueError( "The declared dtype (%s) mismatches the result (%s)" % (declared.dtype, current.dtype)) if (isinstance(current, ragged_tensor.RaggedTensor) and declared.row_splits_dtype != current.row_splits.dtype): if not ragged_config.auto_cast_partition_dtype(): raise ValueError( "The declared row_splits dtype (%s) mismatches the result (%s)." " Use RaggedTensor.with_row_splits_dtype to convert it." % (declared.row_splits_dtype, current.row_splits.dtype)) current = current.with_row_splits_dtype(declared.row_splits_dtype) if isinstance(current, ragged_tensor.RaggedTensor): return current else: nrows = array_ops.shape(current, out_type=declared.row_splits_dtype)[0] row_length = array_ops.expand_dims(nrows, axis=0) return _RaggedTensorComponents( flat_values=current, nested_row_lengths=(), outer_row_length=row_length)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_map_ops.py
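A minimal sketch of the ragged `map_fn` defined above, mirroring the `tf.reduce_mean` example from its docstring; the internal import path assumes TF 1.15, and eager execution is assumed for the commented result.

```python
import tensorflow as tf
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_map_ops

elems = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [6, 7]])

# fn is called once per ragged row; each row arrives as a dense 1-D Tensor,
# so the output here is a dense 1-D Tensor with one value per input row.
row_means = ragged_map_ops.map_fn(tf.reduce_mean, elems)
# row_means is roughly [2, 4, 6] (integer mean of each row)
```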
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for RaggedTensor.from_sparse.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase): def testDocStringExample(self): st = sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]], values=[1, 2, 3, 4, 5], dense_shape=[4, 3]) rt = RaggedTensor.from_sparse(st) self.assertAllEqual(rt, [[1, 2, 3], [4], [], [5]]) def testEmpty(self): st = sparse_tensor.SparseTensor( indices=array_ops.zeros([0, 2], dtype=dtypes.int64), values=[], dense_shape=[4, 3]) rt = RaggedTensor.from_sparse(st) self.assertAllEqual(rt, [[], [], [], []]) def testBadSparseTensorRank(self): st1 = sparse_tensor.SparseTensor(indices=[[0]], values=[0], dense_shape=[3]) self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2', RaggedTensor.from_sparse, st1) st2 = sparse_tensor.SparseTensor( indices=[[0, 0, 0]], values=[0], dense_shape=[3, 3, 3]) self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2', RaggedTensor.from_sparse, st2) if not context.executing_eagerly(): st3 = sparse_tensor.SparseTensor( indices=array_ops.placeholder(dtypes.int64), values=[0], dense_shape=array_ops.placeholder(dtypes.int64)) self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2', RaggedTensor.from_sparse, st3) def testGoodPartialSparseTensorRank(self): if not context.executing_eagerly(): st1 = sparse_tensor.SparseTensor( indices=[[0, 0]], values=[0], dense_shape=array_ops.placeholder(dtypes.int64)) st2 = sparse_tensor.SparseTensor( indices=array_ops.placeholder(dtypes.int64), values=[0], dense_shape=[4, 3]) # Shouldn't throw ValueError RaggedTensor.from_sparse(st1) RaggedTensor.from_sparse(st2) def testNonRaggedSparseTensor(self): # "index_suffix" means the value of the innermost dimension of the index # (i.e., indices[i][-1]). # See comments in _assert_sparse_indices_are_ragged_right() for more # details/background. # index_suffix of first index is not zero. st1 = sparse_tensor.SparseTensor( indices=[[0, 1], [0, 2], [2, 0]], values=[1, 2, 3], dense_shape=[3, 3]) with self.assertRaisesRegexp(errors.InvalidArgumentError, r'.*SparseTensor is not right-ragged'): self.evaluate(RaggedTensor.from_sparse(st1)) # index_suffix of an index that starts a new row is not zero. 
st2 = sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1], [2, 1]], values=[1, 2, 3], dense_shape=[3, 3]) with self.assertRaisesRegexp(errors.InvalidArgumentError, r'.*SparseTensor is not right-ragged'): self.evaluate(RaggedTensor.from_sparse(st2)) # index_suffix of an index that continues a row skips a cell. st3 = sparse_tensor.SparseTensor( indices=[[0, 1], [0, 1], [0, 3]], values=[1, 2, 3], dense_shape=[3, 3]) with self.assertRaisesRegexp(errors.InvalidArgumentError, r'.*SparseTensor is not right-ragged'): self.evaluate(RaggedTensor.from_sparse(st3)) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py
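A short sketch of the conversion exercised by the tests above, using the public `tf.RaggedTensor.from_sparse` API (TF 1.15 assumed):

```python
import tensorflow as tf

st = tf.SparseTensor(
    indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],
    values=[1, 2, 3, 4, 5],
    dense_shape=[4, 3])

rt = tf.RaggedTensor.from_sparse(st)
# rt is [[1, 2, 3], [4], [], [5]].  The conversion only accepts "right-ragged"
# SparseTensors: each row's values must fill columns 0..k with no gaps,
# otherwise an InvalidArgumentError is raised (see the tests above).
```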
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged.rank op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedRankOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ # Rank 0 dict( test_input=1, expected_rank=0, ), # Rank 1 dict( test_input=[1], expected_rank=1, ), dict( test_input=[1, 2, 3, 4], expected_rank=1, ), # Rank 2 dict( test_input=[[1], [2], [3]], expected_rank=2, ), # Rank 3 dict( test_input=[[[1], [2, 3]], [[4], [5, 6, 7]]], expected_rank=3, ), # Rank 3, ragged_rank=2 dict( test_input=[[[1], [2, 3], [10, 20]], [[4], [5, 6, 7]]], expected_rank=3, ragged_rank=2, ), # Rank 4, ragged_rank=3 with dimensions: {2, (1, 2), (2), (1, 2)} dict( test_input=[[[[1], [2]]], [[[3, 4], [5, 6]], [[7, 8], [9, 10]]]], expected_rank=4, ), # Rank 4, ragged_rank=2 with dimensions: {2, (1, 2), (1, 2), 2} dict( test_input=[ [[[1, 2]]], [[[5, 6], [7, 8]], [[9, 10], [11, 12]]]], expected_rank=4, ragged_rank=2, ), ]) def testRaggedRank(self, test_input, expected_rank, ragged_rank=None): test_input = ragged_factory_ops.constant( test_input, ragged_rank=ragged_rank) self.assertAllEqual(ragged_array_ops.rank( test_input), expected_rank) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_rank_op_test.py
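As a companion to the rank test above, this sketch contrasts the dense rank that `ragged.rank` reports with the `ragged_rank` property; the nested list is borrowed from the test's rank-3 case, and the commented values assume eager execution.

from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[[1], [2, 3]], [[4], [5, 6, 7]]])
# The rank counts every dimension, uniform or ragged:
rt.shape.ndims    # 3, matching expected_rank in the corresponding test case
# ragged_rank counts only the ragged (variable-length) dimensions:
rt.ragged_rank    # 2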
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Python-style indexing and slicing for RaggedTensors.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_tensor def ragged_tensor_getitem(self, key): """Returns the specified piece of this RaggedTensor. Supports multidimensional indexing and slicing, with one restriction: indexing into a ragged inner dimension is not allowed. This case is problematic because the indicated value may exist in some rows but not others. In such cases, it's not obvious whether we should (1) report an IndexError; (2) use a default value; or (3) skip that value and return a tensor with fewer rows than we started with. Following the guiding principles of Python ("In the face of ambiguity, refuse the temptation to guess"), we simply disallow this operation. Any dimensions added by `array_ops.newaxis` will be ragged if the following dimension is ragged. Args: self: The RaggedTensor to slice. key: Indicates which piece of the RaggedTensor to return, using standard Python semantics (e.g., negative values index from the end). `key` may have any of the following types: * `int` constant * Scalar integer `Tensor` * `slice` containing integer constants and/or scalar integer `Tensor`s * `Ellipsis` * `tf.newaxis` * `tuple` containing any of the above (for multidimentional indexing) Returns: A `Tensor` or `RaggedTensor` object. Values that include at least one ragged dimension are returned as `RaggedTensor`. Values that include no ragged dimensions are returned as `Tensor`. See above for examples of expressions that return `Tensor`s vs `RaggedTensor`s. Raises: ValueError: If `key` is out of bounds. ValueError: If `key` is not supported. TypeError: If the indices in `key` have an unsupported type. Examples: ```python >>> # A 2-D ragged tensor with 1 ragged dimension. >>> rt = ragged.constant([['a', 'b', 'c'], ['d', 'e'], ['f'], ['g']]) >>> rt[0].eval().tolist() # First row (1-D `Tensor`) ['a', 'b', 'c'] >>> rt[:3].eval().tolist() # First three rows (2-D RaggedTensor) [['a', 'b', 'c'], ['d', 'e'], '[f'], [g']] >>> rt[3, 0].eval().tolist() # 1st element of 4th row (scalar) 'g' >>> # A 3-D ragged tensor with 2 ragged dimensions. >>> rt = ragged.constant([[[1, 2, 3], [4]], ... [[5], [], [6]], ... [[7]], ... 
[[8, 9], [10]]]) >>> rt[1].eval().tolist() # Second row (2-D RaggedTensor) [[5], [], [6]] >>> rt[3, 0].eval().tolist() # First element of fourth row (1-D Tensor) [8, 9] >>> rt[:, 1:3].eval().tolist() # Items 1-3 of each row (3-D RaggedTensor) [[[4]], [[], [6]], [], [[10]]] >>> rt[:, -1:].eval().tolist() # Last item of each row (3-D RaggedTensor) [[[4]], [[6]], [[7]], [[10]]] ``` """ scope_tensors = [self] + list(_tensors_in_key_list(key)) if isinstance(key, (list, tuple)): key = list(key) else: key = [key] with ops.name_scope(None, "RaggedGetItem", scope_tensors): return _ragged_getitem(self, key) def _ragged_getitem(rt_input, key_list): """Helper for indexing and slicing ragged tensors with __getitem__(). Extracts the specified piece of the `rt_input`. See `RaggedTensor.__getitem__` for examples and restrictions. Args: rt_input: The `RaggedTensor` from which a piece should be returned. key_list: The list of keys specifying which piece to return. Each key corresponds with a separate dimension. Returns: The indicated piece of rt_input. Raises: ValueError: If `key_list` is not supported. TypeError: If any keys in `key_list` have an unsupported type. """ if not key_list: return rt_input row_key = key_list[0] inner_keys = key_list[1:] if row_key is Ellipsis: expanded_key_list = _expand_ellipsis(key_list, rt_input.shape.ndims) return _ragged_getitem(rt_input, expanded_key_list) # Adding a new axis: Get rt_input[inner_keys], and wrap it in a RaggedTensor # that puts all values in a single row. if row_key is array_ops.newaxis: inner_rt = _ragged_getitem(rt_input, inner_keys) nsplits = array_ops.shape(inner_rt.row_splits, out_type=inner_rt.row_splits.dtype)[0] return ragged_tensor.RaggedTensor.from_row_splits( inner_rt, array_ops.stack([0, nsplits - 1]), validate=False) # Slicing a range of rows: first slice the outer dimension, and then # call `_ragged_getitem_inner_dimensions` to handle the inner keys. if isinstance(row_key, slice): sliced_rt_input = _slice_ragged_row_dimension(rt_input, row_key) return _ragged_getitem_inner_dimensions(sliced_rt_input, inner_keys) # Indexing a single row: slice values to get the indicated row, and then # use a recursive call to __getitem__ to handle the inner keys. else: starts = rt_input.row_splits[:-1] limits = rt_input.row_splits[1:] if context.executing_eagerly(): # In python, __getitem__ should throw IndexError for out of bound # indices. This will allow iteration run correctly as python will # translate IndexError into StopIteration for next()/__next__(). # Below is an example: # import tensorflow as tf # r = tf.ragged.constant([[1., 2.], [3., 4., 5.], [6.]]) # for elem in r: # print(elem) # In non eager mode, the exception is thrown when session runs # so we don't know if out of bound happens before. # In eager mode, however, it is possible to find out when to # throw out of bound IndexError. # In the following row_key >= len(starts) is checked. In case of # TypeError which happens when row_key is not an integer, the exception # will simply be ignored as it will be processed later anyway. try: if int(row_key) >= len(starts): raise IndexError("Row key {} out of bounds".format(row_key)) except (TypeError, ValueError): pass row = rt_input.values[starts[row_key]:limits[row_key]] return row.__getitem__(inner_keys) def _slice_ragged_row_dimension(rt_input, row_key): """Slice the outer dimension of `rt_input` according to the given `slice`. Args: rt_input: The `RaggedTensor` to slice. row_key: The `slice` object that should be used to slice `rt_input`. 
Returns: A `RaggedTensor` containing the indicated slice of `rt_input`. """ if row_key.start is None and row_key.stop is None and row_key.step is None: return rt_input # Use row_key to slice the starts & limits. new_starts = rt_input.row_splits[:-1][row_key] new_limits = rt_input.row_splits[1:][row_key] zero_pad = array_ops.zeros([1], rt_input.row_splits.dtype) # If there's no slice step, then we can just select a single continuous # span of `ragged.values(rt_input)`. if row_key.step is None or row_key.step == 1: # Construct the new splits. If new_starts and new_limits are empty, # then this reduces to [0]. Otherwise, this reduces to: # concat([[new_starts[0]], new_limits]) new_splits = array_ops.concat( [zero_pad[array_ops.size(new_starts):], new_starts[:1], new_limits], axis=0) values_start = new_splits[0] values_limit = new_splits[-1] return ragged_tensor.RaggedTensor.from_row_splits( rt_input.values[values_start:values_limit], new_splits - values_start, validate=False) # If there is a slice step (aka a strided slice), then use ragged_gather to # collect the necessary elements of `ragged.values(rt_input)`. else: return _build_ragged_tensor_from_value_ranges(new_starts, new_limits, 1, rt_input.values) def _ragged_getitem_inner_dimensions(rt_input, key_list): """Retrieve inner dimensions, keeping outermost dimension unchanged. Args: rt_input: The `RaggedTensor` or `Tensor` from which a piece should be extracted. key_list: The __getitem__ keys for slicing the inner dimensions. Returns: A `RaggedTensor`. Raises: ValueError: If key_list is not supported. """ if not key_list: return rt_input if isinstance(rt_input, ops.Tensor): return rt_input.__getitem__([slice(None, None, None)] + key_list) column_key = key_list[0] if column_key is Ellipsis: expanded_key_list = _expand_ellipsis(key_list, rt_input.values.shape.ndims) return _ragged_getitem_inner_dimensions(rt_input, expanded_key_list) # Adding a new axis to a ragged inner dimension: recursively get the inner # dimensions of rt_input with key_list[1:], and then wrap the result in a # RaggedTensor that puts each value in its own row. if column_key is array_ops.newaxis: inner_rt = _ragged_getitem_inner_dimensions(rt_input, key_list[1:]) nsplits = array_ops.shape(inner_rt.row_splits, out_type=inner_rt.row_splits.dtype)[0] return ragged_tensor.RaggedTensor.from_row_splits(inner_rt, math_ops.range(nsplits), validate=False) # Slicing a range of columns in a ragged inner dimension. We use a # recursive call to process the values, and then assemble a RaggedTensor # with those values. if isinstance(column_key, slice): if (column_key.start is None and column_key.stop is None and column_key.step is None): # Trivial slice: recursively process all values, & splits is unchanged. return rt_input.with_values( _ragged_getitem_inner_dimensions(rt_input.values, key_list[1:])) else: # Nontrivial slice: use ragged_gather to extract the indicated slice as # a new RaggedTensor (inner_rt), and then recursively process its values. # The splits can be taken from inner_rt.row_splits(). 
inner_rt_starts = rt_input.row_splits[:-1] inner_rt_limits = rt_input.row_splits[1:] if column_key.start is not None and column_key.start != 0: inner_rt_starts = _add_offset_to_ranges( column_key.start, rt_input.row_splits[:-1], rt_input.row_splits[1:]) if column_key.stop is not None and column_key.stop != 0: inner_rt_limits = _add_offset_to_ranges( column_key.stop, rt_input.row_splits[:-1], rt_input.row_splits[1:]) inner_rt = _build_ragged_tensor_from_value_ranges( inner_rt_starts, inner_rt_limits, column_key.step, rt_input.values) return inner_rt.with_values( _ragged_getitem_inner_dimensions(inner_rt.values, key_list[1:])) # Indexing a single column in a ragged inner dimension: raise an Exception. # See RaggedTensor.__getitem__.__doc__ for an explanation of why indexing # into a ragged inner dimension is problematic. else: raise ValueError("Cannot index into an inner ragged dimension.") def _expand_ellipsis(key_list, num_remaining_dims): """Expands the ellipsis at the start of `key_list`. Assumes that the first element of `key_list` is Ellipsis. This will either remove the Ellipsis (if it corresponds to zero indices) or prepend a new `slice(None, None, None)` (if it corresponds to more than zero indices). Args: key_list: The arguments to `__getitem__()`. num_remaining_dims: The number of dimensions remaining. Returns: A copy of `key_list` with he ellipsis expanded. Raises: ValueError: If ragged_rank.shape.ndims is None IndexError: If there are too many elements in `key_list`. """ if num_remaining_dims is None: raise ValueError("Ellipsis not supported for unknown shape RaggedTensors") num_indices = sum(1 for idx in key_list if idx is not array_ops.newaxis) if num_indices > num_remaining_dims + 1: raise IndexError("Too many indices for RaggedTensor") elif num_indices == num_remaining_dims + 1: return key_list[1:] else: return [slice(None, None, None)] + key_list def _tensors_in_key_list(key_list): """Generates all Tensors in the given slice spec.""" if isinstance(key_list, ops.Tensor): yield key_list if isinstance(key_list, (list, tuple)): for v in key_list: for tensor in _tensors_in_key_list(v): yield tensor if isinstance(key_list, slice): for tensor in _tensors_in_key_list(key_list.start): yield tensor for tensor in _tensors_in_key_list(key_list.stop): yield tensor for tensor in _tensors_in_key_list(key_list.step): yield tensor def _build_ragged_tensor_from_value_ranges(starts, limits, step, values): """Returns a `RaggedTensor` containing the specified sequences of values. Returns a RaggedTensor `output` where: ```python output.shape[0] = starts.shape[0] output[i] = values[starts[i]:limits[i]:step] ``` Requires that `starts.shape == limits.shape` and `0 <= starts[i] <= limits[i] <= values.shape[0]`. Args: starts: 1D integer Tensor specifying the start indices for the sequences of values to include. limits: 1D integer Tensor specifying the limit indices for the sequences of values to include. step: Integer value specifying the step size for strided slices. values: The set of values to select from. Returns: A `RaggedTensor`. Raises: ValueError: Until the prerequisite ops are checked in. """ # Use `ragged_range` to get the index of each value we should include. 
if step is None: step = 1 step = ops.convert_to_tensor(step, name="step") if step.dtype.is_integer: step = math_ops.cast(step, starts.dtype) else: raise TypeError("slice strides must be integers or None") value_indices = ragged_math_ops.range(starts, limits, step, row_splits_dtype=starts.dtype) # Use `ragged_gather` or `array_ops.gather` to collect the values. if isinstance(values, ragged_tensor.RaggedTensor): gathered_values = ragged_gather_ops.gather( params=values, indices=value_indices.values) else: gathered_values = array_ops.gather( params=values, indices=value_indices.values) # Assemble the RaggedTensor from splits & values. return value_indices.with_values(gathered_values) def _add_offset_to_ranges(offset, starts, limits): """Adds an indexing offset to each of the specified ranges. If offset>=0, then return output[i]=min(starts[i]+offset, limits[i]) If offset<0, then return output[i]=max(limits[i]+offset, starts[i]) Args: offset: The offset to add. None, or an int, or a scalar Tensor. starts: 1-D integer tensor containing start indices. limits: 1-D integer tensor containing limit indices. Returns: A 1-D integer tensor. """ def map_positive_offset(offset): return math_ops.minimum(starts + offset, limits) def map_negative_offset(offset): return math_ops.maximum(limits + offset, starts) if isinstance(offset, ops.Tensor): offset = math_ops.cast(offset, starts.dtype) return control_flow_ops.cond(offset >= 0, lambda: map_positive_offset(offset), lambda: map_negative_offset(offset)) elif isinstance(offset, int): return (map_positive_offset(offset) if offset > 0 else map_negative_offset(offset)) else: raise TypeError("slice offsets must be integers or None")
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_getitem.py
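A short indexing sketch for the `__getitem__` implementation above, reusing the 2-D example from the module docstring; the commented results assume eager execution. Slicing a ragged dimension is allowed, while indexing a single ragged column is deliberately rejected.

from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([['a', 'b', 'c'], ['d', 'e'], ['f'], ['g']])
row = rt[0]        # dense Tensor: [b'a', b'b', b'c']
rows = rt[:3]      # RaggedTensor: [[b'a', b'b', b'c'], [b'd', b'e'], [b'f']]
tails = rt[:, 1:]  # RaggedTensor: [[b'b', b'c'], [b'e'], [], []]
# rt[:, 0] raises ValueError("Cannot index into an inner ragged dimension."):
# in general some rows may not have the requested column, so the result would
# be ambiguous, as the docstring above explains.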
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_array_ops.tile.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedTileOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ #========================================================================= # Docstring Example #========================================================================= dict( descr='docstring example: ragged_rank=1, repeat axes 0 and 1', rt_input=[[1, 2], [3]], multiples=[3, 2], expected=[ [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]], ), #========================================================================= # rank=3, ragged_rank=2 #========================================================================= dict( descr='rank=3, ragged_rank=2, repeat axis 0', rt_input=[[[1, 2], [3]], [], [[4]]], multiples=[2, 1, 1], expected=[[[1, 2], [3]], [], [[4]], [[1, 2], [3]], [], [[4]]]), dict( descr='rank=3, ragged_rank=2, repeat axis 1', rt_input=[[[1, 2], [3]], [], [[4]]], multiples=[1, 2, 1], expected=[[[1, 2], [3], [1, 2], [3]], [], [[4], [4]]]), dict( descr='rank=3, ragged_rank=2, repeat axis 2', rt_input=[[[1, 2], [3]], [], [[4]]], multiples=[1, 1, 2], expected=[[[1, 2, 1, 2], [3, 3]], [], [[4, 4]]]), dict( descr='rank=3, ragged_rank=2, repeat axes 0 and 1', rt_input=[[[1, 2], [3]], [], [[4]]], multiples=[2, 2, 1], expected=[[[1, 2], [3], [1, 2], [3]], [], [[4], [4]], [[1, 2], [3], [1, 2], [3]], [], [[4], [4]]]), dict( descr='rank=3, ragged_rank=2, repeat axes 0 and 2', rt_input=[[[1, 2], [3]], [], [[4]]], multiples=[2, 1, 2], expected=[[[1, 2, 1, 2], [3, 3]], [], [[4, 4]], [[1, 2, 1, 2], [3, 3]], [], [[4, 4]]]), dict( descr='rank=3, ragged_rank=2, repeat axes 1 and 2', rt_input=[[[1, 2], [3]], [], [[4]]], multiples=[1, 2, 2], expected=[[[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]], [], [[4, 4], [4, 4]]]), dict( descr='rank=3, ragged_rank=2, repeat all axes', rt_input=[[['a', 'b'], ['c']], [], [['d']]], multiples=[4, 3, 2], expected=[[[b'a', b'b']*2, [b'c']*2]*3, []*3, [[b'd']*2]*3]*4), #========================================================================= # rank=3, ragged_rank=1 #========================================================================= dict( descr='rank=3, ragged_rank=1, repeat axis 0', ragged_rank=1, rt_input=[[[1, 2], [3, 4]], [], [[5, 6]]], multiples=[2, 1, 1], expected=[[[1, 2], [3, 4]], 
[], [[5, 6]], [[1, 2], [3, 4]], [], [[5, 6]]]), dict( descr='rank=3, ragged_rank=1, repeat axis 1', ragged_rank=1, rt_input=[[[1, 2], [3, 4]], [], [[5, 6]]], multiples=[1, 2, 1], expected=[[[1, 2], [3, 4], [1, 2], [3, 4]], [], [[5, 6], [5, 6]]]), dict( descr='rank=3, ragged_rank=1, repeat axis 2', ragged_rank=1, rt_input=[[[1, 2], [3, 4]], [], [[5, 6]]], multiples=[1, 1, 2], expected=[[[1, 2, 1, 2], [3, 4, 3, 4]], [], [[5, 6, 5, 6]]]), #========================================================================= # rank=4, ragged_rank=3 #========================================================================= dict( descr='rank=4, ragged_rank=3, repeat axis 0', rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]], multiples=[2, 1, 1, 1], expected=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]], [[[1], [2]], [[3]]], [[]], [[[4, 5]]]]), dict( descr='rank=4, ragged_rank=3, repeat axis 1', rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]], multiples=[1, 2, 1, 1], expected=[[[[1], [2]], [[3]], [[1], [2]], [[3]]], [[], []], [[[4, 5]], [[4, 5]]]]), dict( descr='rank=4, ragged_rank=3, repeat axis 2', rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]], multiples=[1, 1, 2, 1], expected=[[[[1], [2], [1], [2]], [[3], [3]]], [[]], [[[4, 5], [4, 5]]]]), dict( descr='rank=4, ragged_rank=3, repeat axis 3', rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]], multiples=[1, 1, 1, 2], expected=[[[[1, 1], [2, 2]], [[3, 3]]], [[]], [[[4, 5, 4, 5]]]]), dict( descr='rank=4, ragged_rank=3, repeat all axes', rt_input=[[[['a'], ['b']], [['c']]], [[]], [[['d', 'e']]]], multiples=[5, 4, 3, 2], expected=[[[[b'a']*2, [b'b']*2]*3, [[b'c']*2]*3]*4, [[]*3]*4, [[[b'd', b'e']*2]*3]*4]*5), dict( descr='rank=5, ragged_rank=4, repeat all axes', rt_input=[[[[['a']]]]], multiples=[6, 5, 4, 3, 2], expected=[[[[[b'a']*2]*3]*4]*5]*6), #========================================================================= # multiple=0 #========================================================================= dict( descr='rank=4, ragged_rank=3, repeat axis 0 (multiple=0)', rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]], multiples=[0, 1, 1, 1], expected=[]), dict( descr='rank=4, ragged_rank=3, repeat axis 1 (multiple=0)', rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]], multiples=[1, 0, 1, 1], expected=[[], [], []]), dict( descr='rank=4, ragged_rank=3, repeat axis 2 (multiple=0)', rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]], multiples=[1, 1, 0, 1], expected=[[[], []], [[]], [[]]]), dict( descr='rank=4, ragged_rank=3, repeat axis 3 (multiple=0)', rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]], multiples=[1, 1, 1, 0], expected=[[[[], []], [[]]], [[]], [[[]]]]), #========================================================================= # multiple=1 #========================================================================= dict( descr='rank=4, multiples=1 (no repeats)', rt_input=[[[[1], [2]], [[3], [4]]], [[[5], [6]]]], multiples=[1, 1, 1, 1], expected=[[[[1], [2]], [[3], [4]]], [[[5], [6]]]]), ]) # pyformat: disable def testRaggedTile(self, descr, rt_input, multiples, expected, ragged_rank=None): rt = ragged_factory_ops.constant(rt_input, ragged_rank) expected_shape = [ None if dim is None else dim * multiple for (dim, multiple) in zip(rt.shape.as_list(), multiples) ] # Test with both const & non-const multiples: ragged_tile has a few code # paths that optimize the case where multiples[d] is known to be 1. 
const_multiples = constant_op.constant(multiples, dtypes.int64) non_const_multiples = array_ops.placeholder_with_default( const_multiples, shape=[len(multiples)]) for multiples_tensor in (const_multiples, non_const_multiples): tiled = ragged_array_ops.tile(rt, multiples_tensor) self.assertEqual(tiled.ragged_rank, rt.ragged_rank) self.assertEqual(tiled.shape.ndims, rt.shape.ndims) if multiples_tensor is const_multiples: self.assertEqual(tiled.shape.as_list(), expected_shape) self.assertAllEqual(tiled, expected) def testRaggedTileWithTensorInput(self): # When the input is a `Tensor`, ragged_tile just delegates to tf.tile. dt = constant_op.constant([[1, 2], [3, 4]]) tiled = ragged_array_ops.tile(dt, [3, 2]) expected = [[1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4], [1, 2, 1, 2], [3, 4, 3, 4]] # pyformat: disable self.assertAllEqual(tiled, expected) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_tile_op_test.py
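A minimal sketch of the docstring example that the tile test above starts from, using the same internal module as the test; the commented result assumes eager execution.

from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[1, 2], [3]])
tiled = ragged_array_ops.tile(rt, [3, 2])
# The row pattern repeats 3 times along axis 0 and each row twice along axis 1:
# [[1, 2, 1, 2], [3, 3],
#  [1, 2, 1, 2], [3, 3],
#  [1, 2, 1, 2], [3, 3]]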
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops to convert between RaggedTensors and other tensor types.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_ragged_conversion_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import ragged_tensor def from_tensor(tensor, lengths=None, padding=None, ragged_rank=1, row_splits_dtype=dtypes.int64, name=None): if ragged_tensor.is_ragged(tensor): return tensor else: return ragged_tensor.RaggedTensor.from_tensor( tensor, lengths=lengths, padding=padding, ragged_rank=ragged_rank, row_splits_dtype=row_splits_dtype, name=name) def to_tensor(rt_input, default_value=None, name=None): if ragged_tensor.is_ragged(rt_input): return rt_input.to_tensor(default_value, name) else: return rt_input def _get_row_partition_type_tensor_pairs_tail(rt_value): """Gets a list of the row partitions for rt_value. If parent_indices are defined, then they are used. Otherwise, row_splits are used. This assumes that rt_input is nested inside another RaggedTensor. If it is a tensor, then return an empty list. Args: rt_value: a ragged tensor value. May be a tensor. Returns: A list of (row_partition_type, row_partition_tensor) pairs. """ if isinstance(rt_value, ragged_tensor.RaggedTensor): tail = _get_row_partition_type_tensor_pairs_tail(rt_value.values) if rt_value._cached_value_rowids is not None: # pylint: disable=protected-access return [("VALUE_ROWIDS", rt_value.value_rowids())] + tail else: return [("ROW_SPLITS", rt_value.row_splits)] + tail return [] def _get_row_partition_type_tensor_pairs(rt_input): """Gets a list of the row partitions for rt_input. If value_rowids are defined, then they are used. Otherwise, row_splits are used. If the outermost level has value_rowids defind, then nrows is also added. Args: rt_input: a ragged tensor. Returns: A list of (row_partition_type, row_partition_tensor) pairs. """ tail = _get_row_partition_type_tensor_pairs_tail(rt_input.values) if rt_input._cached_value_rowids is not None: # pylint: disable=protected-access return [("FIRST_DIM_SIZE", rt_input.nrows()), ("VALUE_ROWIDS", rt_input.value_rowids())] + tail else: return [("ROW_SPLITS", rt_input.row_splits)] + tail def _shape_as_tensor(shape, dtype): """Takes shape and coerces it to a shape as a tensor. If the object is already a tensor, simply passes it on (result is guaranteed to be int64 or int32, but not necessarily dtype). If not, creates a tensor of type dtype. Result is either a scalar equal to -1 if the shape is unknown_rank. 
Otherwise, it is a vector, where unknown dimensions are represented with a value of -1. In C++, see TensorShapeFromTensor for parsing shapes in kernels, and InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for use in the shape inference function. Args: shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]], Tuple[Optional[Int]]. dtype: tf.int64 or tf.int32 Returns: a scalar or vector tensor of dtype tf.int32 or tf.int64. """ if dtype != dtypes.int64 and dtype != dtypes.int32: raise ValueError("Expected int64 or int32 for dtype: got {}".format(dtype)) if isinstance(shape, ops.Tensor): if shape.dtype != dtypes.int64 and shape.dtype != dtypes.int32: return math_ops.cast(shape, dtype) return shape shape = tensor_shape.as_shape(shape) if not shape: # Imply rank is unknown using a -1 scalar. return constant_op.constant(-1, dtype=dtype) shape = [(-1 if x is None else x) for x in shape.as_list()] # At this point, shape is List[Int]. return constant_op.constant(shape, dtype=dtype) # TODO(martinz): add a gradient for this op. # TODO(martinz): this is a replacement for RaggedTensor.to_tensor. Move this # after there is a chance for the kernels to propagate. def ragged_to_dense(rt_input, default_value=None, shape=None): """Create a dense tensor from a ragged tensor. If the shape is None, then the resulting dense tensor is the same size as the maximum length of the ragged tensor in each dimension. If the shape is not None, then it must be the same number of dimensions as the ragged tensor. For dimension i, if shape[i] is None, then the maximum length of the ragged tensor in that dimension is the size of the output in that dimension. If shape[i] is an integer, then that is the size of the output in that dimension. Args: rt_input: the tensor to densify. default_value: used when a value is missing. shape: the shape of the resulting tensor. Returns: a dense tensor. """ type_tensor_pairs = _get_row_partition_type_tensor_pairs(rt_input) row_partition_types = [x[0] for x in type_tensor_pairs] row_partition_tensors = [x[1] for x in type_tensor_pairs] values = rt_input.flat_values if default_value is None: default_value = array_ops.zeros((), values.dtype) shape_tensor = _shape_as_tensor(shape, row_partition_tensors[0].dtype) return gen_ragged_conversion_ops.ragged_tensor_to_tensor( shape=shape_tensor, values=values, default_value=default_value, row_partition_types=row_partition_types, row_partition_tensors=row_partition_tensors) def to_sparse(rt_input, name=None): return rt_input.to_sparse(name) def from_sparse(st_input, name=None): return ragged_tensor.RaggedTensor.from_sparse(st_input, name)
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_conversion_ops.py
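A round-trip sketch for the conversion helpers defined above: `to_tensor` pads short rows with a default value, and `from_tensor` strips a padding value back off. The parameter values are illustrative and the commented results assume eager execution.

from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[1, 2, 3], [4], [], [5]])
dense = ragged_conversion_ops.to_tensor(rt, default_value=0)
# dense == [[1, 2, 3], [4, 0, 0], [0, 0, 0], [5, 0, 0]]
back = ragged_conversion_ops.from_tensor(dense, padding=0)
# back == [[1, 2, 3], [4], [], [5]]  (trailing padding values are dropped)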
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_util from tensorflow.python.platform import googletest # Example 3d tensor for test cases. Has shape [4, 2, 3]. TENSOR_3D = [[[('%d%d%d' % (i, j, k)).encode('utf-8') for k in range(3)] for j in range(2)] for i in range(4)] # Example 4d tensor for test cases. Has shape [4, 2, 3, 5]. TENSOR_4D = [[[[('%d%d%d%d' % (i, j, k, l)).encode('utf-8') for l in range(5)] for k in range(3)] for j in range(2)] for i in range(4)] @test_util.run_all_in_graph_and_eager_modes class RaggedUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ # Docstring examples dict( data=['a', 'b', 'c'], repeats=[3, 0, 2], axis=0, expected=[b'a', b'a', b'a', b'c', b'c']), dict( data=[[1, 2], [3, 4]], repeats=[2, 3], axis=0, expected=[[1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]), dict( data=[[1, 2], [3, 4]], repeats=[2, 3], axis=1, expected=[[1, 1, 2, 2, 2], [3, 3, 4, 4, 4]]), # Scalar repeats value dict( data=['a', 'b', 'c'], repeats=2, axis=0, expected=[b'a', b'a', b'b', b'b', b'c', b'c']), dict( data=[[1, 2], [3, 4]], repeats=2, axis=0, expected=[[1, 2], [1, 2], [3, 4], [3, 4]]), dict( data=[[1, 2], [3, 4]], repeats=2, axis=1, expected=[[1, 1, 2, 2], [3, 3, 4, 4]]), # data & repeats are broadcast to have at least one dimension, # so these are all equivalent: dict(data=3, repeats=4, axis=0, expected=[3, 3, 3, 3]), dict(data=[3], repeats=4, axis=0, expected=[3, 3, 3, 3]), dict(data=3, repeats=[4], axis=0, expected=[3, 3, 3, 3]), dict(data=[3], repeats=[4], axis=0, expected=[3, 3, 3, 3]), # Empty tensor dict(data=[], repeats=[], axis=0, expected=[]), ]) def testRepeat(self, data, repeats, expected, axis=None): result = ragged_util.repeat(data, repeats, axis) self.assertAllEqual(result, expected) @parameterized.parameters([ dict(mode=mode, **args) for mode in ['constant', 'dynamic', 'unknown_shape'] for args in [ # data & repeats are broadcast to have at least one dimension, # so these are all equivalent: dict(data=3, repeats=4, axis=0), dict(data=[3], repeats=4, axis=0), dict(data=3, repeats=[4], axis=0), dict(data=[3], repeats=[4], axis=0), # 1-dimensional data tensor. dict(data=[], repeats=5, axis=0), dict(data=[1, 2, 3], repeats=5, axis=0), dict(data=[1, 2, 3], repeats=[3, 0, 2], axis=0), dict(data=[1, 2, 3], repeats=[3, 0, 2], axis=-1), dict(data=[b'a', b'b', b'c'], repeats=[3, 0, 2], axis=0), # 2-dimensional data tensor. 
dict(data=[[1, 2, 3], [4, 5, 6]], repeats=3, axis=0), dict(data=[[1, 2, 3], [4, 5, 6]], repeats=3, axis=1), dict(data=[[1, 2, 3], [4, 5, 6]], repeats=[3, 5], axis=0), dict(data=[[1, 2, 3], [4, 5, 6]], repeats=[3, 5, 7], axis=1), # 3-dimensional data tensor: shape=[4, 2, 3]. dict(data=TENSOR_3D, repeats=2, axis=0), dict(data=TENSOR_3D, repeats=2, axis=1), dict(data=TENSOR_3D, repeats=2, axis=2), dict(data=TENSOR_3D, repeats=[2, 0, 4, 1], axis=0), dict(data=TENSOR_3D, repeats=[3, 2], axis=1), dict(data=TENSOR_3D, repeats=[1, 3, 1], axis=2), # 4-dimensional data tensor: shape=[4, 2, 3, 5]. dict(data=TENSOR_4D, repeats=2, axis=0), dict(data=TENSOR_4D, repeats=2, axis=1), dict(data=TENSOR_4D, repeats=2, axis=2), dict(data=TENSOR_4D, repeats=2, axis=3), dict(data=TENSOR_4D, repeats=[2, 0, 4, 1], axis=0), dict(data=TENSOR_4D, repeats=[3, 2], axis=1), dict(data=TENSOR_4D, repeats=[1, 3, 1], axis=2), dict(data=TENSOR_4D, repeats=[1, 3, 0, 0, 2], axis=3), ] ]) def testValuesMatchesNumpy(self, mode, data, repeats, axis): # Exception: we can't handle negative axis if data.ndims is unknown. if axis < 0 and mode == 'unknown_shape': return expected = np.repeat(data, repeats, axis) if mode == 'constant': data = constant_op.constant(data) repeats = constant_op.constant(repeats) elif mode == 'dynamic': data = constant_op.constant(data) repeats = constant_op.constant(repeats) data = array_ops.placeholder_with_default(data, data.shape) repeats = array_ops.placeholder_with_default(repeats, repeats.shape) elif mode == 'unknown_shape': data = array_ops.placeholder_with_default(data, None) repeats = array_ops.placeholder_with_default(repeats, None) result = ragged_util.repeat(data, repeats, axis) self.assertAllEqual(result, expected) @parameterized.parameters([ dict( descr='axis >= rank(data)', mode='dynamic', data=[1, 2, 3], repeats=[3, 0, 2], axis=1, error='axis=1 out of bounds: expected -1<=axis<1'), dict( descr='axis < -rank(data)', mode='dynamic', data=[1, 2, 3], repeats=[3, 0, 2], axis=-2, error='axis=-2 out of bounds: expected -1<=axis<1'), dict( descr='len(repeats) != data.shape[axis]', mode='dynamic', data=[[1, 2, 3], [4, 5, 6]], repeats=[2, 3], axis=1, error='Dimensions 3 and 2 are not compatible'), dict( descr='rank(repeats) > 1', mode='dynamic', data=[[1, 2, 3], [4, 5, 6]], repeats=[[3], [5]], axis=1, error=r'Shape \(2, 1\) must have rank at most 1'), dict( descr='non-integer axis', mode='constant', data=[1, 2, 3], repeats=2, axis='foo', exception=TypeError, error='axis must be an int'), ]) def testError(self, descr, mode, data, repeats, axis, exception=ValueError, error=None): # Make sure that this is also an error case for numpy. with self.assertRaises(exception): np.repeat(data, repeats, axis) if mode == 'constant': data = constant_op.constant(data) repeats = constant_op.constant(repeats) elif mode == 'dynamic': data = constant_op.constant(data) repeats = constant_op.constant(repeats) data = array_ops.placeholder_with_default(data, data.shape) repeats = array_ops.placeholder_with_default(repeats, repeats.shape) elif mode == 'unknown_shape': data = array_ops.placeholder_with_default(data, None) repeats = array_ops.placeholder_with_default(repeats, None) with self.assertRaisesRegexp(exception, error): ragged_util.repeat(data, repeats, axis) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_util_test.py
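The tests above check `ragged_util.repeat` against `np.repeat`; a small sketch of that equivalence follows, with the result in comments assuming eager execution.

import numpy as np
from tensorflow.python.ops.ragged import ragged_util

data = [[1, 2], [3, 4]]
repeats = [2, 3]
result = ragged_util.repeat(data, repeats, 0)
# Matches np.repeat(data, repeats, axis=0):
# [[1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]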
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Value for RaggedTensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.util.tf_export import tf_export @tf_export(v1=["ragged.RaggedTensorValue"]) class RaggedTensorValue(object): """Represents the value of a `RaggedTensor`. Warning: `RaggedTensorValue` should only be used in graph mode; in eager mode, the `tf.RaggedTensor` class contains its value directly. See `tf.RaggedTensor` for a description of ragged tensors. """ def __init__(self, values, row_splits): """Creates a `RaggedTensorValue`. Args: values: A numpy array of any type and shape; or a RaggedTensorValue. row_splits: A 1-D int32 or int64 numpy array. """ if not (isinstance(row_splits, (np.ndarray, np.generic)) and row_splits.dtype in (np.int64, np.int32) and row_splits.ndim == 1): raise TypeError("row_splits must be a 1D int32 or int64 numpy array") if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)): raise TypeError("values must be a numpy array or a RaggedTensorValue") if (isinstance(values, RaggedTensorValue) and row_splits.dtype != values.row_splits.dtype): raise ValueError("row_splits and values.row_splits must have " "the same dtype") self._values = values self._row_splits = row_splits row_splits = property( lambda self: self._row_splits, doc="""The split indices for the ragged tensor value.""") values = property( lambda self: self._values, doc="""The concatenated values for all rows in this tensor.""") dtype = property( lambda self: self._values.dtype, doc="""The numpy dtype of values in this tensor.""") @property def flat_values(self): """The innermost `values` array for this ragged tensor value.""" rt_values = self.values while isinstance(rt_values, RaggedTensorValue): rt_values = rt_values.values return rt_values @property def nested_row_splits(self): """The row_splits for all ragged dimensions in this ragged tensor value.""" rt_nested_splits = [self.row_splits] rt_values = self.values while isinstance(rt_values, RaggedTensorValue): rt_nested_splits.append(rt_values.row_splits) rt_values = rt_values.values return tuple(rt_nested_splits) @property def ragged_rank(self): """The number of ragged dimensions in this ragged tensor value.""" values_is_ragged = isinstance(self._values, RaggedTensorValue) return self._values.ragged_rank + 1 if values_is_ragged else 1 @property def shape(self): """A tuple indicating the shape of this RaggedTensorValue.""" return (self._row_splits.shape[0] - 1,) + (None,) + self._values.shape[1:] def __str__(self): return "<tf.RaggedTensorValue %s>" % self.to_list() def __repr__(self): return "tf.RaggedTensorValue(values=%r, row_splits=%r)" % (self._values, self._row_splits) def to_list(self): """Returns this ragged tensor value as a nested Python list.""" if isinstance(self._values, 
RaggedTensorValue): values_as_list = self._values.to_list() else: values_as_list = self._values.tolist() return [ values_as_list[self._row_splits[i]:self._row_splits[i + 1]] for i in range(len(self._row_splits) - 1) ]
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_tensor_value.py
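A small construction sketch for the `RaggedTensorValue` class above. It is a plain numpy container (a graph-mode value), so the constructor requires numpy arrays, with 1-D int32 or int64 row splits; the example values are illustrative.

import numpy as np
from tensorflow.python.ops.ragged.ragged_tensor_value import RaggedTensorValue

values = np.array([1, 2, 3, 4, 5])
row_splits = np.array([0, 3, 3, 5], dtype=np.int64)
rtv = RaggedTensorValue(values, row_splits)
# rtv.to_list()    == [[1, 2, 3], [], [4, 5]]
# rtv.shape        == (3, None)   # 3 rows, ragged second dimension
# rtv.ragged_rank  == 1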
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_placeholder op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedPlaceholderOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ # dtype, ragged_rank, value_shape, name -> expected (dtypes.int32, 0, [5], None, 'Tensor("Placeholder:0", shape=(5,), dtype=int32)'), (dtypes.int32, 1, [], 'ph', 'tf.RaggedTensor(' 'values=Tensor("ph/flat_values:0", shape=(None,), dtype=int32), ' 'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'), (dtypes.string, 1, [5], 'ph', 'tf.RaggedTensor(' 'values=Tensor("ph/flat_values:0", shape=(None, 5), dtype=string), ' 'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'), (dtypes.float32, 2, [], 'ph', 'tf.RaggedTensor(values=tf.RaggedTensor(' 'values=Tensor("ph/flat_values:0", shape=(None,), dtype=float32), ' 'row_splits=Tensor("ph/row_splits_1:0", shape=(None,), dtype=int64)), ' 'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'), (dtypes.int32, 2, [3, 5], 'ph', 'tf.RaggedTensor(values=tf.RaggedTensor(' 'values=Tensor("ph/flat_values:0", shape=(None, 3, 5), dtype=int32), ' 'row_splits=Tensor("ph/row_splits_1:0", shape=(None,), dtype=int64)), ' 'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'), ]) def testRaggedPlaceholder(self, dtype, ragged_rank, value_shape, name, expected): if not context.executing_eagerly(): placeholder = ragged_factory_ops.placeholder( dtype, ragged_rank, value_shape, name) result = str(placeholder).replace('?', 'None') self.assertEqual(result, expected) def testRaggedPlaceholderRaisesExceptionInEagerMode(self): if context.executing_eagerly(): with self.assertRaises(RuntimeError): ragged_factory_ops.placeholder(dtypes.int32, 1, []) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_placeholder_op_test.py
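A graph-mode sketch of the ragged placeholder the test above checks; as the eager-mode test shows, calling it under eager execution raises RuntimeError, so the example builds an explicit graph. The dtype, ragged_rank, and value shape are illustrative.

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops.ragged import ragged_factory_ops

with ops.Graph().as_default():
  # The ragged placeholder is assembled from placeholders for flat_values and
  # one row_splits tensor per ragged dimension, as the expected reprs above show.
  rt_ph = ragged_factory_ops.placeholder(dtypes.float32, 1, [])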
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged.bounding_shape.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedTensorBoundingShapeOp(test_util.TensorFlowTestCase): def testDocStringExample(self): # This is the example from ragged.bounding_shape.__doc__. rt = ragged_factory_ops.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]]) self.assertAllEqual(rt.bounding_shape(), [5, 4]) def test2DRaggedTensorWithOneRaggedDimension(self): values = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] rt1 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 2, 5, 6, 6, 7]) rt2 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 7]) rt3 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 0, 7, 7]) self.assertAllEqual(rt1.bounding_shape(), [5, 3]) self.assertAllEqual(rt2.bounding_shape(), [1, 7]) self.assertAllEqual(rt3.bounding_shape(), [3, 7]) def test3DRaggedTensorWithOneRaggedDimension(self): values = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]] rt1 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 2, 5, 6, 6, 7]) rt2 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 7]) rt3 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 0, 7, 7]) self.assertAllEqual(rt1.bounding_shape(), [5, 3, 2]) self.assertAllEqual(rt2.bounding_shape(), [1, 7, 2]) self.assertAllEqual(rt3.bounding_shape(), [3, 7, 2]) def testExplicitAxisOptimizations(self): rt = ragged_tensor.RaggedTensor.from_row_splits(b'a b c d e f g'.split(), [0, 2, 5, 6, 6, 7]) self.assertAllEqual(rt.bounding_shape(0), 5) self.assertAllEqual(rt.bounding_shape(1), 3) self.assertAllEqual(rt.bounding_shape([1, 0]), [3, 5]) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_tensor_bounding_shape_op_test.py
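A short sketch of `bounding_shape`, mirroring the docstring example the test above replays; the commented values assume eager execution.

from tensorflow.python.ops.ragged import ragged_factory_ops

rt = ragged_factory_ops.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
shape = rt.bounding_shape()   # [5, 4]: 5 rows, and the longest row has 4 items
dim1 = rt.bounding_shape(1)   # 4: asking for a single axis avoids computing the rest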
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_array_ops.gather.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import indexed_slices from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_gather_ops from tensorflow.python.platform import googletest class RaggedGatherOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): def testDocStringExamples(self): params = constant_op.constant(['a', 'b', 'c', 'd', 'e']) indices = constant_op.constant([3, 1, 2, 1, 0]) ragged_params = ragged_factory_ops.constant([['a', 'b', 'c'], ['d'], [], ['e']]) ragged_indices = ragged_factory_ops.constant([[3, 1, 2], [1], [], [0]]) self.assertAllEqual( ragged_gather_ops.gather(params, ragged_indices), [[b'd', b'b', b'c'], [b'b'], [], [b'a']]) self.assertAllEqual( ragged_gather_ops.gather(ragged_params, indices), [[b'e'], [b'd'], [], [b'd'], [b'a', b'b', b'c']]) self.assertAllEqual( ragged_gather_ops.gather(ragged_params, ragged_indices), [[[b'e'], [b'd'], []], [[b'd']], [], [[b'a', b'b', b'c']]]) def testTensorParamsAndTensorIndices(self): params = ['a', 'b', 'c', 'd', 'e'] indices = [2, 0, 2, 1] self.assertAllEqual( ragged_gather_ops.gather(params, indices), [b'c', b'a', b'c', b'b']) self.assertIsInstance(ragged_gather_ops.gather(params, indices), ops.Tensor) def testRaggedParamsAndTensorIndices(self): params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']]) indices = [2, 0, 2, 1] self.assertAllEqual( ragged_gather_ops.gather(params, indices), [[b'f'], [b'a', b'b'], [b'f'], [b'c', b'd', b'e']]) def testTensorParamsAndRaggedIndices(self): params = ['a', 'b', 'c', 'd', 'e'] indices = ragged_factory_ops.constant([[2, 1], [1, 2, 0], [3]]) self.assertAllEqual( ragged_gather_ops.gather(params, indices), [[b'c', b'b'], [b'b', b'c', b'a'], [b'd']]) def testRaggedParamsAndRaggedIndices(self): params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']]) indices = ragged_factory_ops.constant([[2, 1], [1, 2, 0], [3]]) self.assertAllEqual( ragged_gather_ops.gather(params, indices), [[[b'f'], [b'c', b'd', b'e']], # [[p[2], p[1] ], [[b'c', b'd', b'e'], [b'f'], [b'a', b'b']], # [p[1], p[2], p[0]], [[]]] # [p[3] ]] ) # pyformat: disable def testRaggedParamsAndScalarIndices(self): params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']]) 
indices = 1 self.assertAllEqual( ragged_gather_ops.gather(params, indices), [b'c', b'd', b'e']) def test3DRaggedParamsAnd2DTensorIndices(self): params = ragged_factory_ops.constant([[['a', 'b'], []], [['c', 'd'], ['e'], ['f']], [['g']]]) indices = [[1, 2], [0, 1], [2, 2]] self.assertAllEqual( ragged_gather_ops.gather(params, indices), [[[[b'c', b'd'], [b'e'], [b'f']], [[b'g']]], # [[p1, p2], [[[b'a', b'b'], []], [[b'c', b'd'], [b'e'], [b'f']]], # [p0, p1], [[[b'g']], [[b'g']]]] # [p2, p2]] ) # pyformat: disable def testTensorParamsAnd4DRaggedIndices(self): indices = ragged_factory_ops.constant( [[[[3, 4], [0, 6]], []], [[[2, 1], [1, 0]], [[2, 5]], [[2, 3]]], [[[1, 0]]]], # pyformat: disable ragged_rank=2, inner_shape=(2,)) params = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] self.assertAllEqual( ragged_gather_ops.gather(params, indices), [[[[b'd', b'e'], [b'a', b'g']], []], [[[b'c', b'b'], [b'b', b'a']], [[b'c', b'f']], [[b'c', b'd']]], [[[b'b', b'a']]]]) # pyformat: disable def testOutOfBoundsError(self): tensor_params = ['a', 'b', 'c'] tensor_indices = [0, 1, 2] ragged_params = ragged_factory_ops.constant([['a', 'b'], ['c']]) ragged_indices = ragged_factory_ops.constant([[0, 3]]) with self.assertRaisesRegexp(errors.InvalidArgumentError, r'indices\[1\] = 3 is not in \[0, 3\)'): self.evaluate(ragged_gather_ops.gather(tensor_params, ragged_indices)) with self.assertRaisesRegexp(errors.InvalidArgumentError, r'indices\[2\] = 2 is not in \[0, 2\)'): self.evaluate(ragged_gather_ops.gather(ragged_params, tensor_indices)) with self.assertRaisesRegexp(errors.InvalidArgumentError, r'indices\[1\] = 3 is not in \[0, 2\)'): self.evaluate(ragged_gather_ops.gather(ragged_params, ragged_indices)) def testUnknownIndicesRankError(self): if context.executing_eagerly(): return params = ragged_factory_ops.constant([], ragged_rank=1) indices = constant_op.constant([0], dtype=dtypes.int64) indices = array_ops.placeholder_with_default(indices, None) self.assertRaisesRegexp(ValueError, r'indices\.shape\.ndims must be known statically', ragged_gather_ops.gather, params, indices) # pylint: disable=bad-whitespace @parameterized.parameters([ # params.shape=[2, None]; indices.shape=[3] dict( params = [[1.0, 2.0], [3.0, 4.0, 5.0]], indices = [0, 0, 1], expected_out = [[1.0, 2.0], [1.0, 2.0], [3.0, 4.0, 5.0]], out_grad = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6, 0.7]], expected_grad = [[0.4, 0.6], [0.5, 0.6, 0.7]]), # params.shape=[2, None]; indices.shape=[0] dict( params = [[1, 2], [3, 4, 5]], indices = [], expected_out = [], out_grad = [], expected_grad = [[0, 0], [0, 0, 0]]), # params.shape=[2, None]; indices.shape=[2, 2] dict( params = [[1.0, 2.0], [3.0, 4.0, 5.0]], indices = [[0, 0], [1, 0]], expected_out = [[[1.0, 2.0], [1.0, 2.0]], [[3.0, 4.0, 5.0], [1.0, 2.0]]], out_grad = [[[0.1, 0.2], [0.3, 0.4]], [[0.5, 0.6, 0.7], [0.8, 0.9]]], expected_grad = [[1.2, 1.5], [0.5, 0.6, 0.7]]), # params.shape=[3, None, None]; indices.shape=[3] dict( params = [[[1, 2], [3, 4, 5]], [[6.0]], [[7.0, 8.0]]], indices = [2, 1, 2], expected_out = [[[7.0, 8.0]], [[6.0]], [[7.0, 8.0]]], out_grad = [[[0.1, 0.2]], [[0.3]], [[0.4, 0.5]]], expected_grad = [[[0, 0], [0, 0, 0]], [[0.3]], [[0.5, 0.7]]]), # params.shape=[3, None, None]; indices.shape=[0] dict( params = [[[1, 2], [3, 4, 5]], [[6.0]], [[7.0, 8.0]]], indices = [2, 1, 2], expected_out = [[[7.0, 8.0]], [[6.0]], [[7.0, 8.0]]], out_grad = [[[0.1, 0.2]], [[0.3]], [[0.4, 0.5]]], expected_grad = [[[0, 0], [0, 0, 0]], [[0.3]], [[0.5, 0.7]]]), # params.shape=[0, None]; indices.shape=[0] dict( params = [], 
indices = [], expected_out = [], out_grad = [], expected_grad = [], params_ragged_rank = 1), # params.shape=[2, None, 2]; indices.shape=[3] dict( params = [[[1, 2], [3, 4]], [], [[5, 6]]], indices = [1, 1, 2, 0, 2], expected_out = [[], [], [[5, 6]], [[1, 2], [3, 4]], [[5, 6]]], out_grad = [[], [], [[1, 2]], [[3, 4], [5, 6]], [[7, 7]]], expected_grad = [[[3, 4], [5, 6]], [], [[8, 9]]], params_ragged_rank = 1), ]) # pyformat: disable @test_util.run_deprecated_v1 def testGradient(self, params, indices, expected_out, out_grad, expected_grad, params_ragged_rank=None): """Tests that ragged_gather generates the right gradient. Args: params: The `params` that should be passed to `gather`. indices: The `indices` that should be passed to `gather`. expected_out: The expected value of `gather(params, indices)`. `expected_out.shape = indices.shape + params.shape[1:]`. out_grad: The value that should be fed in as the gradient for `out` when testing the gradient of `ragged_gather`. Must have the same shape as `expected_out`. expected_grad: The expected gradient for that should be returned for `params`. Must have hte same shape as `params`. params_ragged_rank: The ragged_rank of `params`. """ if context.executing_eagerly(): return params = ragged_factory_ops.constant( params, dtype=dtypes.float32, ragged_rank=params_ragged_rank) indices = constant_op.constant(indices, dtype=dtypes.int32) out_ragged_rank = params.ragged_rank + indices.shape.ndims - 1 out_grad = ragged_factory_ops.constant( out_grad, dtype=dtypes.float32, ragged_rank=out_ragged_rank) expected_out = ragged_factory_ops.constant( expected_out, dtype=dtypes.float32, ragged_rank=out_ragged_rank) expected_grad = ragged_factory_ops.constant( expected_grad, dtype=dtypes.float32, ragged_rank=params.ragged_rank) out = ragged_gather_ops.gather(params, indices) self.assertAllClose(out, expected_out) grads = gradients_impl.gradients( out.flat_values, (params.nested_row_splits + (params.flat_values, indices,)), out_grad.flat_values) param_nested_splits_grads = grads[:-2] params_flat_values_grad = grads[-2] indices_grad = grads[-1] self.assertEqual(indices_grad, None) for splits_grad in param_nested_splits_grads: self.assertEqual(splits_grad, None) # The gradient generates an IndexedSlices; convert back to a normal Tensor. self.assertIsInstance(params_flat_values_grad, indexed_slices.IndexedSlices) params_flat_values_grad = ops.convert_to_tensor(params_flat_values_grad) params_grad = params.with_flat_values(params_flat_values_grad) self.assertAllClose(params_grad, expected_grad, atol=2e-6, rtol=2e-6) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_gather_op_test.py
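A minimal sketch of ragged gather with dense indices, reusing the ragged params from the docstring example in the test above; the commented result assumes eager execution.

from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_gather_ops

params = ragged_factory_ops.constant([['a', 'b', 'c'], ['d'], [], ['e']])
indices = [3, 1, 2, 1, 0]
result = ragged_gather_ops.gather(params, indices)
# result == [[b'e'], [b'd'], [], [b'd'], [b'a', b'b', b'c']]
# Out-of-range indices fail at run time with InvalidArgumentError, as the
# testOutOfBoundsError cases above demonstrate.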
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged.size.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedSizeOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): @parameterized.parameters([ {'size': 1, 'test_input': 1}, {'size': 0, 'test_input': []}, {'size': 0, 'test_input': [], 'ragged_rank': 1}, {'size': 3, 'test_input': [1, 1, 1]}, {'size': 3, 'test_input': [[1, 1], [1]]}, {'size': 5, 'test_input': [[[1, 1, 1], [1]], [[1]]]}, {'size': 6, 'test_input': [[[1, 1], [1, 1]], [[1, 1]]], 'ragged_rank': 1}, ]) def testRaggedSize(self, test_input, size, ragged_rank=None): input_rt = ragged_factory_ops.constant(test_input, ragged_rank=ragged_rank) self.assertAllEqual(ragged_array_ops.size(input_rt), size) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_size_op_test.py
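Illustrative sketch (not part of the test file above): the tests call ragged_array_ops.size directly; under the assumption that the same op is reachable through tf.size's ragged dispatch, a minimal example looks like this.

import tensorflow as tf

rt = tf.ragged.constant([[[1, 1, 1], [1]], [[1]]])
n = tf.size(rt)  # counts every scalar element across all ragged rows
# n == 5, matching the {'size': 5, ...} case in the test above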
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ragged_array_ops.concat.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_concat_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.platform import googletest @test_util.run_all_in_graph_and_eager_modes class RaggedConcatOpTest(test_util.TensorFlowTestCase, parameterized.TestCase): def _rt_inputs_to_tensors(self, rt_inputs, ragged_ranks=None): if ragged_ranks is None: ragged_ranks = [None] * len(rt_inputs) return [ # pylint: disable=g-long-ternary ragged_factory_ops.constant(rt_input, ragged_rank=rrank) if rrank != 0 else constant_op.constant(rt_input) for (rt_input, rrank) in zip(rt_inputs, ragged_ranks) ] @parameterized.parameters( dict( descr='Two rank-2 inputs with empty value axis=1', rt_inputs=([[]], [[]]), axis=1, expected=[[]]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=0', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None) [['b00'], ['b10']]), # shape=(2, None) axis=0, expected=[[b'a00', b'a01'], [], [b'a20', b'a21'], [b'b00'], [b'b10']]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=1', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None) axis=1, expected=[ [b'a00', b'a01', b'b00'], [b'b10', b'b11', b'b12'], [b'a20', b'a21', b'a22', b'b20']]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=-2', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None) [['b00'], ['b10']]), # shape=(2, None) axis=-2, expected=[[b'a00', b'a01'], [], [b'a20', b'a21'], [b'b00'], [b'b10']]), dict( descr='Two rank-2 inputs (ragged_rank=1), axis=-1', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None) axis=-1, expected=[ [b'a00', b'a01', b'b00'], [b'b10', b'b11', b'b12'], [b'a20', b'a21', b'a22', b'b20']], expected_shape=[3, None]), dict( descr='Three rank-2 inputs (ragged_rank=1), axis=0', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00'], ['b10']], # shape=(2, None) [['c00'], ['c10', 'c11'], ['c21']]), # shape=(3, None) axis=0, expected=[[b'a00', b'a01'], [], [b'a20', b'a21', b'a22'], [b'b00'], [b'b10'], [b'c00'], [b'c10', b'c11'], [b'c21']]), dict( descr='Three rank-2 inputs (ragged_rank=1), axis=1', rt_inputs=( [['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None) [['b00'], ['b10', 'b11', 'b12'], 
['b20']], # shape=(3, None) [[], ['c10', 'c11'], ['c20', 'c21']]), # shape=(3, None) axis=1, expected=[ [b'a00', b'a01', b'b00'], [b'b10', b'b11', b'b12', b'c10', b'c11'], [b'a20', b'a21', b'a22', b'b20', b'c20', b'c21']]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=0', rt_inputs=( [[['a000', 'a001'], ['a010']], [['a100', 'a101', 'a102'], ['a110', 'a111']]], [[['b000']], [['b100', 'b101'], ['b110']]], [[], [['c100', 'c101', 'c102', 'c103']], [[], ['c210', 'c211']]]), axis=0, expected=[ [[b'a000', b'a001'], [b'a010']], [[b'a100', b'a101', b'a102'], [b'a110', b'a111']], [[b'b000']], [[b'b100', b'b101'], [b'b110']], [], [[b'c100', b'c101', b'c102', b'c103']], [[], [b'c210', b'c211']]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=1', rt_inputs=( [[['a000', 'a001'], ['a010']], [['a100', 'a101', 'a102'], ['a110', 'a111']]], [[['b000']], [['b100', 'b101'], ['b110']]], [[], [[], ['c110', 'c111']]]), axis=1, expected=[ [[b'a000', b'a001'], [b'a010'], [b'b000']], [[b'a100', b'a101', b'a102'], [b'a110', b'a111'], [b'b100', b'b101'], [b'b110'], [], [b'c110', b'c111']]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=2', rt_inputs=( [[['a000', 'a001'], ['a010']], [['a100', 'a101', 'a102'], ['a110', 'a111']]], [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]], [[['c000'], ['c010']], [[], ['c110', 'c111']]]), axis=2, expected=[ [[b'a000', b'a001', b'c000'], [b'a010', b'b010', b'b011', b'c010']], [[b'a100', b'a101', b'a102', b'b100', b'b101'], [b'a110', b'a111', b'b110', b'c110', b'c111']]]), dict( descr='Three rank-3 inputs (ragged_rank=2), axis=-1', rt_inputs=( [[['a000', 'a001'], ['a010']], [['a100', 'a101', 'a102'], ['a110', 'a111']]], [[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]], [[['c000'], ['c010']], [[], ['c110', 'c111']]]), axis=-1, expected=[ [[b'a000', b'a001', b'c000'], [b'a010', b'b010', b'b011', b'c010']], [[b'a100', b'a101', b'a102', b'b100', b'b101'], [b'a110', b'a111', b'b110', b'c110', b'c111']]]), dict( descr='ragged_concat([uniform, ragged, uniform], axis=1)', ragged_ranks=[0, 1, 0], rt_inputs=( [['0('], ['1('], ['2(']], # shape=(3, 1) [['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None) [[')0'], [')1'], [')2']]), # shape=(3, 1) axis=1, expected=[ [b'0(', b'b00', b')0'], [b'1(', b'b10', b'b11', b'b12', b')1'], [b'2(', b'b20', b')2']]), dict( descr='ragged_concat([uniform, uniform], axis=0)', ragged_ranks=[0, 0], rt_inputs=( [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2) [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3) axis=0, expected=[ [b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21'], [b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']], expected_ragged_rank=1), dict( descr='ragged_concat([uniform, ragged], axis=0)', ragged_ranks=[0, 1], rt_inputs=( [['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2) [['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3) axis=0, expected=[ [b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21'], [b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]), dict( descr='ragged_concat([uniform, ragged], axis=0) with rank-3 inputs', ragged_ranks=[0, 2], rt_inputs=( [[[0, 1], [2, 3]], [[4, 5], [6, 7]]], # shape = (2, 2, 2) [[[8], [8, 8]]]), # shape = (2, None, None) axis=0, expected=[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], [8, 8]]]), dict( descr='Two rank-3 inputs with ragged_rank=1, axis=-1', ragged_ranks=[1, 1], rt_inputs=( [[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]], [[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]), axis=-1, expected=[ [[0, 1, 
9, 8], [2, 3, 7, 6], [4, 5, 5, 4]], [], [[6, 7, 3, 2], [8, 9, 1, 0]]], expected_ragged_rank=1), dict( descr='ragged_concat([vector, vector], axis=0)', ragged_ranks=[0, 0], rt_inputs=([1, 2, 3], [4, 5, 6]), axis=0, expected=[1, 2, 3, 4, 5, 6]), dict( descr='One input (so ragged_conat is a noop)', rt_inputs=([['a00', 'a01'], [], ['a20', 'a21']],), axis=0, expected=[[b'a00', b'a01'], [], [b'a20', b'a21']]), ) # pyformat: disable def testRaggedConcat(self, descr, rt_inputs, axis, expected, ragged_ranks=None, expected_ragged_rank=None, expected_shape=None): rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks) concatenated = ragged_concat_ops.concat(rt_inputs, axis) if expected_ragged_rank is not None: self.assertEqual(concatenated.ragged_rank, expected_ragged_rank) if expected_shape is not None: self.assertEqual(concatenated.shape.as_list(), expected_shape) self.assertAllEqual(concatenated, expected) @parameterized.parameters( dict( rt_inputs=(), axis=0, error=ValueError, message=r'rt_inputs may not be empty\.'), dict( rt_inputs=([[1, 2]], [[3, 4]]), axis=r'foo', error=TypeError, message='axis must be an int'), dict( rt_inputs=([[1, 2]], [[3, 4]]), axis=-3, error=ValueError, message='axis=-3 out of bounds: expected -2<=axis<2'), dict( rt_inputs=([[1, 2]], [[3, 4]]), axis=2, error=ValueError, message='axis=2 out of bounds: expected -2<=axis<2'), dict( ragged_ranks=(0, 0), rt_inputs=([[1, 2]], [[3, 4], [5, 6]]), axis=1, error=(ValueError, errors.InvalidArgumentError)), ) def testStaticError(self, rt_inputs, axis, error, message=None, ragged_ranks=None): rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks) self.assertRaisesRegexp(error, message, ragged_concat_ops.concat, rt_inputs, axis) @parameterized.parameters([ dict( ragged_ranks=(1, 1), rt_inputs=([[1, 2]], [[3, 4], [5, 6]]), axis=1, error=errors.InvalidArgumentError, message='Input tensors have incompatible shapes'), ]) def testRuntimeError(self, rt_inputs, axis, error, message, ragged_ranks=None): if context.executing_eagerly(): return rt_inputs = [ array_ops.placeholder_with_default(rt, shape=None) for rt in rt_inputs ] concatenated = ragged_concat_ops.concat(rt_inputs, axis) with self.assertRaisesRegexp(error, message): self.evaluate(concatenated) def testNegativeAxisWithUnknownRankError(self): if context.executing_eagerly(): return rt_inputs = [ array_ops.placeholder(dtypes.int64), array_ops.placeholder(dtypes.int64) ] self.assertRaisesRegexp( ValueError, r'axis may only be negative if ndims is statically known.', ragged_concat_ops.concat, rt_inputs, -1) def testSingleTensorInput(self): """Tests ragged_concat with a single tensor input. Usually, we pass a list of values in for rt_inputs. However, you can also pass in a single value (as with tf.concat), in which case it simply returns that tensor. This test exercises that path. """ rt_inputs = ragged_factory_ops.constant([[1, 2], [3, 4]]) concatenated = ragged_concat_ops.concat(rt_inputs, 0) self.assertAllEqual(concatenated, [[1, 2], [3, 4]]) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_concat_op_test.py
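Illustrative sketch (not part of the test file above): ragged_concat_ops.concat is also reachable as tf.concat on RaggedTensor inputs; a minimal example of the axis=0 and axis=1 behaviours the tests cover, assuming a TensorFlow build with ragged-tensor support.

import tensorflow as tf

a = tf.ragged.constant([['a00', 'a01'], [], ['a20', 'a21', 'a22']])
b = tf.ragged.constant([['b00'], ['b10', 'b11', 'b12'], ['b20']])
rows = tf.concat([a, b], axis=0)    # 6 rows: all rows of `a`, then all rows of `b`
joined = tf.concat([a, b], axis=1)  # rows joined pairwise; inputs need equal nrows
# joined[0] == [b'a00', b'a01', b'b00']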
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the b"License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an b"AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the Tensorflow strings.ngrams op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_string_ops from tensorflow.python.platform import test class StringNgramsTest(test_util.TensorFlowTestCase): def test_unpadded_ngrams(self): data = [[b"aa", b"bb", b"cc", b"dd"], [b"ee", b"ff"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=3, separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[b"aa|bb|cc", b"bb|cc|dd"], []] self.assertAllEqual(expected_ngrams, result) def test_tuple_multi_ngrams(self): data = [[b"aa", b"bb", b"cc", b"dd"], [b"ee", b"ff"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=(2, 3), separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[b"aa|bb", b"bb|cc", b"cc|dd", b"aa|bb|cc", b"bb|cc|dd"], [b"ee|ff"]] self.assertAllEqual(expected_ngrams, result) def test_tuple_multi_ngrams_inverted_order(self): data = [[b"aa", b"bb", b"cc", b"dd"], [b"ee", b"ff"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=(3, 2), separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[b"aa|bb|cc", b"bb|cc|dd", b"aa|bb", b"bb|cc", b"cc|dd"], [b"ee|ff"]] self.assertAllEqual(expected_ngrams, result) def test_list_multi_ngrams(self): data = [[b"aa", b"bb", b"cc", b"dd"], [b"ee", b"ff"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=[2, 3], separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[b"aa|bb", b"bb|cc", b"cc|dd", b"aa|bb|cc", b"bb|cc|dd"], [b"ee|ff"]] self.assertAllEqual(expected_ngrams, result) def test_multi_ngram_ordering(self): data = [[b"aa", b"bb", b"cc", b"dd"], [b"ee", b"ff"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=[3, 2], separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[b"aa|bb|cc", b"bb|cc|dd", b"aa|bb", b"bb|cc", b"cc|dd"], [b"ee|ff"]] self.assertAllEqual(expected_ngrams, result) def test_fully_padded_ngrams(self): data = [[b"a"], [b"b", b"c", b"d"], [b"e", b"f"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=3, separator=b"|", pad_values=(b"LP", b"RP")) result = self.evaluate(ngram_op) expected_ngrams = [ [b"LP|LP|a", b"LP|a|RP", b"a|RP|RP"], # 0 [b"LP|LP|b", b"LP|b|c", b"b|c|d", b"c|d|RP", b"d|RP|RP"], # 1 [b"LP|LP|e", b"LP|e|f", 
b"e|f|RP", b"f|RP|RP"] # 2 ] self.assertAllEqual(expected_ngrams, result) def test_ngram_padding_size_cap(self): # Validate that the padding size is never greater than ngram_size - 1. data = [[b"a"], [b"b", b"c", b"d"], [b"e", b"f"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=3, separator=b"|", pad_values=(b"LP", b"RP"), padding_width=10) result = self.evaluate(ngram_op) expected_ngrams = [ [b"LP|LP|a", b"LP|a|RP", b"a|RP|RP"], # 0 [b"LP|LP|b", b"LP|b|c", b"b|c|d", b"c|d|RP", b"d|RP|RP"], # 1 [b"LP|LP|e", b"LP|e|f", b"e|f|RP", b"f|RP|RP"] # 2 ] self.assertAllEqual(expected_ngrams, result) def test_singly_padded_ngrams(self): data = [[b"a"], [b"b", b"c", b"d"], [b"e", b"f"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=5, separator=b"|", pad_values=(b"LP", b"RP"), padding_width=1) result = self.evaluate(ngram_op) expected_ngrams = [[], [b"LP|b|c|d|RP"], []] self.assertAllEqual(expected_ngrams, result) def test_singly_padded_ngrams_with_preserve_short(self): data = [[b"a"], [b"b", b"c", b"d"], [b"e", b"f"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=5, separator=b"|", pad_values=(b"LP", b"RP"), padding_width=1, preserve_short_sequences=True) result = self.evaluate(ngram_op) expected_ngrams = [[b"LP|a|RP"], [b"LP|b|c|d|RP"], [b"LP|e|f|RP"]] self.assertAllEqual(expected_ngrams, result) def test_singly_padded_multiple_ngrams(self): data = [[b"a"], [b"b", b"c", b"d"], [b"e", b"f"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=(1, 5), separator=b"|", pad_values=(b"LP", b"RP"), padding_width=1) result = self.evaluate(ngram_op) expected_ngrams = [[b"a"], [b"b", b"c", b"d", b"LP|b|c|d|RP"], [b"e", b"f"]] self.assertAllEqual(expected_ngrams, result) def test_single_padding_string(self): data = [[b"a"], [b"b", b"c", b"d"], [b"e", b"f"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=5, separator=b"|", pad_values=b"[PAD]", padding_width=1) result = self.evaluate(ngram_op) expected_ngrams = [[], [b"[PAD]|b|c|d|[PAD]"], []] self.assertAllEqual(expected_ngrams, result) def test_explicit_multiply_padded_ngrams(self): data = [[b"a"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=5, separator=b"|", pad_values=(b"LP", b"RP"), padding_width=2) result = self.evaluate(ngram_op) expected_ngrams = [[b"LP|LP|a|RP|RP"]] self.assertAllEqual(expected_ngrams, result) def test_ragged_inputs_with_multiple_ragged_dimensions(self): data = [[[[b"aa", b"bb", b"cc", b"dd"]], [[b"ee", b"ff"]]]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=3, separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[[[b"aa|bb|cc", b"bb|cc|dd"]], [[]]]] self.assertAllEqual(expected_ngrams, result) def test_ragged_inputs_with_multiple_ragged_dimensions_and_preserve(self): data = [[[[b"aa", b"bb", b"cc", b"dd"]], [[b"ee", b"ff"]]]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=3, separator=b"|", preserve_short_sequences=True) result = self.evaluate(ngram_op) expected_ngrams = [[[[b"aa|bb|cc", b"bb|cc|dd"]], [[b"ee|ff"]]]] self.assertAllEqual(expected_ngrams, result) def 
test_ragged_inputs_with_multiple_ragged_dimensions_bigrams(self): data = [[[[b"aa", b"bb", b"cc", b"dd"]], [[b"ee", b"ff"]]]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=2, separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[[[b"aa|bb", b"bb|cc", b"cc|dd"]], [[b"ee|ff"]]]] self.assertAllEqual(expected_ngrams, result) def test_ragged_inputs_with_multiple_ragged_dimensions_and_multiple_ngrams( self): data = [[[[b"aa", b"bb", b"cc", b"dd"]], [[b"ee", b"ff"]]]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=(3, 4), separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[[[b"aa|bb|cc", b"bb|cc|dd", b"aa|bb|cc|dd"]], [[]]]] self.assertAllEqual(expected_ngrams, result) def test_dense_input_rank_3(self): data = [[[b"a", b"z"], [b"b", b""]], [[b"b", b""], [b"e", b"f"]]] data_tensor = constant_op.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=3, separator=b"|", pad_values=(b"LP", b"RP")) result = self.evaluate(ngram_op) expected_ngrams = [[[b"LP|LP|a", b"LP|a|z", b"a|z|RP", b"z|RP|RP"], [b"LP|LP|b", b"LP|b|", b"b||RP", b"|RP|RP"]], [[b"LP|LP|b", b"LP|b|", b"b||RP", b"|RP|RP"], [b"LP|LP|e", b"LP|e|f", b"e|f|RP", b"f|RP|RP"]]] self.assertIsInstance(ngram_op, ops.Tensor) self.assertAllEqual(expected_ngrams, result) def test_dense_input(self): data = [[b"a", b"z"], [b"b", b""], [b"e", b"f"]] data_tensor = constant_op.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=3, separator=b"|", pad_values=(b"LP", b"RP")) result = self.evaluate(ngram_op) expected_ngrams = [ [b"LP|LP|a", b"LP|a|z", b"a|z|RP", b"z|RP|RP"], [b"LP|LP|b", b"LP|b|", b"b||RP", b"|RP|RP"], [b"LP|LP|e", b"LP|e|f", b"e|f|RP", b"f|RP|RP"], ] self.assertIsInstance(ngram_op, ops.Tensor) self.assertAllEqual(expected_ngrams, result) def test_input_list_input(self): data = [[b"a", b"z"], [b"b", b""], [b"e", b"f"]] ngram_op = ragged_string_ops.ngrams( data, ngram_width=3, separator=b"|", pad_values=(b"LP", b"RP")) result = self.evaluate(ngram_op) expected_ngrams = [ [b"LP|LP|a", b"LP|a|z", b"a|z|RP", b"z|RP|RP"], [b"LP|LP|b", b"LP|b|", b"b||RP", b"|RP|RP"], [b"LP|LP|e", b"LP|e|f", b"e|f|RP", b"f|RP|RP"], ] self.assertAllEqual(expected_ngrams, result) def test_vector_input(self): data = [b"a", b"z"] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=3, separator=b"|", pad_values=(b"LP", b"RP")) result = self.evaluate(ngram_op) expected_ngrams = [b"LP|LP|a", b"LP|a|z", b"a|z|RP", b"z|RP|RP"] self.assertAllEqual(expected_ngrams, result) def test_dense_input_with_multiple_ngrams(self): data = [[b"a", b"b", b"c", b"d"], [b"e", b"f", b"g", b"h"]] data_tensor = ragged_factory_ops.constant(data) ngram_op = ragged_string_ops.ngrams( data_tensor, ngram_width=(1, 2, 3), separator=b"|") result = self.evaluate(ngram_op) expected_ngrams = [[ b"a", b"b", b"c", b"d", b"a|b", b"b|c", b"c|d", b"a|b|c", b"b|c|d" ], [b"e", b"f", b"g", b"h", b"e|f", b"f|g", b"g|h", b"e|f|g", b"f|g|h"]] self.assertAllEqual(expected_ngrams, result) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/string_ngrams_op_test.py
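Illustrative sketch (not part of the test file above): the tests call ragged_string_ops.ngrams, which is exported publicly as tf.strings.ngrams; a minimal padded-trigram example mirroring the fixtures above.

import tensorflow as tf

data = tf.ragged.constant([["a"], ["b", "c", "d"]])
trigrams = tf.strings.ngrams(
    data, ngram_width=3, separator="|", pad_values=("LP", "RP"))
# trigrams[0] == [b"LP|LP|a", b"LP|a|RP", b"a|RP|RP"]
# trigrams[1] == [b"LP|LP|b", b"LP|b|c", b"b|c|d", b"c|d|RP", b"d|RP|RP"]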
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for third_party.tensorflow.python.ops.ragged_tensor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from absl.testing import parameterized import numpy as np from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_math_ops from tensorflow.python.ops.ragged import ragged_tensor_value from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensorSpec from tensorflow.python.platform import googletest class _SliceBuilder(object): """Helper to construct arguments for __getitem__. Usage: _SliceBuilder()[<expr>] slice_spec Python generates for <expr>. """ def __getitem__(self, slice_spec): return slice_spec SLICE_BUILDER = _SliceBuilder() def _make_tensor_slice_spec(slice_spec, use_constant=True): """Wraps all integers in an extended slice spec w/ a tensor. This function is used to help test slicing when the slice spec contains tensors, rather than integers. Args: slice_spec: The extended slice spec. use_constant: If true, then wrap each integer with a tf.constant. If false, then wrap each integer with a tf.placeholder. Returns: A copy of slice_spec, but with each integer i replaced with tf.constant(i). """ def make_piece_scalar(piece): if isinstance(piece, int): scalar = constant_op.constant(piece) if use_constant: return scalar else: return array_ops.placeholder_with_default(scalar, []) elif isinstance(piece, slice): return slice( make_piece_scalar(piece.start), make_piece_scalar(piece.stop), make_piece_scalar(piece.step)) else: return piece if isinstance(slice_spec, tuple): return tuple(make_piece_scalar(piece) for piece in slice_spec) else: return make_piece_scalar(slice_spec) # Example 2D ragged tensor value with one ragged dimension and with scalar # values, expressed as nested python lists and as splits+values. EXAMPLE_RAGGED_TENSOR_2D = [[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [], [b'g']] EXAMPLE_RAGGED_TENSOR_2D_SPLITS = [0, 2, 5, 6, 6, 7] EXAMPLE_RAGGED_TENSOR_2D_VALUES = ['a', 'b', 'c', 'd', 'e', 'f', 'g'] # Example 4D ragged tensor value, with two ragged dimensions and with values # whose shape is [2], expressed as nested python lists and as splits+values. 
EXAMPLE_RAGGED_TENSOR_4D = [ [ # rt[0] [[1, 2], [3, 4], [5, 6]], # rt[0][0] [[7, 8], [9, 10], [11, 12]]], # rt[0][1] [], # rt[1] [ # rt[2] [[13, 14], [15, 16], [17, 18]]], # rt[2][0] [ # rt[3] [[19, 20]]] # rt[3][0] ] # pyformat: disable EXAMPLE_RAGGED_TENSOR_4D_SPLITS1 = [0, 2, 2, 3, 4] EXAMPLE_RAGGED_TENSOR_4D_SPLITS2 = [0, 3, 6, 9, 10] EXAMPLE_RAGGED_TENSOR_4D_VALUES = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]] @test_util.run_all_in_graph_and_eager_modes class RaggedTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase): longMessage = True # Property in unittest.Testcase. pylint: disable=invalid-name #============================================================================= # RaggedTensor class docstring examples #============================================================================= def testClassDocStringExamples(self): # From section: "Component Tensors" rt = RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) self.assertAllEqual(rt, [[3, 1, 4, 1], [], [5, 9, 2], [6], []]) del rt # From section: "Alternative Row-Partitioning Schemes" values = [3, 1, 4, 1, 5, 9, 2, 6] rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8]) rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0]) rt3 = RaggedTensor.from_value_rowids( values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5) rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8]) rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8]) for rt in (rt1, rt2, rt3, rt4, rt5): self.assertAllEqual(rt, [[3, 1, 4, 1], [], [5, 9, 2], [6], []]) del rt1, rt2, rt3, rt4, rt5 # From section: "Multiple Ragged Dimensions" inner_rt = RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8]) outer_rt = RaggedTensor.from_row_splits( values=inner_rt, row_splits=[0, 3, 3, 5]) self.assertEqual(outer_rt.ragged_rank, 2) self.assertAllEqual( outer_rt, [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]) del inner_rt, outer_rt # From section: "Multiple Ragged Dimensions" rt = RaggedTensor.from_nested_row_splits( flat_values=[3, 1, 4, 1, 5, 9, 2, 6], nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])) self.assertAllEqual( rt, [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]) del rt # From section: "Uniform Inner Dimensions" rt = RaggedTensor.from_row_splits( values=array_ops.ones([5, 3]), row_splits=[0, 2, 5]) self.assertAllEqual( rt, [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]) self.assertEqual(rt.shape.as_list(), [2, None, 3]) del rt #============================================================================= # RaggedTensorValue Constructor #============================================================================= def testRaggedTensorValueConstruction(self): values = np.array(b'a b c d e f g'.split()) splits = np.array([0, 2, 5, 6, 6, 7], dtype=np.int64) splits2 = np.array([0, 3, 5], dtype=np.int64) # Test construction of a RaggedTensorValue with ragged_rank=1. rt_value = ragged_tensor_value.RaggedTensorValue(values, splits) self.assertEqual(rt_value.row_splits.dtype, np.int64) self.assertEqual(rt_value.shape, (5, None)) self.assertLen(rt_value.nested_row_splits, 1) self.assertAllEqual(splits, rt_value.row_splits) self.assertAllEqual(values, rt_value.values) self.assertAllEqual(splits, rt_value.nested_row_splits[0]) self.assertAllEqual(values, rt_value.flat_values) # Test construction of a RaggedTensorValue with ragged_rank=2. 
rt_value = ragged_tensor_value.RaggedTensorValue( values=ragged_tensor_value.RaggedTensorValue(values, splits), row_splits=splits2) self.assertEqual(rt_value.row_splits.dtype, np.int64) self.assertEqual(rt_value.shape, (2, None, None)) self.assertLen(rt_value.nested_row_splits, 2) self.assertAllEqual(splits2, rt_value.row_splits) self.assertAllEqual(splits, rt_value.values.row_splits) self.assertAllEqual(splits2, rt_value.nested_row_splits[0]) self.assertAllEqual(splits, rt_value.nested_row_splits[1]) self.assertAllEqual(values, rt_value.values.values) self.assertAllEqual(values, rt_value.flat_values) #============================================================================= # RaggedTensor Constructor (private) #============================================================================= def testRaggedTensorConstruction(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64) rt = RaggedTensor(values=values, row_splits=row_splits, internal=True) self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) def testRaggedTensorConstructionErrors(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64) with self.assertRaisesRegexp(ValueError, 'RaggedTensor constructor is private'): RaggedTensor(values=values, row_splits=row_splits) with self.assertRaisesRegexp(TypeError, 'values must be a Tensor or RaggedTensor'): RaggedTensor(values=range(7), row_splits=row_splits, internal=True) with self.assertRaisesRegexp(TypeError, 'Row-partitioning argument must be a Tensor'): RaggedTensor(values=values, row_splits=[0, 2, 2, 5, 6, 7], internal=True) with self.assertRaisesRegexp(ValueError, r'Shape \(6, 1\) must have rank 1'): RaggedTensor( values=values, row_splits=array_ops.expand_dims(row_splits, 1), internal=True) with self.assertRaisesRegexp(TypeError, 'Cached value must be a Tensor or None.'): RaggedTensor( values=values, row_splits=row_splits, cached_row_lengths=[2, 3, 4], internal=True) #============================================================================= # RaggedTensor Factory Ops #============================================================================= def testFromValueRowIdsWithDerivedNRows(self): # nrows is known at graph creation time. values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) rt = RaggedTensor.from_value_rowids(values, value_rowids, validate=False) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [5, None]) self.assertEqual(rt.ragged_rank, 1) rt_values = rt.values rt_value_rowids = rt.value_rowids() rt_nrows = rt.nrows() self.assertIs(rt_values, values) self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids self.assertAllEqual(rt_value_rowids, value_rowids) self.assertAllEqual(rt_nrows, 5) self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) def testFromValueRowIdsWithDerivedNRowsDynamic(self): # nrows is not known at graph creation time. 
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) value_rowids = array_ops.placeholder_with_default(value_rowids, shape=None) rt = RaggedTensor.from_value_rowids(values, value_rowids, validate=False) self.assertEqual(rt.dtype, dtypes.string) if context.executing_eagerly(): self.assertEqual(rt.shape.as_list(), [5, None]) else: self.assertEqual(rt.shape.as_list(), [None, None]) self.assertEqual(rt.ragged_rank, 1) rt_values = rt.values rt_value_rowids = rt.value_rowids() rt_nrows = rt.nrows() self.assertIs(rt_values, values) self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids self.assertAllEqual(rt_value_rowids, value_rowids) self.assertAllEqual(rt_nrows, 5) self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) def testFromValueRowIdsWithExplicitNRows(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) nrows = constant_op.constant(7, dtypes.int64) rt = RaggedTensor.from_value_rowids(values, value_rowids, nrows, validate=False) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [7, None]) self.assertEqual(rt.ragged_rank, 1) rt_values = rt.values rt_value_rowids = rt.value_rowids() rt_nrows = rt.nrows() self.assertIs(rt_values, values) self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids self.assertIs(rt_nrows, nrows) # cached_nrows self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g'], [], []]) def testFromValueRowIdsWithExplicitNRowsEqualToDefault(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) nrows = constant_op.constant(5, dtypes.int64) rt = RaggedTensor.from_value_rowids(values, value_rowids, nrows, validate=False) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [5, None]) self.assertEqual(rt.ragged_rank, 1) rt_values = rt.values rt_value_rowids = rt.value_rowids() rt_nrows = rt.nrows() self.assertIs(rt_values, values) self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids self.assertIs(rt_nrows, nrows) # cached_nrows self.assertAllEqual(rt_value_rowids, value_rowids) self.assertAllEqual(rt_nrows, nrows) self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) def testFromValueRowIdsWithEmptyValues(self): rt = RaggedTensor.from_value_rowids([], []) rt_nrows = rt.nrows() self.assertEqual(rt.dtype, dtypes.float32) self.assertEqual(rt.shape.as_list(), [0, None]) self.assertEqual(rt.ragged_rank, 1) self.assertEqual(rt.values.shape.as_list(), [0]) self.assertEqual(rt.value_rowids().shape.as_list(), [0]) self.assertAllEqual(rt_nrows, 0) self.assertAllEqual(rt, []) def testFromRowSplits(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64) rt = RaggedTensor.from_row_splits(values, row_splits, validate=False) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [5, None]) self.assertEqual(rt.ragged_rank, 1) rt_values = rt.values rt_row_splits = rt.row_splits rt_nrows = rt.nrows() self.assertIs(rt_values, values) self.assertIs(rt_row_splits, row_splits) self.assertAllEqual(rt_nrows, 5) self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) def testFromRowSplitsWithDifferentSplitTypes(self): values = 
constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) splits1 = [0, 2, 2, 5, 6, 7] splits2 = np.array([0, 2, 2, 5, 6, 7], np.int64) splits3 = np.array([0, 2, 2, 5, 6, 7], np.int32) splits4 = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64) splits5 = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int32) rt1 = RaggedTensor.from_row_splits(values, splits1) rt2 = RaggedTensor.from_row_splits(values, splits2) rt3 = RaggedTensor.from_row_splits(values, splits3) rt4 = RaggedTensor.from_row_splits(values, splits4) rt5 = RaggedTensor.from_row_splits(values, splits5) self.assertEqual(rt1.row_splits.dtype, dtypes.int64) self.assertEqual(rt2.row_splits.dtype, dtypes.int64) self.assertEqual(rt3.row_splits.dtype, dtypes.int32) self.assertEqual(rt4.row_splits.dtype, dtypes.int64) self.assertEqual(rt5.row_splits.dtype, dtypes.int32) def testFromRowSplitsWithEmptySplits(self): err_msg = 'row_splits tensor may not be empty' with self.assertRaisesRegexp(ValueError, err_msg): RaggedTensor.from_row_splits([], []) def testFromRowStarts(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) row_starts = constant_op.constant([0, 2, 2, 5, 6], dtypes.int64) rt = RaggedTensor.from_row_starts(values, row_starts, validate=False) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [5, None]) self.assertEqual(rt.ragged_rank, 1) rt_values = rt.values rt_row_starts = rt.row_starts() rt_nrows = rt.nrows() self.assertIs(rt_values, values) self.assertAllEqual(rt_nrows, 5) self.assertAllEqual(rt_row_starts, row_starts) self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) def testFromRowLimits(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) row_limits = constant_op.constant([2, 2, 5, 6, 7], dtypes.int64) rt = RaggedTensor.from_row_limits(values, row_limits, validate=False) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [5, None]) self.assertEqual(rt.ragged_rank, 1) rt_values = rt.values rt_row_limits = rt.row_limits() rt_nrows = rt.nrows() self.assertIs(rt_values, values) self.assertAllEqual(rt_nrows, 5) self.assertAllEqual(rt_row_limits, row_limits) self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) def testFromRowLengths(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) row_lengths = constant_op.constant([2, 0, 3, 1, 1], dtypes.int64) rt = RaggedTensor.from_row_lengths(values, row_lengths, validate=False) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [5, None]) self.assertEqual(rt.ragged_rank, 1) rt_values = rt.values rt_row_lengths = rt.row_lengths() rt_nrows = rt.nrows() self.assertIs(rt_values, values) self.assertIs(rt_row_lengths, row_lengths) # cached_nrows self.assertAllEqual(rt_nrows, 5) self.assertAllEqual(rt_row_lengths, row_lengths) self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) def testFromNestedValueRowIdsWithDerivedNRows(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) nested_value_rowids = [ constant_op.constant([0, 0, 1, 3, 3], dtypes.int64), constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) ] rt = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [4, None, None]) self.assertEqual(rt.ragged_rank, 2) rt_values = rt.values rt_value_rowids = rt.value_rowids() rt_values_values = rt_values.values rt_values_value_rowids = 
rt_values.value_rowids() self.assertIs(rt_values_values, values) self.assertAllEqual(rt_value_rowids, nested_value_rowids[0]) self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1]) self.assertAllEqual( rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]]) def testFromNestedValueRowIdsWithExplicitNRows(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) nested_value_rowids = [ constant_op.constant([0, 0, 1, 3, 3, 3], dtypes.int64), constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) ] nrows = [ constant_op.constant(6, dtypes.int64), constant_op.constant(6, dtypes.int64) ] rt = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids, nrows) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [6, None, None]) self.assertEqual(rt.ragged_rank, 2) rt_values = rt.values rt_value_rowids = rt.value_rowids() rt_nrows = rt.nrows() rt_values_values = rt_values.values rt_values_value_rowids = rt_values.value_rowids() rt_values_nrows = rt_values.nrows() self.assertIs(rt_values_values, values) self.assertAllEqual(rt_value_rowids, nested_value_rowids[0]) self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1]) self.assertAllEqual(rt_nrows, nrows[0]) self.assertAllEqual(rt_values_nrows, nrows[1]) self.assertAllEqual( rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g'], []], [], []]) def testFromNestedValueRowIdsWithExplicitNRowsMismatch(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) nested_value_rowids = [ constant_op.constant([0, 0, 1, 3, 3, 3], dtypes.int64), constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) ] nrows = [constant_op.constant(6, dtypes.int64)] with self.assertRaisesRegexp( ValueError, 'nested_nrows must have the same ' 'length as nested_value_rowids'): RaggedTensor.from_nested_value_rowids(values, nested_value_rowids, nrows) def testFromNestedValueRowIdsWithNonListInput(self): with self.assertRaisesRegexp( TypeError, 'nested_value_rowids must be a list of Tensors'): RaggedTensor.from_nested_value_rowids( [1, 2, 3], constant_op.constant([[0, 1, 2], [0, 1, 2]], dtypes.int64)) with self.assertRaisesRegexp(TypeError, 'nested_nrows must be a list of Tensors'): RaggedTensor.from_nested_value_rowids([1, 2, 3], [[0, 1, 2], [0, 1, 2]], constant_op.constant([3, 3])) def testFromNestedRowSplits(self): flat_values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) nested_row_splits = [ constant_op.constant([0, 2, 3, 3, 5], dtypes.int64), constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64) ] rt = RaggedTensor.from_nested_row_splits(flat_values, nested_row_splits, validate=False) self.assertEqual(rt.dtype, dtypes.string) self.assertEqual(rt.shape.as_list(), [4, None, None]) self.assertEqual(rt.ragged_rank, 2) rt_values = rt.values rt_row_splits = rt.row_splits rt_values_values = rt_values.values rt_values_row_splits = rt_values.row_splits self.assertIs(rt_values_values, flat_values) self.assertIs(rt_row_splits, nested_row_splits[0]) self.assertIs(rt_values_row_splits, nested_row_splits[1]) self.assertAllEqual( rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]]) def testFromNestedRowSplitsWithNonListInput(self): with self.assertRaisesRegexp(TypeError, 'nested_row_splits must be a list of Tensors'): RaggedTensor.from_nested_row_splits( [1, 2], constant_op.constant([[0, 1, 2], [0, 1, 2]], dtypes.int64)) def testFromValueRowIdsWithBadNRows(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) value_rowids = 
constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) nrows = constant_op.constant(5, dtypes.int64) with self.assertRaisesRegexp(ValueError, r'Expected nrows >= 0; got -2'): RaggedTensor.from_value_rowids( values=values, value_rowids=array_ops.placeholder_with_default(value_rowids, None), nrows=-2) with self.assertRaisesRegexp( ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=2, ' r'value_rowids\[-1\]=4'): RaggedTensor.from_value_rowids( values=values, value_rowids=value_rowids, nrows=2) with self.assertRaisesRegexp( ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=4, ' r'value_rowids\[-1\]=4'): RaggedTensor.from_value_rowids( values=values, value_rowids=value_rowids, nrows=4) with self.assertRaisesRegexp(ValueError, r'Shape \(7, 1\) must have rank 1'): RaggedTensor.from_value_rowids( values=values, value_rowids=array_ops.expand_dims(value_rowids, 1), nrows=nrows) with self.assertRaisesRegexp(ValueError, r'Shape \(1,\) must have rank 0'): RaggedTensor.from_value_rowids( values=values, value_rowids=value_rowids, nrows=array_ops.expand_dims(nrows, 0)) def testGraphMismatch(self): if not context.executing_eagerly(): with ops.Graph().as_default(): values = constant_op.constant([1, 2, 3], dtypes.int64) with ops.Graph().as_default(): splits = constant_op.constant([0, 2, 3], dtypes.int64) self.assertRaisesRegexp(ValueError, '.* must be from the same graph as .*', RaggedTensor.from_row_splits, values, splits) #============================================================================= # Ragged Value & Row-Partitioning Tensor Accessors #============================================================================= def testRaggedTensorAccessors_2d(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64) value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) rt1 = RaggedTensor.from_row_splits(values, row_splits) rt2 = RaggedTensor.from_value_rowids(values, value_rowids) for rt in [rt1, rt2]: self.assertAllEqual( rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) self.assertAllEqual(rt.values, [b'a', b'b', b'c', b'd', b'e', b'f', b'g']) self.assertEqual(rt.values.shape.dims[0].value, 7) self.assertAllEqual(rt.value_rowids(), [0, 0, 2, 2, 2, 3, 4]) self.assertAllEqual(rt.nrows(), 5) self.assertAllEqual(rt.row_splits, [0, 2, 2, 5, 6, 7]) self.assertAllEqual(rt.row_starts(), [0, 2, 2, 5, 6]) self.assertAllEqual(rt.row_limits(), [2, 2, 5, 6, 7]) self.assertAllEqual(rt.row_lengths(), [2, 0, 3, 1, 1]) self.assertAllEqual(rt.flat_values, [b'a', b'b', b'c', b'd', b'e', b'f', b'g']) self.assertLen(rt.nested_row_splits, 1) self.assertAllEqual(rt.nested_row_splits[0], [0, 2, 2, 5, 6, 7]) def testRaggedTensorAccessors_3d_with_ragged_rank_1(self): values = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]] row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64) value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) rt1 = RaggedTensor.from_row_splits(values, row_splits) rt2 = RaggedTensor.from_value_rowids(values, value_rowids) for rt in [rt1, rt2]: self.assertAllEqual( rt, [[[0, 1], [2, 3]], [], [[4, 5], [6, 7], [8, 9]], [[10, 11]], [[12, 13]]]) self.assertAllEqual( rt.values, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]) self.assertEqual(rt.values.shape.dims[0].value, 7) self.assertAllEqual( rt.value_rowids(), [0, 0, 2, 2, 2, 3, 4]) self.assertAllEqual(rt.nrows(), 5) self.assertAllEqual(rt.row_splits, [0, 2, 
2, 5, 6, 7]) self.assertAllEqual(rt.row_starts(), [0, 2, 2, 5, 6]) self.assertAllEqual(rt.row_limits(), [2, 2, 5, 6, 7]) self.assertAllEqual(rt.row_lengths(), [2, 0, 3, 1, 1]) self.assertAllEqual( rt.flat_values, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]) self.assertLen(rt.nested_row_splits, 1) self.assertAllEqual(rt.nested_row_splits[0], [0, 2, 2, 5, 6, 7]) self.assertLen(rt.nested_value_rowids(), 1) self.assertAllEqual(rt.nested_value_rowids()[0], [0, 0, 2, 2, 2, 3, 4]) def testRaggedTensorAccessors_3d_with_ragged_rank_2(self): values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g']) nested_row_splits = [ constant_op.constant([0, 2, 3, 3, 5], dtypes.int64), constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64) ] nested_value_rowids = [ constant_op.constant([0, 0, 1, 3, 3], dtypes.int64), constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64) ] rt1 = RaggedTensor.from_nested_row_splits(values, nested_row_splits) rt2 = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids) for rt in [rt1, rt2]: self.assertAllEqual( rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]]) self.assertAllEqual( rt.values, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']]) self.assertEqual(rt.values.shape.dims[0].value, 5) self.assertAllEqual(rt.value_rowids(), [0, 0, 1, 3, 3]) self.assertAllEqual(rt.nrows(), 4) self.assertAllEqual(rt.row_splits, [0, 2, 3, 3, 5]) self.assertAllEqual(rt.row_starts(), [0, 2, 3, 3]) self.assertAllEqual(rt.row_limits(), [2, 3, 3, 5]) self.assertAllEqual(rt.row_lengths(), [2, 1, 0, 2]) self.assertAllEqual( rt.flat_values, [b'a', b'b', b'c', b'd', b'e', b'f', b'g']) self.assertLen(rt.nested_row_splits, 2) self.assertAllEqual(rt.nested_row_splits[0], [0, 2, 3, 3, 5]) self.assertAllEqual(rt.nested_row_splits[1], [0, 2, 2, 5, 6, 7]) self.assertLen(rt.nested_value_rowids(), 2) self.assertAllEqual(rt.nested_value_rowids()[0], [0, 0, 1, 3, 3]) self.assertAllEqual(rt.nested_value_rowids()[1], [0, 0, 2, 2, 2, 3, 4]) #============================================================================= # RaggedTensor.shape #============================================================================= def testShape(self): """Tests for RaggedTensor.shape.""" rt1 = RaggedTensor.from_row_splits(b'a b c d e f g'.split(), [0, 2, 5, 6, 6, 7]) self.assertEqual(rt1.shape.as_list(), [5, None]) rt2 = RaggedTensor.from_row_splits( [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]], [0, 2, 5, 6, 6, 7]) self.assertEqual(rt2.shape.as_list(), [5, None, 2]) rt3 = RaggedTensor.from_row_splits( [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], [0, 2, 2, 3]) self.assertEqual(rt3.shape.as_list(), [3, None, 2, 2]) rt4 = RaggedTensor.from_row_splits(rt3, [0, 1, 3, 3]) self.assertEqual(rt4.shape.as_list(), [3, None, None, 2, 2]) if not context.executing_eagerly(): rt5 = RaggedTensor.from_row_splits( array_ops.placeholder(dtype=dtypes.string), [0, 2, 3, 5]) self.assertEqual(rt5.shape.ndims, None) rt6 = RaggedTensor.from_row_splits( [1, 2, 3], array_ops.placeholder(dtype=dtypes.int64)) self.assertEqual(rt6.shape.as_list(), [None, None]) #============================================================================= # RaggedTensor.__getitem__ #============================================================================= def _TestGetItem(self, rt, slice_spec, expected): """Helper function for testing RaggedTensor.__getitem__. Checks that calling `rt.__getitem__(slice_spec) returns the expected value. 
Checks three different configurations for each slice spec: * Call __getitem__ with the slice spec as-is (with int values) * Call __getitem__ with int values in the slice spec wrapped in `tf.constant()`. * Call __getitem__ with int values in the slice spec wrapped in `tf.compat.v1.placeholder()` (so value is not known at graph construction time). Args: rt: The RaggedTensor to test. slice_spec: The slice spec. expected: The expected value of rt.__getitem__(slice_spec), as a python list; or an exception class. """ tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True) tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False) value1 = rt.__getitem__(slice_spec) value2 = rt.__getitem__(tensor_slice_spec1) value3 = rt.__getitem__(tensor_slice_spec2) self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,)) self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,)) self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,)) def _TestGetItemException(self, rt, slice_spec, expected, message): """Helper function for testing RaggedTensor.__getitem__ exceptions.""" tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True) self.assertRaisesRegexp(expected, message, rt.__getitem__, slice_spec) self.assertRaisesRegexp(expected, message, rt.__getitem__, tensor_slice_spec1) @parameterized.parameters( # Tests for rt[i] (SLICE_BUILDER[-5], EXAMPLE_RAGGED_TENSOR_2D[-5]), (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]), (SLICE_BUILDER[-1], EXAMPLE_RAGGED_TENSOR_2D[-1]), (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]), (SLICE_BUILDER[1], EXAMPLE_RAGGED_TENSOR_2D[1]), (SLICE_BUILDER[4], EXAMPLE_RAGGED_TENSOR_2D[4]), # Tests for rt[i:] (SLICE_BUILDER[-6:], EXAMPLE_RAGGED_TENSOR_2D[-6:]), (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]), (SLICE_BUILDER[-1:], EXAMPLE_RAGGED_TENSOR_2D[-1:]), (SLICE_BUILDER[0:], EXAMPLE_RAGGED_TENSOR_2D[0:]), (SLICE_BUILDER[3:], EXAMPLE_RAGGED_TENSOR_2D[3:]), (SLICE_BUILDER[5:], EXAMPLE_RAGGED_TENSOR_2D[5:]), # Tests for rt[:j] (SLICE_BUILDER[:-6], EXAMPLE_RAGGED_TENSOR_2D[:-6]), (SLICE_BUILDER[:-3], EXAMPLE_RAGGED_TENSOR_2D[:-3]), (SLICE_BUILDER[:-1], EXAMPLE_RAGGED_TENSOR_2D[:-1]), (SLICE_BUILDER[:0], EXAMPLE_RAGGED_TENSOR_2D[:0]), (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]), (SLICE_BUILDER[:5], EXAMPLE_RAGGED_TENSOR_2D[:5]), # Tests for rt[i:j] (SLICE_BUILDER[0:3], EXAMPLE_RAGGED_TENSOR_2D[0:3]), (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]), (SLICE_BUILDER[-5:3], EXAMPLE_RAGGED_TENSOR_2D[-5:3]), (SLICE_BUILDER[3:1], EXAMPLE_RAGGED_TENSOR_2D[3:1]), (SLICE_BUILDER[-1:1], EXAMPLE_RAGGED_TENSOR_2D[-1:1]), (SLICE_BUILDER[1:-1], EXAMPLE_RAGGED_TENSOR_2D[1:-1]), # Tests for rt[i, j] (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]), (SLICE_BUILDER[1, 2], EXAMPLE_RAGGED_TENSOR_2D[1][2]), (SLICE_BUILDER[-1, 0], EXAMPLE_RAGGED_TENSOR_2D[-1][0]), (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]), (SLICE_BUILDER[:], EXAMPLE_RAGGED_TENSOR_2D), (SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_2D), # Empty slice spec. 
([], EXAMPLE_RAGGED_TENSOR_2D), # Test for ellipsis (SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_2D), (SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_2D[2]), (SLICE_BUILDER[..., :], EXAMPLE_RAGGED_TENSOR_2D), (SLICE_BUILDER[..., 2, 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]), (SLICE_BUILDER[2, ..., 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]), (SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_2D[2][0]), # Test for array_ops.newaxis (SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_2D]), (SLICE_BUILDER[:, array_ops.newaxis], [[row] for row in EXAMPLE_RAGGED_TENSOR_2D]), # Slicing inner ragged dimensions. (SLICE_BUILDER[-1:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D[-1:]]), (SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D]), (SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_2D]), # TODO(edloper): Add tests for strided slices, once support is added. ) def testRaggedTensorGetItemWithRaggedRank1(self, slice_spec, expected): """Test that rt.__getitem__(slice_spec) == expected.""" # Ragged tensor rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, EXAMPLE_RAGGED_TENSOR_2D_SPLITS) self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D) self._TestGetItem(rt, slice_spec, expected) # pylint: disable=invalid-slice-index @parameterized.parameters( # Tests for out-of-bound errors (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), (SLICE_BUILDER[-6], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), (SLICE_BUILDER[0, 2], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), (SLICE_BUILDER[3, 0], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), # Indexing into an inner ragged dimension (SLICE_BUILDER[:, 3], ValueError, 'Cannot index into an inner ragged dimension'), (SLICE_BUILDER[:1, 3], ValueError, 'Cannot index into an inner ragged dimension'), (SLICE_BUILDER[..., 3], ValueError, 'Cannot index into an inner ragged dimension'), # Tests for type errors (SLICE_BUILDER[0.5], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)), (SLICE_BUILDER[1:3:0.5], TypeError, re.escape( array_ops._SLICE_TYPE_ERROR)), (SLICE_BUILDER[:, 1:3:0.5], TypeError, 'slice strides must be integers or None'), (SLICE_BUILDER[:, 0.5:1.5], TypeError, 'slice offsets must be integers or None'), (SLICE_BUILDER['foo'], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)), (SLICE_BUILDER[:, 'foo':'foo'], TypeError, 'slice offsets must be integers or None'), # Tests for other errors (SLICE_BUILDER[..., 0, 0, 0], IndexError, 'Too many indices for RaggedTensor'), ) def testRaggedTensorGetItemErrorsWithRaggedRank1(self, slice_spec, expected, message): """Test that rt.__getitem__(slice_spec) == expected.""" # Ragged tensor rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, EXAMPLE_RAGGED_TENSOR_2D_SPLITS) self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D) self._TestGetItemException(rt, slice_spec, expected, message) @parameterized.parameters( # Tests for rt[index, index, ...] (SLICE_BUILDER[2, 0], EXAMPLE_RAGGED_TENSOR_4D[2][0]), (SLICE_BUILDER[2, 0, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]), (SLICE_BUILDER[2, 0, 1, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1][1]), (SLICE_BUILDER[2, 0, 1:], EXAMPLE_RAGGED_TENSOR_4D[2][0][1:]), (SLICE_BUILDER[2, 0, 1:, 1:], [[16], [18]]), (SLICE_BUILDER[2, 0, :, 1], [14, 16, 18]), (SLICE_BUILDER[2, 0, 1, :], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]), # Tests for rt[index, slice, ...] 
(SLICE_BUILDER[0, :], EXAMPLE_RAGGED_TENSOR_4D[0]), (SLICE_BUILDER[1, :], EXAMPLE_RAGGED_TENSOR_4D[1]), (SLICE_BUILDER[0, :, :, 1], [[2, 4, 6], [8, 10, 12]]), (SLICE_BUILDER[1, :, :, 1], []), (SLICE_BUILDER[2, :, :, 1], [[14, 16, 18]]), (SLICE_BUILDER[3, :, :, 1], [[20]]), # Tests for rt[slice, slice, ...] (SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_4D), (SLICE_BUILDER[:, :, :, 1], [[[2, 4, 6], [8, 10, 12]], [], [[14, 16, 18]], [[20]]]), (SLICE_BUILDER[1:, :, :, 1], [[], [[14, 16, 18]], [[20]]]), (SLICE_BUILDER[-3:, :, :, 1], [[], [[14, 16, 18]], [[20]]]), # Test for ellipsis (SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_4D), (SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_4D[2]), (SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_4D[2][0]), (SLICE_BUILDER[..., 0], [[[1, 3, 5], [7, 9, 11]], [], [[13, 15, 17]], [[19]]]), (SLICE_BUILDER[2, ..., 0], [[13, 15, 17]]), (SLICE_BUILDER[2, 0, ..., 0], [13, 15, 17]), # Test for array_ops.newaxis (SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_4D]), (SLICE_BUILDER[:, array_ops.newaxis], [[row] for row in EXAMPLE_RAGGED_TENSOR_4D]), # Empty slice spec. ([], EXAMPLE_RAGGED_TENSOR_4D), # Slicing inner ragged dimensions. (SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_4D]), (SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_4D]), (SLICE_BUILDER[:, :, :-1], [[v[:-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]), (SLICE_BUILDER[:, :, 1:2], [[v[1:2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]), (SLICE_BUILDER[1:, 1:3, 1:2], [[v[1:2] for v in row[1:3]] for row in EXAMPLE_RAGGED_TENSOR_4D[1:]]), # Strided slices (SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_4D[::2]), (SLICE_BUILDER[1::2], EXAMPLE_RAGGED_TENSOR_4D[1::2]), (SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_4D]), (SLICE_BUILDER[:, 1::2], [row[1::2] for row in EXAMPLE_RAGGED_TENSOR_4D]), (SLICE_BUILDER[:, :, ::2], [[v[::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]), (SLICE_BUILDER[:, :, 1::2], [[v[1::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]), # TODO(edloper): Add tests for strided slices, once support is added. # TODO(edloper): Add tests slicing inner ragged dimensions, one support # is added. ) def testRaggedTensorGetItemWithRaggedRank2(self, slice_spec, expected): """Test that rt.__getitem__(slice_spec) == expected.""" rt = RaggedTensor.from_nested_row_splits( EXAMPLE_RAGGED_TENSOR_4D_VALUES, [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2]) self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D) self._TestGetItem(rt, slice_spec, expected) @parameterized.parameters( # Test for errors in unsupported cases (SLICE_BUILDER[:, 0], ValueError, 'Cannot index into an inner ragged dimension.'), (SLICE_BUILDER[:, :, 0], ValueError, 'Cannot index into an inner ragged dimension.'), # Test for out-of-bounds errors. 
(SLICE_BUILDER[1, 0], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), (SLICE_BUILDER[0, 0, 3], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), (SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), (SLICE_BUILDER[0, 5], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), ) def testRaggedTensorGetItemErrorsWithRaggedRank2(self, slice_spec, expected, message): """Test that rt.__getitem__(slice_spec) == expected.""" rt = RaggedTensor.from_nested_row_splits( EXAMPLE_RAGGED_TENSOR_4D_VALUES, [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2]) self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D) self._TestGetItemException(rt, slice_spec, expected, message) @parameterized.parameters( (SLICE_BUILDER[:], []), (SLICE_BUILDER[2:], []), (SLICE_BUILDER[:-3], []), ) def testRaggedTensorGetItemWithEmptyTensor(self, slice_spec, expected): """Test that rt.__getitem__(slice_spec) == expected.""" rt = RaggedTensor.from_row_splits([], [0]) self._TestGetItem(rt, slice_spec, expected) @parameterized.parameters( (SLICE_BUILDER[0], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), (SLICE_BUILDER[-1], (IndexError, ValueError, errors.InvalidArgumentError), '.*out of bounds.*'), ) def testRaggedTensorGetItemErrorsWithEmptyTensor(self, slice_spec, expected, message): """Test that rt.__getitem__(slice_spec) == expected.""" rt = RaggedTensor.from_row_splits([], [0]) self._TestGetItemException(rt, slice_spec, expected, message) @parameterized.parameters( (SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]), (SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]), (SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]), (SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]), (SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]), (SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]), (SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]), ) def testRaggedTensorGetItemWithPlaceholderShapes(self, slice_spec, expected): """Test that rt.__getitem__(slice_spec) == expected.""" # Intentionally use an unknown shape for `splits`, to force the code path # that deals with having nrows unknown at graph construction time. splits = constant_op.constant( EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64) splits = array_ops.placeholder_with_default(splits, None) rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits) self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D) self._TestGetItem(rt, slice_spec, expected) @parameterized.parameters( (SLICE_BUILDER[..., 2], ValueError, 'Ellipsis not supported for unknown shape RaggedTensors'),) def testRaggedTensorGetItemErrorsWithPlaceholderShapes( self, slice_spec, expected, message): """Test that rt.__getitem__(slice_spec) == expected.""" if not context.executing_eagerly(): # Intentionally use an unknown shape for `values`. 
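      # A fully unknown `values` shape means the RaggedTensor's rank is unknown,
      # so __getitem__ cannot determine how many dimensions the Ellipsis should
      # expand to and raises the ValueError expected above.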
values = array_ops.placeholder_with_default([0], None) rt = RaggedTensor.from_row_splits(values, [0, 1]) self._TestGetItemException(rt, slice_spec, expected, message) def testGetItemNewAxis(self): # rt: [[[['a', 'b'], ['c', 'd']], [], [['e', 'f']]], []] splits1 = [0, 3, 3] splits2 = [0, 2, 2, 3] values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']]) rt = RaggedTensor.from_nested_row_splits(values, [splits1, splits2]) rt_newaxis0 = rt[array_ops.newaxis] rt_newaxis1 = rt[:, array_ops.newaxis] rt_newaxis2 = rt[:, :, array_ops.newaxis] rt_newaxis3 = rt[:, :, :, array_ops.newaxis] rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis] self.assertAllEqual( rt, [[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]) self.assertAllEqual( rt_newaxis0, [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]]) self.assertAllEqual( rt_newaxis1, [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]]) self.assertAllEqual( rt_newaxis2, [[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []]) self.assertAllEqual( rt_newaxis3, [[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []]) self.assertAllEqual( rt_newaxis4, [[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []]) self.assertEqual(rt.ragged_rank, 2) self.assertEqual(rt_newaxis0.ragged_rank, 3) self.assertEqual(rt_newaxis1.ragged_rank, 3) self.assertEqual(rt_newaxis2.ragged_rank, 3) self.assertEqual(rt_newaxis3.ragged_rank, 2) self.assertEqual(rt_newaxis4.ragged_rank, 2) self.assertEqual(rt_newaxis0.shape.as_list(), [1, None, None, None, 2]) self.assertEqual(rt_newaxis1.shape.as_list(), [2, None, None, None, 2]) self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, None, None, 2]) self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2]) self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1]) #============================================================================= # RaggedTensor.__str__ #============================================================================= def testRaggedTensorStr(self): values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g'] row_splits = [0, 2, 5, 6, 6, 7] rt = RaggedTensor.from_row_splits(values, row_splits, validate=False) splits_type = 'int64' if context.executing_eagerly(): expected_repr = '<tf.RaggedTensor {}>'.format([[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [], [b'g']]) else: expected_repr = ( 'tf.RaggedTensor(values=Tensor("RaggedFromRowSplits/values:0", ' 'shape=(7,), dtype=string), row_splits=' 'Tensor("RaggedFromRowSplits/row_splits:0", ' 'shape=(6,), dtype={}))').format(splits_type) self.assertEqual(repr(rt), expected_repr) self.assertEqual(str(rt), expected_repr) def testRaggedTensorValueStr(self): values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g'] row_splits = [0, 2, 5, 6, 6, 7] rt = ragged_tensor_value.RaggedTensorValue( np.array(values), np.array(row_splits, dtype=np.int64)) expected_str = '<tf.RaggedTensorValue {}>'.format([[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [], [b'g']]) expected_repr = ("tf.RaggedTensorValue(values=array({}, dtype='|S1'), " 'row_splits=array({}))'.format(values, row_splits)) self.assertEqual(' '.join(str(rt).split()), expected_str) self.assertEqual(' '.join(repr(rt).split()), expected_repr) #============================================================================= # RaggedTensor.with_values() and RaggedTensor.with_flat_values(). 
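# with_values() replaces the tensor one level below the outermost ragged
# dimension, while with_flat_values() replaces the innermost flat_values tensor
# and keeps every level of nested row splits; the tests below exercise both.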
#============================================================================= def testWithValues(self): rt1 = ragged_factory_ops.constant([[1, 2], [3, 4, 5], [6], [], [7]]) rt2 = ragged_factory_ops.constant([[[1, 2], [3, 4, 5]], [[6]], [], [[], [7]]]) rt1_plus_10 = rt1.with_values(rt1.values + 10) rt2_times_10 = rt2.with_flat_values(rt2.flat_values * 10) rt1_expanded = rt1.with_values(array_ops.expand_dims(rt1.values, axis=1)) self.assertAllEqual( rt1_plus_10, [[11, 12], [13, 14, 15], [16], [], [17]]) self.assertAllEqual( rt2_times_10, [[[10, 20], [30, 40, 50]], [[60]], [], [[], [70]]]) self.assertAllEqual( rt1_expanded, [[[1], [2]], [[3], [4], [5]], [[6]], [], [[7]]]) #============================================================================= # Session.run #============================================================================= def testSessionRun(self): if context.executing_eagerly(): return rt1 = ragged_factory_ops.constant([[1, 2, 3], [4]]) rt2 = ragged_factory_ops.constant([[[], [1, 2]], [[3]]]) with self.test_session() as session: result = session.run({'rt1': rt1, 'rt2': rt2}) self.assertCountEqual(result.keys(), ['rt1', 'rt2']) self.assertEqual(result['rt1'].to_list(), [[1, 2, 3], [4]]) self.assertEqual(result['rt2'].to_list(), [[[], [1, 2]], [[3]]]) def testSessionRunFeed(self): if context.executing_eagerly(): return rt1 = RaggedTensor.from_row_splits( array_ops.placeholder(dtypes.int32), array_ops.placeholder(dtypes.int64)) rt2 = RaggedTensor.from_nested_row_splits( array_ops.placeholder(dtypes.int32), [ array_ops.placeholder(dtypes.int64), array_ops.placeholder(dtypes.int64) ]) rt1_feed_val = ragged_factory_ops.constant_value([[1, 2, 3], [4]]) rt2_feed_val = ragged_factory_ops.constant_value([[[], [1, 2]], [[3]]]) with self.test_session() as session: fetches = {'rt1': rt1, 'rt2': rt2} feeds = {rt1: rt1_feed_val, rt2: rt2_feed_val} result = session.run(fetches, feed_dict=feeds) self.assertCountEqual(result.keys(), ['rt1', 'rt2']) self.assertEqual(result['rt1'].to_list(), [[1, 2, 3], [4]]) self.assertEqual(result['rt2'].to_list(), [[[], [1, 2]], [[3]]]) def testSessionPartialRunFeed(self): if context.executing_eagerly(): return # Placeholder inputs. a = RaggedTensor.from_row_splits( array_ops.placeholder(dtypes.int32, shape=[None], name='a.values'), array_ops.placeholder(dtypes.int64, name='a.row_splits')) b = RaggedTensor.from_row_splits( array_ops.placeholder(dtypes.int32, shape=[None], name='b.values'), array_ops.placeholder(dtypes.int64, name='b.row_splits')) c = array_ops.placeholder(dtypes.int32, shape=[], name='c') # Feed values for placeholder inputs. a_val = ragged_factory_ops.constant_value([[1, 2, 3], [4]]) b_val = ragged_factory_ops.constant_value([[5, 4, 3], [2]]) c_val = 3 # Compute some values. r1 = ragged_math_ops.reduce_sum(a * b, axis=1) r2 = ragged_math_ops.reduce_sum(a + c, axis=1) with self.test_session() as session: handle = session.partial_run_setup([r1, r2], [a, b, c]) res1 = session.partial_run(handle, r1, feed_dict={a: a_val, b: b_val}) self.assertAllEqual(res1, [22, 8]) res2 = session.partial_run(handle, r2, feed_dict={c: c_val}) self.assertAllEqual(res2, [15, 7]) # Test case for GitHub issue 24679. 
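  # The test below iterates over a RaggedTensor with a plain Python `for` loop
  # in eager mode, which is the scenario covered by the issue referenced above.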
def testEagerForLoop(self): if not context.executing_eagerly(): return values = [[1., 2.], [3., 4., 5.], [6.]] r = ragged_factory_ops.constant(values) i = 0 for elem in r: self.assertAllEqual(elem, values[i]) i += 1 def testConsumers(self): if context.executing_eagerly(): return a = RaggedTensor.from_row_splits( array_ops.placeholder(dtypes.int32, shape=[None], name='a.values'), array_ops.placeholder(dtypes.int64, name='a.row_splits'), validate=False) ragged_math_ops.reduce_sum(a) self.assertLen(a.consumers(), 1) @parameterized.parameters([ # from_value_rowids {'descr': 'bad rank for value_rowids', 'factory': RaggedTensor.from_value_rowids, 'values': [[1, 2], [3, 4]], 'value_rowids': [[1, 2], [3, 4]], 'nrows': 10}, {'descr': 'bad rank for nrows', 'factory': RaggedTensor.from_value_rowids, 'values': [1, 2, 3, 4], 'value_rowids': [1, 2, 3, 4], 'nrows': [10]}, {'descr': 'len(values) != len(value_rowids)', 'factory': RaggedTensor.from_value_rowids, 'values': [1, 2, 3, 4], 'value_rowids': [1, 2, 3, 4, 5], 'nrows': 10}, {'descr': 'negative value_rowid', 'factory': RaggedTensor.from_value_rowids, 'values': [1, 2, 3, 4], 'value_rowids': [-5, 2, 3, 4], 'nrows': 10}, {'descr': 'non-monotonic-increasing value_rowid', 'factory': RaggedTensor.from_value_rowids, 'values': [1, 2, 3, 4], 'value_rowids': [4, 3, 2, 1], 'nrows': 10}, {'descr': 'value_rowid > nrows', 'factory': RaggedTensor.from_value_rowids, 'values': [1, 2, 3, 4], 'value_rowids': [1, 2, 3, 4], 'nrows': 2}, {'descr': 'bad rank for values', 'factory': RaggedTensor.from_value_rowids, 'values': 10, 'value_rowids': [1, 2, 3, 4], 'nrows': 10}, # from_row_splits {'descr': 'bad rank for row_splits', 'factory': RaggedTensor.from_row_splits, 'values': [[1, 2], [3, 4]], 'row_splits': [[1, 2], [3, 4]]}, {'descr': 'row_splits[0] != 0', 'factory': RaggedTensor.from_row_splits, 'values': [1, 2, 3, 4], 'row_splits': [2, 3, 4]}, {'descr': 'non-monotonic-increasing row_splits', 'factory': RaggedTensor.from_row_splits, 'values': [1, 2, 3, 4], 'row_splits': [0, 3, 2, 4]}, {'descr': 'row_splits[0] != nvals', 'factory': RaggedTensor.from_row_splits, 'values': [1, 2, 3, 4], 'row_splits': [0, 2, 3, 5]}, {'descr': 'bad rank for values', 'factory': RaggedTensor.from_row_splits, 'values': 10, 'row_splits': [0, 1]}, # from_row_lengths {'descr': 'bad rank for row_lengths', 'factory': RaggedTensor.from_row_lengths, 'values': [1, 2, 3, 4], 'row_lengths': [[1, 2], [1, 0]]}, {'descr': 'negative row_lengths', 'factory': RaggedTensor.from_row_lengths, 'values': [1, 2, 3, 4], 'row_lengths': [3, -1, 2]}, {'descr': 'sum(row_lengths) != nvals', 'factory': RaggedTensor.from_row_lengths, 'values': [1, 2, 3, 4], 'row_lengths': [2, 4, 2, 8]}, {'descr': 'bad rank for values', 'factory': RaggedTensor.from_row_lengths, 'values': 10, 'row_lengths': [0, 1]}, # from_row_starts {'descr': 'bad rank for row_starts', 'factory': RaggedTensor.from_row_starts, 'values': [[1, 2], [3, 4]], 'row_starts': [[1, 2], [3, 4]]}, {'descr': 'row_starts[0] != 0', 'factory': RaggedTensor.from_row_starts, 'values': [1, 2, 3, 4], 'row_starts': [2, 3, 4]}, {'descr': 'non-monotonic-increasing row_starts', 'factory': RaggedTensor.from_row_starts, 'values': [1, 2, 3, 4], 'row_starts': [0, 3, 2, 4]}, {'descr': 'row_starts[0] > nvals', 'factory': RaggedTensor.from_row_starts, 'values': [1, 2, 3, 4], 'row_starts': [0, 2, 3, 5]}, {'descr': 'bad rank for values', 'factory': RaggedTensor.from_row_starts, 'values': 10, 'row_starts': [0, 1]}, # from_row_limits {'descr': 'bad rank for row_limits', 'factory':
RaggedTensor.from_row_limits, 'values': [[1, 2], [3, 4]], 'row_limits': [[1, 2], [3, 4]]}, {'descr': 'row_limits[0] < 0', 'factory': RaggedTensor.from_row_limits, 'values': [1, 2, 3, 4], 'row_limits': [-1, 3, 4]}, {'descr': 'non-monotonic-increasing row_limits', 'factory': RaggedTensor.from_row_limits, 'values': [1, 2, 3, 4], 'row_limits': [0, 3, 2, 4]}, {'descr': 'row_limits[0] != nvals', 'factory': RaggedTensor.from_row_limits, 'values': [1, 2, 3, 4], 'row_limits': [0, 2, 3, 5]}, {'descr': 'bad rank for values', 'factory': RaggedTensor.from_row_limits, 'values': 10, 'row_limits': [0, 1]}, ]) def testFactoryValidation(self, descr, factory, **kwargs): # When input tensors have shape information, some of these errors will be # detected statically. with self.assertRaises((errors.InvalidArgumentError, ValueError)): self.evaluate(factory(**kwargs)) # Remove shape information (by wrapping tensors in placeholders), and check # that we detect the errors when the graph is run. if not context.executing_eagerly(): def wrap_arg(v): return array_ops.placeholder_with_default( constant_op.constant(v, dtype=dtypes.int64), tensor_shape.TensorShape(None)) kwargs = dict((k, wrap_arg(v)) for (k, v) in kwargs.items()) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(factory(**kwargs)) #============================================================================= # RaggedTensor Variant conversion #============================================================================= @parameterized.parameters( { 'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]], 'ragged_rank': 1 }, { 'ragged_constant': [[[1, 2]], [], [[3, 4]], []], 'ragged_rank': 1 }, { 'ragged_constant': [[[1], [2, 3, 4, 5, 6, 7]], [[]]], 'ragged_rank': 2 }) def testRaggedToVariant(self, ragged_constant, ragged_rank): rt = ragged_factory_ops.constant(ragged_constant, ragged_rank=ragged_rank) et = rt._to_variant() self.assertEqual(et.shape.as_list(), []) self.assertEqual(et.dtype, dtypes.variant) @parameterized.parameters( { 'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]], 'ragged_rank': 1, 'num_batched_elems': 5 }, { 'ragged_constant': [[[1, 2]], [], [[3, 4]], []], 'ragged_rank': 1, 'num_batched_elems': 4 }, { 'ragged_constant': [[[1], [2, 3, 4, 5, 6, 7]], [[]]], 'ragged_rank': 2, 'num_batched_elems': 2 }) def testRaggedToBatchedVariant(self, ragged_constant, ragged_rank, num_batched_elems): rt = ragged_factory_ops.constant(ragged_constant, ragged_rank=ragged_rank) et = rt._to_variant(batched_input=True) self.assertEqual(et.shape.as_list(), [num_batched_elems]) self.assertEqual(et.dtype, dtypes.variant) @parameterized.parameters( # 2D test cases. { 'ragged_constant': [[]], 'ragged_rank': 1, }, { 'ragged_constant': [[1]], 'ragged_rank': 1, }, { 'ragged_constant': [[1, 2]], 'ragged_rank': 1, }, { 'ragged_constant': [[1], [2], [3]], 'ragged_rank': 1, }, { 'ragged_constant': [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 'ragged_rank': 1, }, { 'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]], 'ragged_rank': 1, }, # 3D test cases. { 'ragged_constant': [[[]]], 'ragged_rank': 2, }, { 'ragged_constant': [[[1]]], 'ragged_rank': 2, }, { 'ragged_constant': [[[1, 2]]], 'ragged_rank': 2, }, { 'ragged_constant': [[[1, 2], [3, 4]]], 'ragged_rank': 2, }, { 'ragged_constant': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]], 'ragged_rank': 2, }, { 'ragged_constant': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]], 'ragged_rank': 2, }, { 'ragged_constant': [[[1, 2]], [], [[3, 4]], []], 'ragged_rank': 2, }, # 4D test cases.
{ 'ragged_constant': [[[[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]], []], 'ragged_rank': 3, }, # dtype `string`. { 'ragged_constant': [['a'], ['b'], ['c']], 'ragged_rank': 1, 'dtype': dtypes.string, }, { 'ragged_constant': [[['a', 'b'], ['c', 'd']]], 'ragged_rank': 2, 'dtype': dtypes.string, }, { 'ragged_constant': [[[['a', 'b'], ['c', 'd']]], [[['e', 'f'], ['g', 'h']], [['i', 'j'], ['k', 'l']]], []], 'ragged_rank': 3, 'dtype': dtypes.string, }) def testVariantRoundTrip(self, ragged_constant, ragged_rank, dtype=dtypes.int32): rt = ragged_factory_ops.constant( ragged_constant, ragged_rank=ragged_rank, dtype=dtype) et = rt._to_variant() round_trip_rt = RaggedTensor._from_variant( et, dtype, output_ragged_rank=ragged_rank) self.assertAllEqual(rt, round_trip_rt) def testBatchedVariantRoundTripInputRaggedRankInferred(self): ragged_rank = 1 rt = ragged_factory_ops.constant( [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]], ragged_rank=ragged_rank) batched_variant = rt._to_variant(batched_input=True) nested_batched_variant = array_ops.reshape(batched_variant, [5, 2]) decoded_rt = RaggedTensor._from_variant( nested_batched_variant, dtype=dtypes.int32, output_ragged_rank=ragged_rank + 1) expected_rt = ragged_factory_ops.constant([[[0], [1]], [[2], [3]], [[4], [5]], [[6], [7]], [[8], [9]]]) self.assertAllEqual(decoded_rt, expected_rt) def testBatchedVariantRoundTripWithInputRaggedRank(self): ragged_rank = 1 rt = ragged_factory_ops.constant( [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]], ragged_rank=ragged_rank) batched_variant = rt._to_variant(batched_input=True) nested_batched_variant = array_ops.reshape(batched_variant, [5, 2]) decoded_rt = RaggedTensor._from_variant( nested_batched_variant, dtype=dtypes.int32, output_ragged_rank=ragged_rank + 1, input_ragged_rank=ragged_rank - 1) expected_rt = ragged_factory_ops.constant([[[0], [1]], [[2], [3]], [[4], [5]], [[6], [7]], [[8], [9]]]) self.assertAllEqual(decoded_rt, expected_rt) def testFromVariantInvalidParams(self): rt = ragged_factory_ops.constant([[0], [1], [2], [3]]) batched_variant = rt._to_variant(batched_input=True) nested_batched_variant = array_ops.reshape(batched_variant, [2, 2]) with self.assertRaisesRegexp(ValueError, 'output_ragged_rank must be equal to'): RaggedTensor._from_variant( nested_batched_variant, dtype=dtypes.int32, output_ragged_rank=1, input_ragged_rank=1) @test_util.run_all_in_graph_and_eager_modes class RaggedTensorSpecTest(test_util.TensorFlowTestCase, parameterized.TestCase): def assertAllTensorsEqual(self, list1, list2): self.assertLen(list1, len(list2)) for (t1, t2) in zip(list1, list2): self.assertAllEqual(t1, t2) def testConstruction(self): spec1 = RaggedTensorSpec(ragged_rank=1) self.assertEqual(spec1._shape.rank, None) self.assertEqual(spec1._dtype, dtypes.float32) self.assertEqual(spec1._row_splits_dtype, dtypes.int64) self.assertEqual(spec1._ragged_rank, 1) spec2 = RaggedTensorSpec(shape=[None, None, None]) self.assertEqual(spec2._shape.as_list(), [None, None, None]) self.assertEqual(spec2._dtype, dtypes.float32) self.assertEqual(spec2._row_splits_dtype, dtypes.int64) self.assertEqual(spec2._ragged_rank, 2) with self.assertRaisesRegexp(ValueError, 'Must specify ragged_rank'): RaggedTensorSpec() with self.assertRaisesRegexp(TypeError, 'ragged_rank must be an int'): RaggedTensorSpec(ragged_rank=constant_op.constant(1)) with self.assertRaisesRegexp(ValueError, 'ragged_rank must be less than rank'): RaggedTensorSpec(ragged_rank=2, shape=[None, None]) def testValueType(self): spec1 = 
RaggedTensorSpec(ragged_rank=1) self.assertEqual(spec1.value_type, RaggedTensor) spec2 = RaggedTensorSpec(ragged_rank=0) self.assertEqual(spec2.value_type, ops.Tensor) @parameterized.parameters([ (RaggedTensorSpec(ragged_rank=1), (tensor_shape.TensorShape(None), dtypes.float32, 1, dtypes.int64)), (RaggedTensorSpec(shape=[5, None, None]), (tensor_shape.TensorShape([5, None, None]), dtypes.float32, 2, dtypes.int64)), (RaggedTensorSpec(shape=[5, None, None], dtype=dtypes.int32), (tensor_shape.TensorShape([5, None, None]), dtypes.int32, 2, dtypes.int64)), (RaggedTensorSpec(ragged_rank=1, row_splits_dtype=dtypes.int32), (tensor_shape.TensorShape(None), dtypes.float32, 1, dtypes.int32)), ]) # pyformat: disable def testSerialize(self, rt_spec, expected): serialization = rt_spec._serialize() # TensorShape has an unconventional definition of equality, so we can't use # assertEqual directly here. But repr() is deterministic and lossless for # the expected values, so we can use that instead. self.assertEqual(repr(serialization), repr(expected)) @parameterized.parameters([ (RaggedTensorSpec(ragged_rank=0, shape=[5, 3]), [ tensor_spec.TensorSpec([5, 3], dtypes.float32), ]), (RaggedTensorSpec(ragged_rank=1), [ tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec([None], dtypes.int64) ]), (RaggedTensorSpec(ragged_rank=1, row_splits_dtype=dtypes.int32), [ tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec([None], dtypes.int32), ]), (RaggedTensorSpec(ragged_rank=2), [ tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec([None], dtypes.int64), tensor_spec.TensorSpec([None], dtypes.int64), ]), (RaggedTensorSpec(shape=[5, None, None], dtype=dtypes.string), [ tensor_spec.TensorSpec([None], dtypes.string), tensor_spec.TensorSpec([6], dtypes.int64), tensor_spec.TensorSpec([None], dtypes.int64), ]), ]) def testComponentSpecs(self, rt_spec, expected): self.assertEqual(rt_spec._component_specs, expected) @parameterized.parameters([ { 'rt_spec': RaggedTensorSpec(ragged_rank=0), 'rt': [1.0, 2.0, 3.0], 'components': [[1.0, 2.0, 3.0]] }, { 'rt_spec': RaggedTensorSpec(ragged_rank=1), 'rt': [[1.0, 2.0], [3.0]], 'components': [[1.0, 2.0, 3.0], [0, 2, 3]] }, { 'rt_spec': RaggedTensorSpec(shape=[2, None, None]), 'rt': [[[1.0, 2.0], [3.0]], [[], [4.0]]], 'components': [[1.0, 2.0, 3.0, 4.0], [0, 2, 4], [0, 2, 3, 3, 4]] }, ]) def testToFromComponents(self, rt_spec, rt, components): rt = ragged_factory_ops.constant(rt) actual_components = rt_spec._to_components(rt) self.assertAllTensorsEqual(actual_components, components) rt_reconstructed = rt_spec._from_components(actual_components) self.assertAllEqual(rt, rt_reconstructed) @test_util.run_v1_only('RaggedTensorValue is deprecated in v2') def testFromNumpyComponents(self): spec1 = RaggedTensorSpec(ragged_rank=1, dtype=dtypes.int32) rt1 = spec1._from_components([np.array([1, 2, 3]), np.array([0, 2, 3])]) self.assertIsInstance(rt1, ragged_tensor_value.RaggedTensorValue) self.assertAllEqual(rt1, [[1, 2], [3]]) spec2 = RaggedTensorSpec(ragged_rank=2, dtype=dtypes.int32) rt2 = spec2._from_components([np.array([1, 2, 3]), np.array([0, 2, 3]), np.array([0, 0, 2, 3])]) self.assertIsInstance(rt2, ragged_tensor_value.RaggedTensorValue) self.assertAllEqual(rt2, [[[], [1, 2]], [[3]]]) spec3 = RaggedTensorSpec(ragged_rank=0, dtype=dtypes.int32) rt3 = spec3._from_components([np.array([1, 2, 3])]) self.assertIsInstance(rt3, np.ndarray) self.assertAllEqual(rt3, [1, 2, 3]) @parameterized.parameters([ RaggedTensorSpec(ragged_rank=0, shape=[5, 3]), 
RaggedTensorSpec(ragged_rank=1), RaggedTensorSpec(ragged_rank=1, row_splits_dtype=dtypes.int32), RaggedTensorSpec(ragged_rank=2, dtype=dtypes.string), RaggedTensorSpec(shape=[5, None, None]), ]) def testFlatTensorSpecs(self, rt_spec): self.assertEqual(rt_spec._flat_tensor_specs, [tensor_spec.TensorSpec(None, dtypes.variant)]) @parameterized.parameters([ { 'rt_spec': RaggedTensorSpec(ragged_rank=1), 'rt': [[1.0, 2.0], [3.0]] }, { 'rt_spec': RaggedTensorSpec(shape=[2, None, None]), 'rt': [[[1.0, 2.0], [3.0]], [[], [4.0]]] }, ]) def testToFromTensorList(self, rt_spec, rt): rt = ragged_factory_ops.constant(rt) tensor_list = rt_spec._to_tensor_list(rt) rt_reconstructed = rt_spec._from_tensor_list(tensor_list) self.assertAllEqual(rt, rt_reconstructed) @parameterized.parameters([ (RaggedTensorSpec([2, None], dtypes.float32, 1), 32, RaggedTensorSpec([32, 2, None], dtypes.float32, 2)), (RaggedTensorSpec([4, None], dtypes.float32, 1), None, RaggedTensorSpec([None, 4, None], dtypes.float32, 2)), (RaggedTensorSpec([2], dtypes.float32, -1), 32, RaggedTensorSpec([32, 2], dtypes.float32, 0)), ]) def testBatch(self, spec, batch_size, expected): self.assertEqual(spec._batch(batch_size), expected) @parameterized.parameters([ (RaggedTensorSpec([32, None, None], dtypes.float32, 2), RaggedTensorSpec([None, None], dtypes.float32, 1)), (RaggedTensorSpec([None, None, None], dtypes.float32, 2), RaggedTensorSpec([None, None], dtypes.float32, 1)), (RaggedTensorSpec([32, 2], dtypes.float32, 0), RaggedTensorSpec([2], dtypes.float32, -1)), ]) # pyformat: disable def testUnbatch(self, spec, expected): self.assertEqual(spec._unbatch(), expected) if __name__ == '__main__': googletest.main()
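# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): a minimal usage example of
# the RaggedTensor APIs exercised above. It assumes the module-level imports
# already used by the tests (ragged_factory_ops, RaggedTensor); the helper name
# `_example_ragged_usage` is ours, not TensorFlow's, and is never called here.
# -----------------------------------------------------------------------------
def _example_ragged_usage():
  # Build the same ragged value two ways: from a nested Python list, and from a
  # flat values list plus row_splits boundaries.
  rt1 = ragged_factory_ops.constant([[1, 2], [3, 4, 5], [6], [], [7]])
  rt2 = RaggedTensor.from_row_splits(
      values=[1, 2, 3, 4, 5, 6, 7], row_splits=[0, 2, 5, 6, 6, 7])
  # Indexing and slicing mirror the parameterized cases above: rt1[0] is the
  # first row, rt1[1:] drops it, and rt1[:, :2] keeps at most two items per row.
  first_row = rt1[0]
  tail_rows = rt1[1:]
  clipped = rt1[:, :2]
  # with_values() rebuilds the tensor with new values but the same row splits.
  shifted = rt2.with_values(rt2.values + 10)
  return first_row, tail_rows, clipped, shifted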
tensorflow-r1.15.5-nv23.03
tensorflow/python/ops/ragged/ragged_tensor_test.py