# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like the identity matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorIdentity",
"LinearOperatorScaledIdentity",
]
class BaseLinearOperatorIdentity(linear_operator.LinearOperator):
"""Base class for Identity operators."""
def _check_num_rows_possibly_add_asserts(self):
"""Static check of init arg `num_rows`, possibly add asserts."""
# Possibly add asserts.
if self._assert_proper_shapes:
self._num_rows = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._num_rows,
0,
message="Argument num_rows must be a 0-D Tensor."),
check_ops.assert_non_negative(
self._num_rows,
message="Argument num_rows must be non-negative."),
], self._num_rows)
# Static checks.
if not self._num_rows.dtype.is_integer:
raise TypeError("Argument num_rows must be integer type. Found:"
" %s" % self._num_rows)
num_rows_static = self._num_rows_static
if num_rows_static is None:
return # Cannot do any other static checks.
if num_rows_static.ndim != 0:
raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
" %s" % num_rows_static)
if num_rows_static < 0:
raise ValueError("Argument num_rows must be non-negative. Found:"
" %s" % num_rows_static)
def _min_matrix_dim(self):
"""Minimum of domain/range dimension, if statically available, else None."""
domain_dim = tensor_shape.dimension_value(self.domain_dimension)
range_dim = tensor_shape.dimension_value(self.range_dimension)
if domain_dim is None or range_dim is None:
return None
return min(domain_dim, range_dim)
def _min_matrix_dim_tensor(self):
"""Minimum of domain/range dimension, as a tensor."""
return math_ops.reduce_min(self.shape_tensor()[-2:])
def _ones_diag(self):
"""Returns the diagonal of this operator as all ones."""
if self.shape.is_fully_defined():
d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
else:
d_shape = array_ops.concat(
[self.batch_shape_tensor(),
[self._min_matrix_dim_tensor()]], axis=0)
return array_ops.ones(shape=d_shape, dtype=self.dtype)
@tf_export("linalg.LinearOperatorIdentity")
class LinearOperatorIdentity(BaseLinearOperatorIdentity):
"""`LinearOperator` acting like a [batch] square identity matrix.
This operator acts like a [batch] identity matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorIdentity` is initialized with `num_rows`, and optionally
`batch_shape`, and `dtype` arguments. If `batch_shape` is `None`, this
operator efficiently passes through all arguments. If `batch_shape` is
provided, broadcasting may occur, which will require making copies.
```python
# Create a 2 x 2 identity matrix.
operator = LinearOperatorIdentity(num_rows=2, dtype=tf.float32)
operator.to_dense()
==> [[1., 0.]
[0., 1.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> 0.
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor, same as x.
y = tf.random.normal(shape=[3, 2, 4])
# Note that y.shape is compatible with operator.shape because operator.shape
# is broadcast to [3, 2, 2].
# This broadcast does NOT require copying data, since we can infer that y
# will be passed through without changing shape. We are always able to infer
# this if the operator has no batch_shape.
x = operator.solve(y)
==> Shape [3, 2, 4] Tensor, same as y.
# Create a 2-batch of 2x2 identity matrices
operator = LinearOperatorIdentity(num_rows=2, batch_shape=[2])
operator.to_dense()
==> [[[1., 0.]
[0., 1.]],
[[1., 0.]
[0., 1.]]]
# Here, even though the operator has a batch shape, the input is the same as
# the output, so x can be passed through without a copy. The operator is able
# to detect that no broadcast is necessary because both x and the operator
# have statically defined shape.
x = ... Shape [2, 2, 3]
operator.matmul(x)
==> Shape [2, 2, 3] Tensor, same as x
# Here the operator and x have different batch_shape, and are broadcast.
# This requires a copy, since the output is different size than the input.
x = ... Shape [1, 2, 3]
operator.matmul(x)
==> Shape [2, 2, 3] Tensor, equal to [x, x]
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
### Performance
If `batch_shape` initialization arg is `None`:
* `operator.matmul(x)` is `O(1)`
* `operator.solve(x)` is `O(1)`
* `operator.determinant()` is `O(1)`
If `batch_shape` initialization arg is provided, and static checks cannot
rule out the need to broadcast:
* `operator.matmul(x)` is `O(D1*...*Dd*N*R)`
* `operator.solve(x)` is `O(D1*...*Dd*N*R)`
* `operator.determinant()` is `O(B1*...*Bb)`
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
num_rows,
batch_shape=None,
dtype=None,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True,
assert_proper_shapes=False,
name="LinearOperatorIdentity"):
r"""Initialize a `LinearOperatorIdentity`.
The `LinearOperatorIdentity` is initialized with arguments defining `dtype`
and shape.
This operator is able to broadcast the leading (batch) dimensions, which
sometimes requires copying data. If `batch_shape` is `None`, the operator
can take arguments of any batch shape without copying. See examples.
Args:
num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
corresponding identity matrix.
batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading
dimensions. If `None`, this operator has no leading dimensions.
dtype: Data type of the matrix that this operator represents.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
assert_proper_shapes: Python `bool`. If `False`, only perform static
checks that initialization and method arguments have proper shape.
If `True`, and static checks are inconclusive, add asserts to the graph.
name: A name for this `LinearOperator`
Raises:
ValueError: If `num_rows` is determined statically to be non-scalar, or
negative.
ValueError: If `batch_shape` is determined statically to not be 1-D, or
negative.
ValueError: If any of the following is not `True`:
`{is_self_adjoint, is_non_singular, is_positive_definite}`.
"""
dtype = dtype or dtypes.float32
self._assert_proper_shapes = assert_proper_shapes
with ops.name_scope(name):
dtype = dtypes.as_dtype(dtype)
if not is_self_adjoint:
raise ValueError("An identity operator is always self adjoint.")
if not is_non_singular:
raise ValueError("An identity operator is always non-singular.")
if not is_positive_definite:
raise ValueError("An identity operator is always positive-definite.")
if not is_square:
raise ValueError("An identity operator is always square.")
super(LinearOperatorIdentity, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
self._num_rows = linear_operator_util.shape_tensor(
num_rows, name="num_rows")
self._num_rows_static = tensor_util.constant_value(self._num_rows)
self._check_num_rows_possibly_add_asserts()
if batch_shape is None:
self._batch_shape_arg = None
else:
self._batch_shape_arg = linear_operator_util.shape_tensor(
batch_shape, name="batch_shape_arg")
self._batch_shape_static = tensor_util.constant_value(
self._batch_shape_arg)
self._check_batch_shape_possibly_add_asserts()
def _shape(self):
matrix_shape = tensor_shape.TensorShape((self._num_rows_static,
self._num_rows_static))
if self._batch_shape_arg is None:
return matrix_shape
batch_shape = tensor_shape.TensorShape(self._batch_shape_static)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
matrix_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0)
if self._batch_shape_arg is None:
return matrix_shape
return array_ops.concat((self._batch_shape_arg, matrix_shape), 0)
def _assert_non_singular(self):
return control_flow_ops.no_op("assert_non_singular")
def _assert_positive_definite(self):
return control_flow_ops.no_op("assert_positive_definite")
def _assert_self_adjoint(self):
return control_flow_ops.no_op("assert_self_adjoint")
def _possibly_broadcast_batch_shape(self, x):
"""Return 'x', possibly after broadcasting the leading dimensions."""
# If we have no batch shape, our batch shape broadcasts with everything!
if self._batch_shape_arg is None:
return x
# Static attempt:
# If we determine that no broadcast is necessary, pass x through
# If we need a broadcast, add to an array of zeros.
#
# special_shape is the shape that, when broadcast with x's shape, will give
# the correct broadcast_shape. Note that
# We have already verified the second to last dimension of self.shape
# matches x's shape in assert_compatible_matrix_dimensions.
# Also, the final dimension of 'x' can have any shape.
# Therefore, the final two dimensions of special_shape are 1's.
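# Example (hypothetical shapes): if self.batch_shape = [2, 3], then
# special_shape = [2, 3, 1, 1]; broadcasting it against an x of shape
# [2, 3, 4, 7] changes nothing, while an x of shape [1, 3, 4, 7] is tiled
# up to [2, 3, 4, 7] by the addition below.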
special_shape = self.batch_shape.concatenate([1, 1])
bshape = array_ops.broadcast_static_shape(x.get_shape(), special_shape)
if special_shape.is_fully_defined():
# bshape.is_fully_defined iff special_shape.is_fully_defined.
if bshape == x.get_shape():
return x
# Use the built in broadcasting of addition.
zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
return x + zeros
# Dynamic broadcast:
# Always add to an array of zeros, rather than using a "cond", since a
# cond would require copying data from GPU --> CPU.
special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)
zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
return x + zeros
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Note that adjoint has no effect since this matrix is self-adjoint.
x = linalg.adjoint(x) if adjoint_arg else x
if self._assert_proper_shapes:
aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
x = control_flow_ops.with_dependencies([aps], x)
return self._possibly_broadcast_batch_shape(x)
def _determinant(self):
return array_ops.ones(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _log_abs_determinant(self):
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
return self._matmul(rhs, adjoint_arg=adjoint_arg)
def _trace(self):
# Get Tensor of all ones of same shape as self.batch_shape.
if self.batch_shape.is_fully_defined():
batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
else:
batch_of_ones = array_ops.ones(
shape=self.batch_shape_tensor(), dtype=self.dtype)
if self._min_matrix_dim() is not None:
return self._min_matrix_dim() * batch_of_ones
else:
return (math_ops.cast(self._min_matrix_dim_tensor(), self.dtype) *
batch_of_ones)
def _diag_part(self):
return self._ones_diag()
def add_to_tensor(self, mat, name="add_to_tensor"):
"""Add matrix represented by this operator to `mat`. Equiv to `I + mat`.
Args:
mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name):
mat = ops.convert_to_tensor(mat, name="mat")
mat_diag = array_ops.matrix_diag_part(mat)
new_diag = 1 + mat_diag
return array_ops.matrix_set_diag(mat, new_diag)
def _check_num_rows_possibly_add_asserts(self):
"""Static check of init arg `num_rows`, possibly add asserts."""
# Possibly add asserts.
if self._assert_proper_shapes:
self._num_rows = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._num_rows,
0,
message="Argument num_rows must be a 0-D Tensor."),
check_ops.assert_non_negative(
self._num_rows,
message="Argument num_rows must be non-negative."),
], self._num_rows)
# Static checks.
if not self._num_rows.dtype.is_integer:
raise TypeError("Argument num_rows must be integer type. Found:"
" %s" % self._num_rows)
num_rows_static = self._num_rows_static
if num_rows_static is None:
return # Cannot do any other static checks.
if num_rows_static.ndim != 0:
raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
" %s" % num_rows_static)
if num_rows_static < 0:
raise ValueError("Argument num_rows must be non-negative. Found:"
" %s" % num_rows_static)
def _check_batch_shape_possibly_add_asserts(self):
"""Static check of init arg `batch_shape`, possibly add asserts."""
if self._batch_shape_arg is None:
return
# Possibly add asserts
if self._assert_proper_shapes:
self._batch_shape_arg = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._batch_shape_arg,
1,
message="Argument batch_shape must be a 1-D Tensor."),
check_ops.assert_non_negative(
self._batch_shape_arg,
message="Argument batch_shape must be non-negative."),
], self._batch_shape_arg)
# Static checks
if not self._batch_shape_arg.dtype.is_integer:
raise TypeError("Argument batch_shape must be integer type. Found:"
" %s" % self._batch_shape_arg)
if self._batch_shape_static is None:
return # Cannot do any other static checks.
if self._batch_shape_static.ndim != 1:
raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:"
" %s" % self._batch_shape_static)
if np.any(self._batch_shape_static < 0):
raise ValueError("Argument batch_shape must be non-negative. Found:"
"%s" % self._batch_shape_static)
@tf_export("linalg.LinearOperatorScaledIdentity")
class LinearOperatorScaledIdentity(BaseLinearOperatorIdentity):
"""`LinearOperator` acting like a scaled [batch] identity matrix `A = c I`.
This operator acts like a scaled [batch] identity matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
a scaled version of the `N x N` identity matrix.
`LinearOperatorScaledIdentity` is initialized with `num_rows`, and a `multiplier`
(a `Tensor`) of shape `[B1,...,Bb]`. `N` is set to `num_rows`, and the
`multiplier` determines the scale for each batch member.
```python
# Create a 2 x 2 scaled identity matrix.
operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=3.)
operator.to_dense()
==> [[3., 0.]
[0., 3.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> 2 * Log[3]
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> 3 * x
y = tf.random.normal(shape=[3, 2, 4])
# Note that y.shape is compatible with operator.shape because operator.shape
# is broadcast to [3, 2, 2].
x = operator.solve(y)
==> y / 3
# Create a 2-batch of 2x2 scaled identity matrices
operator = LinearOperatorScaledIdentity(num_rows=2, multiplier=[5., 5.])
operator.to_dense()
==> [[[5., 0.]
[0., 5.]],
[[5., 0.]
[0., 5.]]]
x = ... Shape [2, 2, 3]
operator.matmul(x)
==> 5 * x
# Here the operator and x have different batch_shape, and are broadcast.
x = ... Shape [1, 2, 3]
operator.matmul(x)
==> 5 * x
```
### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
### Performance
* `operator.matmul(x)` is `O(D1*...*Dd*N*R)`
* `operator.solve(x)` is `O(D1*...*Dd*N*R)`
* `operator.determinant()` is `O(D1*...*Dd)`
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
num_rows,
multiplier,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
assert_proper_shapes=False,
name="LinearOperatorScaledIdentity"):
r"""Initialize a `LinearOperatorScaledIdentity`.
The `LinearOperatorScaledIdentity` is initialized with `num_rows`, which
determines the size of each identity matrix, and a `multiplier`,
which defines `dtype`, batch shape, and scale of each matrix.
This operator is able to broadcast the leading (batch) dimensions.
Args:
num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
corresponding identity matrix.
multiplier: `Tensor` of shape `[B1,...,Bb]`, or `[]` (a scalar).
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
assert_proper_shapes: Python `bool`. If `False`, only perform static
checks that initialization and method arguments have proper shape.
If `True`, and static checks are inconclusive, add asserts to the graph.
name: A name for this `LinearOperator`
Raises:
ValueError: If `num_rows` is determined statically to be non-scalar, or
negative.
"""
self._assert_proper_shapes = assert_proper_shapes
with ops.name_scope(name, values=[multiplier, num_rows]):
self._multiplier = ops.convert_to_tensor(multiplier, name="multiplier")
# Check and auto-set hints.
if not self._multiplier.dtype.is_complex:
if is_self_adjoint is False: # pylint: disable=g-bool-id-comparison
raise ValueError("A real scaled identity operator is always self-adjoint.")
else:
is_self_adjoint = True
if not is_square:
raise ValueError("A ScaledIdentity operator is always square.")
super(LinearOperatorScaledIdentity, self).__init__(
dtype=self._multiplier.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
# Shape [B1,...Bb, 1, 1]
self._multiplier_matrix = array_ops.expand_dims(
array_ops.expand_dims(self.multiplier, -1), -1)
self._multiplier_matrix_conj = math_ops.conj(self._multiplier_matrix)
self._abs_multiplier = math_ops.abs(self.multiplier)
self._num_rows = linear_operator_util.shape_tensor(
num_rows, name="num_rows")
self._num_rows_static = tensor_util.constant_value(self._num_rows)
self._check_num_rows_possibly_add_asserts()
self._num_rows_cast_to_dtype = math_ops.cast(self._num_rows, self.dtype)
self._num_rows_cast_to_real_dtype = math_ops.cast(self._num_rows,
self.dtype.real_dtype)
def _shape(self):
matrix_shape = tensor_shape.TensorShape((self._num_rows_static,
self._num_rows_static))
batch_shape = self.multiplier.get_shape()
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
matrix_shape = array_ops.stack((self._num_rows, self._num_rows), axis=0)
batch_shape = array_ops.shape(self.multiplier)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _assert_non_singular(self):
return check_ops.assert_positive(
math_ops.abs(self.multiplier), message="LinearOperator was singular")
def _assert_positive_definite(self):
return check_ops.assert_positive(
math_ops.real(self.multiplier),
message="LinearOperator was not positive definite.")
def _assert_self_adjoint(self):
imag_multiplier = math_ops.imag(self.multiplier)
return check_ops.assert_equal(
array_ops.zeros_like(imag_multiplier),
imag_multiplier,
message="LinearOperator was not self-adjoint")
def _matmul(self, x, adjoint=False, adjoint_arg=False):
x = linalg.adjoint(x) if adjoint_arg else x
if adjoint:
matrix = self._multiplier_matrix_conj
else:
matrix = self._multiplier_matrix
if self._assert_proper_shapes:
aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
x = control_flow_ops.with_dependencies([aps], x)
return x * matrix
def _determinant(self):
return self.multiplier**self._num_rows_cast_to_dtype
def _log_abs_determinant(self):
return self._num_rows_cast_to_real_dtype * math_ops.log(
self._abs_multiplier)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
if adjoint:
matrix = self._multiplier_matrix_conj
else:
matrix = self._multiplier_matrix
if self._assert_proper_shapes:
aps = linear_operator_util.assert_compatible_matrix_dimensions(self, rhs)
rhs = control_flow_ops.with_dependencies([aps], rhs)
return rhs / matrix
def _trace(self):
# Get Tensor of all ones of same shape as self.batch_shape.
if self.batch_shape.is_fully_defined():
batch_of_ones = array_ops.ones(shape=self.batch_shape, dtype=self.dtype)
else:
batch_of_ones = array_ops.ones(
shape=self.batch_shape_tensor(), dtype=self.dtype)
if self._min_matrix_dim() is not None:
return self.multiplier * self._min_matrix_dim() * batch_of_ones
else:
return (self.multiplier * math_ops.cast(self._min_matrix_dim_tensor(),
self.dtype) * batch_of_ones)
def _diag_part(self):
return self._ones_diag() * self.multiplier[..., array_ops.newaxis]
def add_to_tensor(self, mat, name="add_to_tensor"):
"""Add matrix represented by this operator to `mat`. Equiv to `I + mat`.
Args:
mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name):
# Shape [B1,...,Bb, 1]
multiplier_vector = array_ops.expand_dims(self.multiplier, -1)
# Shape [C1,...,Cc, M, M]
mat = ops.convert_to_tensor(mat, name="mat")
# Shape [C1,...,Cc, M]
mat_diag = array_ops.matrix_diag_part(mat)
# multiplier_vector broadcasts here.
new_diag = multiplier_vector + mat_diag
return array_ops.matrix_set_diag(mat, new_diag)
@property
def multiplier(self):
"""The [batch] scalar `Tensor`, `c` in `cI`."""
return self._multiplier
| tensorflow-master | tensorflow/python/ops/linalg/linear_operator_identity.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create a Block Diagonal operator from one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorBlockDiag"]
@tf_export("linalg.LinearOperatorBlockDiag")
class LinearOperatorBlockDiag(linear_operator.LinearOperator):
"""Combines one or more `LinearOperators` into a Block Diagonal matrix.
This operator combines one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator`, whose underlying matrix representation is
square and has each operator `opi` on the main diagonal, and zeros elsewhere.
#### Shape compatibility
If `opj` acts like a [batch] square matrix `Aj`, then `op_combined` acts like
the [batch] square matrix formed by having each matrix `Aj` on the main
diagonal.
Each `opj` is required to represent a square matrix, and hence will have
shape `batch_shape_j + [M_j, M_j]`.
If `opj` has shape `batch_shape_j + [M_j, M_j]`, then the combined operator
has shape `broadcast_batch_shape + [sum M_j, sum M_j]`, where
`broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`,
`j = 1,...,J`, assuming the intermediate batch shapes broadcast.
Even if the combined shape is well defined, the combined operator's
methods may fail due to lack of broadcasting ability in the defining
operators' methods.
```python
# Create a 4 x 4 linear operator combined of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
operator = LinearOperatorBlockDiag([operator_1, operator_2])
operator.to_dense()
==> [[1., 2., 0., 0.],
[3., 4., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x1 = ... # Shape [2, 2] Tensor
x2 = ... # Shape [2, 2] Tensor
x = tf.concat([x1, x2], 0) # Shape [2, 4] Tensor
operator.matmul(x)
==> tf.concat([operator_1.matmul(x1), operator_2.matmul(x2)])
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix_44 = tf.random.normal(shape=[2, 3, 4, 4])
operator_44 = LinearOperatorFullMatrix(matrix_44)
# Create a [1, 3] batch of 5 x 5 linear operators.
matrix_55 = tf.random.normal(shape=[1, 3, 5, 5])
operator_55 = LinearOperatorFullMatrix(matrix_55)
# Combine to create a [2, 3] batch of 9 x 9 operators.
operator_99 = LinearOperatorBlockDiag([operator_44, operator_55])
# Create a shape [2, 3, 9] vector.
x = tf.random.normal(shape=[2, 3, 9])
operator_99.matvec(x)
==> Shape [2, 3, 9] Tensor
```
#### Performance
The performance of `LinearOperatorBlockDiag` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
name=None):
r"""Initialize a `LinearOperatorBlockDiag`.
`LinearOperatorBlockDiag` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
This is true by default, and will raise a `ValueError` otherwise.
name: A name for this `LinearOperator`. Default is the individual
operators' names joined with `_ds_`.
Raises:
TypeError: If operators do not all have the same `dtype`.
ValueError: If `operators` is empty or contains non-square operators.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The direct sum of non-singular operators is always non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The direct sum of self-adjoint operators is always self-adjoint.")
is_self_adjoint = True
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError(
"The direct sum of positive definite operators is always "
"positive definite.")
is_positive_definite = True
if not (is_square and all(operator.is_square for operator in operators)):
raise ValueError(
"Can only represent a block diagonal of square matrices.")
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
# Using ds to mean direct sum.
name = "_ds_".join(operator.name for operator in operators)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorBlockDiag, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=True,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
range_dimension = self.operators[0].range_dimension
for operator in self.operators[1:]:
domain_dimension += operator.domain_dimension
range_dimension += operator.range_dimension
matrix_shape = tensor_shape.TensorShape([domain_dimension, range_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if self.shape.is_fully_defined():
return ops.convert_to_tensor(
self.shape.as_list(), dtype=dtypes.int32, name="shape")
domain_dimension = self.operators[0].domain_dimension_tensor()
range_dimension = self.operators[0].range_dimension_tensor()
for operator in self.operators[1:]:
domain_dimension += operator.domain_dimension_tensor()
range_dimension += operator.range_dimension_tensor()
matrix_shape = array_ops.stack([domain_dimension, range_dimension])
# Dummy Tensor of zeros. Will never be materialized.
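# Summing zeros of every operator's batch shape broadcasts those shapes
# together, so the shape of the final sum is the broadcast batch shape.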
zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
for operator in self.operators[1:]:
zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
batch_shape = array_ops.shape(zeros)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
split_dim = -1 if adjoint_arg else -2
# Split input by rows normally, and otherwise columns.
split_x = self._split_input_into_blocks(x, axis=split_dim)
result_list = []
for index, operator in enumerate(self.operators):
result_list += [operator.matmul(
split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)]
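# The per-block results may have different batch shapes; broadcast them to a
# common batch shape before concatenating along the row (-2) dimension.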
result_list = linear_operator_util.broadcast_matrix_batch_dims(
result_list)
return array_ops.concat(result_list, axis=-2)
def _determinant(self):
result = self.operators[0].determinant()
for operator in self.operators[1:]:
result *= operator.determinant()
return result
def _log_abs_determinant(self):
result = self.operators[0].log_abs_determinant()
for operator in self.operators[1:]:
result += operator.log_abs_determinant()
return result
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
split_dim = -1 if adjoint_arg else -2
# Split input by rows normally, and otherwise columns.
split_rhs = self._split_input_into_blocks(rhs, axis=split_dim)
solution_list = []
for index, operator in enumerate(self.operators):
solution_list += [operator.solve(
split_rhs[index], adjoint=adjoint, adjoint_arg=adjoint_arg)]
solution_list = linear_operator_util.broadcast_matrix_batch_dims(
solution_list)
return array_ops.concat(solution_list, axis=-2)
def _diag_part(self):
diag_list = []
for operator in self.operators:
# Extend the axis for broadcasting.
diag_list += [operator.diag_part()[..., array_ops.newaxis]]
diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list)
diagonal = array_ops.concat(diag_list, axis=-2)
return array_ops.squeeze(diagonal, axis=-1)
def _trace(self):
result = self.operators[0].trace()
for operator in self.operators[1:]:
result += operator.trace()
return result
def _to_dense(self):
num_cols = 0
rows = []
broadcasted_blocks = [operator.to_dense() for operator in self.operators]
broadcasted_blocks = linear_operator_util.broadcast_matrix_batch_dims(
broadcasted_blocks)
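# Assemble the dense matrix one block-row at a time. Each row has the form
# [zeros_before | block | zeros_after], with the zero padding sized so that
# every block lands on the main diagonal.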
for block in broadcasted_blocks:
batch_row_shape = array_ops.shape(block)[:-1]
zeros_to_pad_before_shape = array_ops.concat(
[batch_row_shape, [num_cols]], axis=-1)
zeros_to_pad_before = array_ops.zeros(
shape=zeros_to_pad_before_shape, dtype=block.dtype)
num_cols += array_ops.shape(block)[-1]
zeros_to_pad_after_shape = array_ops.concat(
[batch_row_shape,
[self.domain_dimension_tensor() - num_cols]], axis=-1)
zeros_to_pad_after = array_ops.zeros(
shape=zeros_to_pad_after_shape, dtype=block.dtype)
rows.append(array_ops.concat(
[zeros_to_pad_before, block, zeros_to_pad_after], axis=-1))
mat = array_ops.concat(rows, axis=-2)
mat.set_shape(self.shape)
return mat
def _assert_non_singular(self):
return control_flow_ops.group([
operator.assert_non_singular() for operator in self.operators])
def _assert_self_adjoint(self):
return control_flow_ops.group([
operator.assert_self_adjoint() for operator in self.operators])
def _assert_positive_definite(self):
return control_flow_ops.group([
operator.assert_positive_definite() for operator in self.operators])
def _split_input_into_blocks(self, x, axis=-1):
"""Split `x` into blocks matching `operators`'s `domain_dimension`.
Specifically, if we have a block diagonal matrix, with block sizes
`[M_j, M_j] j = 1..J`, this method splits `x` on `axis` into `J`
tensors, whose shape at `axis` is `M_j`.
Args:
x: `Tensor`. `x` is split into `J` tensors.
axis: Python `Integer` representing the axis to split `x` on.
Returns:
A list of `Tensor`s.
"""
block_sizes = []
if self.shape.is_fully_defined():
for operator in self.operators:
block_sizes += [operator.domain_dimension.value]
else:
for operator in self.operators:
block_sizes += [operator.domain_dimension_tensor()]
return array_ops.split(x, block_sizes, axis=axis)
| tensorflow-master | tensorflow/python/ops/linalg/linear_operator_block_diag.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Composes one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorComposition"]
@tf_export("linalg.LinearOperatorComposition")
class LinearOperatorComposition(linear_operator.LinearOperator):
"""Composes one or more `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` with action defined by:
```
op_composed(x) := op1(op2(...(opJ(x))...))
```
If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the
[batch] matrix formed with the multiplication `A1 A2...AJ`.
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have
`N_j = M_{j+1}`, in which case the composed operator has shape equal to
`broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the
mutual broadcast of `batch_shape_j`, `j = 1,...,J`, assuming the intermediate
batch shapes broadcast. Even if the composed shape is well defined, the
composed operator's methods may fail due to lack of broadcasting ability in
the defining operators' methods.
```python
# Create a 2 x 2 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
operator = LinearOperatorComposition([operator_1, operator_2])
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random.normal(shape=[2, 3, 4, 5])
operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random.normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 4 x 6 operators.
operator_46 = LinearOperatorComposition([operator_45, operator_56])
# Create a shape [2, 3, 6, 2] batch matrix.
x = tf.random.normal(shape=[2, 3, 6, 2])
operator_46.matmul(x)
==> Shape [2, 3, 4, 2] Tensor
```
#### Performance
The performance of `LinearOperatorComposition` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorComposition`.
`LinearOperatorComposition` is initialized with a list of operators
`[op_1,...,op_J]`. For the `matmul` method to be well defined, the
composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have
similar constraints.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_o_`.
Raises:
TypeError: If operators do not all have the same `dtype`.
ValueError: If `operators` is empty.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The composition of non-singular operators is always non-singular.")
is_non_singular = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = "_o_".join(operator.name for operator in operators)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorComposition, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension.assert_is_compatible_with(operator.range_dimension)
domain_dimension = operator.domain_dimension
matrix_shape = tensor_shape.TensorShape(
[self.operators[0].range_dimension,
self.operators[-1].domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if self.shape.is_fully_defined():
return ops.convert_to_tensor(
self.shape.as_list(), dtype=dtypes.int32, name="shape")
# Don't check the matrix dimensions. That would add unnecessary Asserts to
# the graph. Things will fail at runtime naturally if shapes are
# incompatible.
matrix_shape = array_ops.stack([
self.operators[0].range_dimension_tensor(),
self.operators[-1].domain_dimension_tensor()
])
# Dummy Tensor of zeros. Will never be materialized.
zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
for operator in self.operators[1:]:
zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
batch_shape = array_ops.shape(zeros)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# If self.operators = [A, B], and not adjoint, then
# matmul_order_list = [B, A].
# As a result, we return A.matmul(B.matmul(x))
if adjoint:
matmul_order_list = self.operators
else:
matmul_order_list = list(reversed(self.operators))
result = matmul_order_list[0].matmul(
x, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in matmul_order_list[1:]:
result = operator.matmul(result, adjoint=adjoint)
return result
def _determinant(self):
result = self.operators[0].determinant()
for operator in self.operators[1:]:
result *= operator.determinant()
return result
def _log_abs_determinant(self):
result = self.operators[0].log_abs_determinant()
for operator in self.operators[1:]:
result += operator.log_abs_determinant()
return result
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# TODO(langmore) Implement solve using solve_ls if some intermediate
# operator maps to a high dimensional space.
# In that case, an exact solve may still be possible.
# If self.operators = [A, B], and not adjoint, then
# solve_order_list = [A, B].
# As a result, we return B.solve(A.solve(x))
if adjoint:
solve_order_list = list(reversed(self.operators))
else:
solve_order_list = self.operators
solution = solve_order_list[0].solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
for operator in solve_order_list[1:]:
solution = operator.solve(solution, adjoint=adjoint)
return solution
| tensorflow-master | tensorflow/python/ops/linalg/linear_operator_composition.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a Toeplitz matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorToeplitz",]
@tf_export("linalg.LinearOperatorToeplitz")
class LinearOperatorToeplitz(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] of Toeplitz matrices.
This operator acts like a [batch] Toeplitz matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
#### Description in terms of toeplitz matrices
Toeplitz means that `A` has constant diagonals. Hence, `A` can be generated
with two vectors. One represents the first column of the matrix, and the
other represents the first row.
Below is a 4 x 4 example:
```
A = |a b c d|
|e a b c|
|f e a b|
|g f e a|
```
#### Example of a Toeplitz operator.
```python
# Create a 3 x 3 Toeplitz operator.
col = [1., 2., 3.]
row = [1., 4., -9.]
operator = LinearOperatorToeplitz(col, row)
operator.to_dense()
==> [[1., 4., -9.],
[2., 1., 4.],
[3., 2., 1.]]
operator.shape
==> [3, 3]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
==> Shape [3, 4] Tensor
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
col,
row,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorToeplitz"):
r"""Initialize a `LinearOperatorToeplitz`.
Args:
col: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The first column of the operator. Allowed dtypes: `float16`, `float32`,
`float64`, `complex64`, `complex128`. Note that the first entry of
`col` is assumed to be the same as the first entry of `row`.
row: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
The first row of the operator. Allowed dtypes: `float16`, `float32`,
`float64`, `complex64`, `complex128`. Note that the first entry of
`row` is assumed to be the same as the first entry of `col`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
"""
with ops.name_scope(name, values=[row, col]):
self._row = ops.convert_to_tensor(row, name="row")
self._col = ops.convert_to_tensor(col, name="col")
self._check_row_col(self._row, self._col)
circulant_col = array_ops.concat(
[self._col,
array_ops.zeros_like(self._col[..., 0:1]),
array_ops.reverse(self._row[..., 1:], axis=[-1])], axis=-1)
# To be used for matmul.
self._circulant = linear_operator_circulant.LinearOperatorCirculant(
fft_ops.fft(_to_complex(circulant_col)),
input_output_dtype=self._row.dtype)
if is_square is False: # pylint:disable=g-bool-id-comparison
raise ValueError("Only square Toeplitz operators currently supported.")
is_square = True
super(LinearOperatorToeplitz, self).__init__(
dtype=self._row.dtype,
graph_parents=[self._row, self._col],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_row_col(self, row, col):
"""Static check of row and column."""
for name, tensor in [["row", row], ["col", col]]:
if tensor.get_shape().ndims is not None and tensor.get_shape().ndims < 1:
raise ValueError("Argument {} must have at least 1 dimension. "
"Found: {}".format(name, tensor))
if row.get_shape()[-1] is not None and col.get_shape()[-1] is not None:
if row.get_shape()[-1] != col.get_shape()[-1]:
raise ValueError(
"Expected square matrix, got row and col with mismatched "
"dimensions.")
def _shape(self):
# If v_shape = [5, 3], we return [5, 3, 3].
v_shape = array_ops.broadcast_static_shape(
self.row.shape, self.col.shape)
return v_shape.concatenate(v_shape[-1:])
def _shape_tensor(self):
v_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(self.row),
array_ops.shape(self.col))
k = v_shape[-1]
return array_ops.concat((v_shape, [k]), 0)
def _assert_self_adjoint(self):
return check_ops.assert_equal(
self.row,
self.col,
message=("row and col are not the same, and "
"so this operator is not self-adjoint."))
# TODO(srvasude): Add efficient solver and determinant calculations to this
# class (based on Levinson recursion.)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Given a Toeplitz matrix, we can embed it in a Circulant matrix to perform
# efficient matrix multiplications. Given a Toeplitz matrix with first row
# [t_0, t_1, ..., t_{n-1}] and first column [t_0, t_{-1}, ..., t_{-(n-1)}],
# let C be the circulant matrix with first column [t_0, t_{-1}, ...,
# t_{-(n-1)}, 0, t_{n-1}, ..., t_1]. Also adjoin to our input vector `x`
# `n` zeros, to make it a vector of length `2n` (call it y). It can be shown
# that if we take the first n entries of `Cy`, this is equal to the Toeplitz
# multiplication. See:
# http://math.mit.edu/icg/resources/teaching/18.085-spring2015/toeplitz.pdf
# for more details.
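# Small worked example (n = 3, values illustrative only): for a Toeplitz
# matrix with first column [t0, t_{-1}, t_{-2}] and first row [t0, t1, t2],
# the embedding circulant C (built from `circulant_col` in __init__) has
# first column [t0, t_{-1}, t_{-2}, 0, t2, t1]; padding x with 3 zero rows
# and keeping the first 3 rows of C @ y reproduces the Toeplitz product.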
x = linalg.adjoint(x) if adjoint_arg else x
expanded_x = array_ops.concat([x, array_ops.zeros_like(x)], axis=-2)
result = self._circulant.matmul(
expanded_x, adjoint=adjoint, adjoint_arg=False)
return math_ops.cast(
result[..., :self.domain_dimension_tensor(), :],
self.dtype)
def _trace(self):
return math_ops.cast(
self.domain_dimension_tensor(),
dtype=self.dtype) * self.col[..., 0]
def _diag_part(self):
diag_entry = self.col[..., 0:1]
return diag_entry * array_ops.ones(
[self.domain_dimension_tensor()], self.dtype)
@property
def col(self):
return self._col
@property
def row(self):
return self._row
def _to_complex(x):
dtype = dtypes.complex64
if x.dtype in [dtypes.float64, dtypes.complex128]:
dtype = dtypes.complex128
return math_ops.cast(x, dtype)
| tensorflow-master | tensorflow/python/ops/linalg/linear_operator_toeplitz.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Linear algebra ops.
band_part = array_ops.matrix_band_part
cholesky = dispatch.add_dispatch_support(linalg_ops.cholesky)
cholesky_solve = linalg_ops.cholesky_solve
det = dispatch.add_dispatch_support(linalg_ops.matrix_determinant)
slogdet = gen_linalg_ops.log_matrix_determinant
tf_export('linalg.slogdet')(slogdet)
diag = array_ops.matrix_diag
diag_part = dispatch.add_dispatch_support(array_ops.matrix_diag_part)
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = dispatch.add_dispatch_support(linalg_ops.matrix_inverse)
logm = gen_linalg_ops.matrix_logarithm
lu = gen_linalg_ops.lu
tf_export('linalg.logm')(logm)
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = dispatch.add_dispatch_support(linalg_ops.matrix_solve)
sqrtm = linalg_ops.matrix_square_root
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = dispatch.add_dispatch_support(math_ops.trace)
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
@tf_export('linalg.logdet')
@dispatch.add_dispatch_support
def logdet(matrix, name=None):
"""Computes log of the determinant of a hermitian positive definite matrix.
```python
# Compute the determinant of a matrix while reducing the chance of over- or
# underflow:
A = ... # shape 10 x 10
det = tf.exp(tf.linalg.logdet(A)) # scalar
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op`. Defaults to `logdet`.
Returns:
The natural log of the determinant of `matrix`.
@compatibility(numpy)
Equivalent to numpy.linalg.slogdet, although no sign is returned since only
hermitian positive definite matrices are supported.
@end_compatibility
"""
# This uses the property that the log det(A) = 2*sum(log(real(diag(C))))
# where C is the cholesky decomposition of A.
with ops.name_scope(name, 'logdet', [matrix]):
chol = gen_linalg_ops.cholesky(matrix)
return 2.0 * math_ops.reduce_sum(
math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
axis=[-1])
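# A minimal NumPy sketch (hypothetical names, not part of this module) of the
# identity used above: for a Hermitian positive definite A with Cholesky
# factor C, det(A) = det(C)^2, so log det(A) = 2 * sum(log(diag(C))).
import numpy as np

_a = np.array([[4., 1.],
               [1., 3.]])  # Symmetric positive definite.
_chol = np.linalg.cholesky(_a)
_logdet_via_chol = 2. * np.sum(np.log(np.diag(_chol)))
assert np.allclose(_logdet_via_chol, np.log(np.linalg.det(_a)))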
@tf_export('linalg.adjoint')
@dispatch.add_dispatch_support
def adjoint(matrix, name=None):
"""Transposes the last two dimensions of and conjugates tensor `matrix`.
For example:
```python
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.adjoint(x) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
```
Args:
matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
or `complex128` with shape `[..., M, M]`.
name: A name to give this `Op` (optional).
Returns:
The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
matrix.
"""
with ops.name_scope(name, 'adjoint', [matrix]):
matrix = ops.convert_to_tensor(matrix, name='matrix')
return array_ops.matrix_transpose(matrix, conjugate=True)
# This section is ported nearly verbatim from Eigen's implementation:
# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html
def _matrix_exp_pade3(matrix):
"""3rd-order Pade approximant for matrix exponential."""
b = [120.0, 60.0, 12.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
tmp = matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade5(matrix):
"""5th-order Pade approximant for matrix exponential."""
b = [30240.0, 15120.0, 3360.0, 420.0, 30.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
tmp = matrix_4 + b[3] * matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade7(matrix):
"""7th-order Pade approximant for matrix exponential."""
b = [17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0, 56.0]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
tmp = matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 + b[0] * ident
return matrix_u, matrix_v
def _matrix_exp_pade9(matrix):
"""9th-order Pade approximant for matrix exponential."""
b = [
17643225600.0, 8821612800.0, 2075673600.0, 302702400.0, 30270240.0,
2162160.0, 110880.0, 3960.0, 90.0
]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
matrix_8 = math_ops.matmul(matrix_6, matrix_2)
tmp = (
matrix_8 + b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 +
b[1] * ident)
matrix_u = math_ops.matmul(matrix, tmp)
matrix_v = (
b[8] * matrix_8 + b[6] * matrix_6 + b[4] * matrix_4 + b[2] * matrix_2 +
b[0] * ident)
return matrix_u, matrix_v
def _matrix_exp_pade13(matrix):
"""13th-order Pade approximant for matrix exponential."""
b = [
64764752532480000.0, 32382376266240000.0, 7771770303897600.0,
1187353796428800.0, 129060195264000.0, 10559470521600.0, 670442572800.0,
33522128640.0, 1323241920.0, 40840800.0, 960960.0, 16380.0, 182.0
]
b = [constant_op.constant(x, matrix.dtype) for x in b]
ident = linalg_ops.eye(
array_ops.shape(matrix)[-2],
batch_shape=array_ops.shape(matrix)[:-2],
dtype=matrix.dtype)
matrix_2 = math_ops.matmul(matrix, matrix)
matrix_4 = math_ops.matmul(matrix_2, matrix_2)
matrix_6 = math_ops.matmul(matrix_4, matrix_2)
tmp_u = (
math_ops.matmul(matrix_6, matrix_6 + b[11] * matrix_4 + b[9] * matrix_2) +
b[7] * matrix_6 + b[5] * matrix_4 + b[3] * matrix_2 + b[1] * ident)
matrix_u = math_ops.matmul(matrix, tmp_u)
tmp_v = b[12] * matrix_6 + b[10] * matrix_4 + b[8] * matrix_2
matrix_v = (
math_ops.matmul(matrix_6, tmp_v) + b[6] * matrix_6 + b[4] * matrix_4 +
b[2] * matrix_2 + b[0] * ident)
return matrix_u, matrix_v
@tf_export('linalg.expm')
def matrix_exponential(input, name=None): # pylint: disable=redefined-builtin
r"""Computes the matrix exponential of one or more square matrices.
exp(A) = \sum_{n=0}^\infty A^n/n!
The exponential is computed using a combination of the scaling and squaring
method and the Pade approximation. Details can be found in:
Nicholas J. Higham, "The scaling and squaring method for the matrix
exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices. The output is a tensor of the same shape as the input
containing the exponential for all input submatrices `[..., :, :]`.
Args:
input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`, or
`complex128` with shape `[..., M, M]`.
name: A name to give this `Op` (optional).
Returns:
the matrix exponential of the input.
Raises:
ValueError: An unsupported type is provided as input.
@compatibility(scipy)
Equivalent to scipy.linalg.expm
@end_compatibility
"""
with ops.name_scope(name, 'matrix_exponential', [input]):
matrix = ops.convert_to_tensor(input, name='input')
if matrix.shape[-2:] == [0, 0]:
return matrix
batch_shape = matrix.shape[:-2]
if not batch_shape.is_fully_defined():
batch_shape = array_ops.shape(matrix)[:-2]
# reshaping the batch makes the where statements work better
matrix = array_ops.reshape(
matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))
l1_norm = math_ops.reduce_max(
math_ops.reduce_sum(
math_ops.abs(matrix),
axis=array_ops.size(array_ops.shape(matrix)) - 2),
axis=-1)
const = lambda x: constant_op.constant(x, l1_norm.dtype)
def _nest_where(vals, cases):
assert len(vals) == len(cases) - 1
if len(vals) == 1:
return array_ops.where(
math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])
else:
return array_ops.where(
math_ops.less(l1_norm, const(vals[0])), cases[0],
_nest_where(vals[1:], cases[1:]))
if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:
maxnorm = const(3.925724783138660)
squarings = math_ops.maximum(
math_ops.floor(
math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
u3, v3 = _matrix_exp_pade3(matrix)
u5, v5 = _matrix_exp_pade5(matrix)
u7, v7 = _matrix_exp_pade7(matrix / math_ops.pow(
constant_op.constant(2.0, dtype=matrix.dtype),
math_ops.cast(
squarings,
matrix.dtype))[..., array_ops.newaxis, array_ops.newaxis])
conds = (4.258730016922831e-001, 1.880152677804762e+000)
u = _nest_where(conds, (u3, u5, u7))
v = _nest_where(conds, (v3, v5, v7))
elif matrix.dtype in [dtypes.float64, dtypes.complex128]:
maxnorm = const(5.371920351148152)
squarings = math_ops.maximum(
math_ops.floor(
math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
u3, v3 = _matrix_exp_pade3(matrix)
u5, v5 = _matrix_exp_pade5(matrix)
u7, v7 = _matrix_exp_pade7(matrix)
u9, v9 = _matrix_exp_pade9(matrix)
u13, v13 = _matrix_exp_pade13(matrix / math_ops.pow(
constant_op.constant(2.0, dtype=matrix.dtype),
math_ops.cast(
squarings,
matrix.dtype))[..., array_ops.newaxis, array_ops.newaxis])
conds = (1.495585217958292e-002, 2.539398330063230e-001,
9.504178996162932e-001, 2.097847961257068e+000)
u = _nest_where(conds, (u3, u5, u7, u9, u13))
v = _nest_where(conds, (v3, v5, v7, v9, v13))
else:
raise ValueError('tf.linalg.expm does not support matrices of type %s' %
matrix.dtype)
numer = u + v
denom = -u + v
result = linalg_ops.matrix_solve(denom, numer)
max_squarings = math_ops.reduce_max(squarings)
i = const(0.0)
c = lambda i, r: math_ops.less(i, max_squarings)
def b(i, r):
return i + 1, array_ops.where(
math_ops.less(i, squarings), math_ops.matmul(r, r), r)
_, result = control_flow_ops.while_loop(c, b, [i, result])
if not matrix.shape.is_fully_defined():
return array_ops.reshape(
result,
array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))
return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))
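# A minimal NumPy sketch (hypothetical helper, not part of this module) of the
# scaling-and-squaring method used above, with the 3rd-order Pade approximant
# and a fixed number of squarings for simplicity (the real implementation
# chooses the order and the number of squarings from the L1 norm).
import numpy as np


def _np_expm_pade3(a, squarings=4):
  a = a / 2.0**squarings  # Scale down so the norm is small.
  ident = np.eye(a.shape[-1])
  a2 = a.dot(a)
  u = a.dot(a2 + 60. * ident)   # Odd part of the Pade numerator.
  v = 12. * a2 + 120. * ident   # Even part.
  r = np.linalg.solve(-u + v, u + v)  # (V - U)^{-1} (V + U) ~= exp(a).
  for _ in range(squarings):    # Square back up: exp(A) = exp(A / 2^s)^(2^s).
    r = r.dot(r)
  return r


# For a diagonal matrix the exponential is just exp of the diagonal entries.
assert np.allclose(_np_expm_pade3(np.diag([0.1, 0.2])),
                   np.diag(np.exp([0.1, 0.2])))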
@tf_export('linalg.tridiagonal_solve')
def tridiagonal_solve(diagonals,
rhs,
diagonals_format='compact',
transpose_rhs=False,
conjugate_rhs=False,
name=None,
partial_pivoting=True):
r"""Solves tridiagonal systems of equations.
The input can be supplied in various formats: `matrix`, `sequence` and
`compact`, specified by the `diagonals_format` arg.
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
two inner-most dimensions representing the square tridiagonal matrices.
Elements outside of the three diagonals will be ignored.
In `sequence` format, `diagonals` are supplied as a tuple or list of three
tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing
superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either
`M-1` or `M`; in the latter case, the last element of superdiagonal and the
first element of subdiagonal will be ignored.
In `compact` format the three diagonals are brought together into one tensor
of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
diagonals, and subdiagonals, in order. Similarly to `sequence` format,
elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.
The `compact` format is recommended as the one with the best performance. In case
you need to cast a tensor into a compact format manually, use `tf.gather_nd`.
An example for a tensor of shape [m, m]:
```python
rhs = tf.constant([...])
matrix = tf.constant([[...]])
m = matrix.shape[0]
dummy_idx = [0, 0] # An arbitrary element to use as a dummy
indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx], # Superdiagonal
[[i, i] for i in range(m)], # Diagonal
[dummy_idx] + [[i + 1, i] for i in range(m - 1)]] # Subdiagonal
diagonals=tf.gather_nd(matrix, indices)
x = tf.linalg.tridiagonal_solve(diagonals, rhs)
```
Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or
`[..., M, K]`. The latter allows solving K systems simultaneously with the
same left-hand side and K different right-hand sides. If `transpose_rhs`
is set to `True`, the expected shape is `[..., M]` or `[..., K, M]`.
The batch dimensions, denoted as `...`, must be the same in `diagonals` and
`rhs`.
The output is a tensor of the same shape as `rhs`: either `[..., M]` or
`[..., M, K]`.
The op isn't guaranteed to raise an error if the input matrix is not
invertible. `tf.debugging.check_numerics` can be applied to the output to
detect invertibility problems.
**Note**: with large batch sizes, the computation on the GPU may be slow, if
either `partial_pivoting=True` or there are multiple right-hand sides
(`K > 1`). If this issue arises, consider if it's possible to disable pivoting
and have `K = 1`, or, alternatively, consider using CPU.
On CPU, the solution is computed via Gaussian elimination with or without partial
pivoting, depending on `partial_pivoting` parameter. On GPU, Nvidia's cuSPARSE
library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
Args:
diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
shape depends of `diagonals_format`, see description above. Must be
`float32`, `float64`, `complex64`, or `complex128`.
rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as
`diagonals`.
diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
`compact`.
transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect
if the shape of rhs is [..., M]).
conjugate_rhs: If `True`, `rhs` is conjugated before solving.
name: A name to give this `Op` (optional).
partial_pivoting: whether to perform partial pivoting. `True` by default.
Partial pivoting makes the procedure more stable, but slower. Partial
pivoting is unnecessary in some cases, including diagonally dominant and
symmetric positive definite matrices (see e.g. theorem 9.12 in [1]).
Returns:
A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.
Raises:
ValueError: An unsupported type is provided as input, or when the input
tensors have incorrect shapes.
[1] Nicholas J. Higham (2002). Accuracy and Stability of Numerical Algorithms:
Second Edition. SIAM. p. 175. ISBN 978-0-89871-802-7.
"""
if diagonals_format == 'compact':
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
name)
if diagonals_format == 'sequence':
if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:
raise ValueError('Expected diagonals to be a sequence of length 3.')
superdiag, maindiag, subdiag = diagonals
if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or
not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):
raise ValueError(
'Tensors representing the three diagonals must have the same shape,'
'except for the last dimension, got {}, {}, {}'.format(
subdiag.shape, maindiag.shape, superdiag.shape))
m = tensor_shape.dimension_value(maindiag.shape[-1])
def pad_if_necessary(t, name, last_dim_padding):
n = tensor_shape.dimension_value(t.shape[-1])
if not n or n == m:
return t
if n == m - 1:
paddings = ([[0, 0] for _ in range(len(t.shape) - 1)] +
[last_dim_padding])
return array_ops.pad(t, paddings)
raise ValueError('Expected {} to have length {} or {}, got {}.'.format(
name, m, m - 1, n))
subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])
superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])
diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2)
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
name)
if diagonals_format == 'matrix':
m1 = tensor_shape.dimension_value(diagonals.shape[-1])
m2 = tensor_shape.dimension_value(diagonals.shape[-2])
if m1 and m2 and m1 != m2:
raise ValueError(
'Expected last two dimensions of diagonals to be same, got {} and {}'
.format(m1, m2))
m = m1 or m2
if not m:
raise ValueError('The size of the matrix needs to be known for '
'diagonals_format="matrix"')
# Extract diagonals; use input[..., 0, 0] as "dummy" m-th elements of sub-
# and superdiagonal.
# gather_nd slices into first indices, whereas we need to slice into the
# last two, so transposing back and forth is necessary.
dummy_idx = [0, 0]
indices = ([[[1, 0], [0, 0], dummy_idx]] +
[[[i + 1, i], [i, i], [i - 1, i]] for i in range(1, m - 1)] +
[[dummy_idx, [m - 1, m - 1], [m - 2, m - 1]]])
diagonals = array_ops.transpose(
array_ops.gather_nd(array_ops.transpose(diagonals), indices))
return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting,
name)
raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))
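# A minimal NumPy sketch (hypothetical helper, not part of this module) of what
# the 'compact' [3, M] layout encodes and how a tridiagonal solve proceeds via
# Gaussian elimination without pivoting (the Thomas algorithm); this assumes a
# well-conditioned, e.g. diagonally dominant, system.
import numpy as np


def _np_thomas_solve(compact, rhs):
  superdiag, maindiag, subdiag = (np.array(d, dtype=float) for d in compact)
  b = np.array(rhs, dtype=float)
  m = len(maindiag)
  for i in range(1, m):  # Forward elimination.
    w = subdiag[i] / maindiag[i - 1]
    maindiag[i] -= w * superdiag[i - 1]
    b[i] -= w * b[i - 1]
  x = np.empty(m)
  x[-1] = b[-1] / maindiag[-1]
  for i in range(m - 2, -1, -1):  # Back substitution.
    x[i] = (b[i] - superdiag[i] * x[i + 1]) / maindiag[i]
  return x


# The matrix [[2, -1, 0], [-1, 2, -1], [0, -1, 2]] in compact form:
_compact = [[-1, -1, 0],  # Superdiagonal; last entry is ignored.
            [2, 2, 2],    # Main diagonal.
            [0, -1, -1]]  # Subdiagonal; first entry is ignored.
assert np.allclose(_np_thomas_solve(_compact, [1, 1, 1]), [1.5, 2., 1.5])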
def _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
conjugate_rhs, partial_pivoting, name):
"""Helper function used after the input has been cast to compact form."""
diags_rank, rhs_rank = len(diagonals.shape), len(rhs.shape)
if diags_rank < 2:
raise ValueError(
'Expected diagonals to have rank at least 2, got {}'.format(diags_rank))
if rhs_rank != diags_rank and rhs_rank != diags_rank - 1:
raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
diags_rank - 1, diags_rank, rhs_rank))
if diagonals.shape[-2] and diagonals.shape[-2] != 3:
raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))
if not diagonals.shape[:-2].is_compatible_with(rhs.shape[:diags_rank - 2]):
raise ValueError('Batch shapes {} and {} are incompatible'.format(
diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))
def check_num_lhs_matches_num_rhs():
if (diagonals.shape[-1] and rhs.shape[-2] and
diagonals.shape[-1] != rhs.shape[-2]):
raise ValueError('Expected number of left-hand sided and right-hand '
'sides to be equal, got {} and {}'.format(
diagonals.shape[-1], rhs.shape[-2]))
if rhs_rank == diags_rank - 1:
# Rhs provided as a vector, ignoring transpose_rhs
if conjugate_rhs:
rhs = math_ops.conj(rhs)
rhs = array_ops.expand_dims(rhs, -1)
check_num_lhs_matches_num_rhs()
return array_ops.squeeze(
linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name),
-1)
if transpose_rhs:
rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
elif conjugate_rhs:
rhs = math_ops.conj(rhs)
check_num_lhs_matches_num_rhs()
result = linalg_ops.tridiagonal_solve(diagonals, rhs, partial_pivoting, name)
return array_ops.matrix_transpose(result) if transpose_rhs else result
@tf_export('linalg.tridiagonal_matmul')
def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):
r"""Multiplies tridiagonal matrix by matrix.
`diagonals` is a representation of a tridiagonal NxN matrix, whose format
depends on `diagonals_format`.
In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
two inner-most dimensions representing the square tridiagonal matrices.
Elements outside of the three diagonals will be ignored.
In `sequence` format, `diagonals` is a list or tuple of three tensors:
`[superdiag, maindiag, subdiag]`, each having shape [..., M]. The last element
of `superdiag` and the first element of `subdiag` are ignored.
In `compact` format the three diagonals are brought together into one tensor
of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
diagonals, and subdiagonals, in order. Similarly to `sequence` format,
elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.
The `sequence` format is recommended as the one with the best performance.
`rhs` is matrix to the right of multiplication. It has shape `[..., M, N]`.
Example:
```python
superdiag = tf.constant([-1, -1, 0], dtype=tf.float64)
maindiag = tf.constant([2, 2, 2], dtype=tf.float64)
subdiag = tf.constant([0, -1, -1], dtype=tf.float64)
diagonals = [superdiag, maindiag, subdiag]
rhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)
x = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')
```
Args:
diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
shape depends of `diagonals_format`, see description above. Must be
`float32`, `float64`, `complex64`, or `complex128`.
rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`.
diagonals_format: one of `sequence`, or `compact`. Default is `compact`.
name: A name to give this `Op` (optional).
Returns:
A `Tensor` of shape [..., M, N] containing the result of multiplication.
Raises:
ValueError: An unsupported type is provided as input, or when the input
tensors have incorrect shapes.
"""
if diagonals_format == 'compact':
superdiag = diagonals[..., 0, :]
maindiag = diagonals[..., 1, :]
subdiag = diagonals[..., 2, :]
elif diagonals_format == 'sequence':
superdiag, maindiag, subdiag = diagonals
elif diagonals_format == 'matrix':
m1 = tensor_shape.dimension_value(diagonals.shape[-1])
m2 = tensor_shape.dimension_value(diagonals.shape[-2])
if not m1 or not m2:
raise ValueError('The size of the matrix needs to be known for '
'diagonals_format="matrix"')
if m1 != m2:
raise ValueError(
'Expected last two dimensions of diagonals to be same, got {} and {}'
.format(m1, m2))
# TODO(b/131695260): use matrix_diag_part when it supports extracting
# arbitrary diagonals.
maindiag = array_ops.matrix_diag_part(diagonals)
diagonals = array_ops.transpose(diagonals)
dummy_index = [0, 0]
superdiag_indices = [[i + 1, i] for i in range(0, m1 - 1)] + [dummy_index]
subdiag_indices = [dummy_index] + [[i - 1, i] for i in range(1, m1)]
superdiag = array_ops.transpose(
array_ops.gather_nd(diagonals, superdiag_indices))
subdiag = array_ops.transpose(
array_ops.gather_nd(diagonals, subdiag_indices))
else:
raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)
# C++ backend requires matrices.
# Converting 1-dimensional vectors to matrices with 1 row.
superdiag = array_ops.expand_dims(superdiag, -2)
maindiag = array_ops.expand_dims(maindiag, -2)
subdiag = array_ops.expand_dims(subdiag, -2)
return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)
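# A minimal NumPy sketch (hypothetical helper, not part of this module) of the
# multiplication in 'sequence' format, i.e.
#   y[i] = subdiag[i] * x[i-1] + maindiag[i] * x[i] + superdiag[i] * x[i+1].
import numpy as np


def _np_tridiag_matmul(superdiag, maindiag, subdiag, rhs):
  superdiag, maindiag, subdiag = map(np.asarray, (superdiag, maindiag, subdiag))
  rhs = np.asarray(rhs, dtype=float)
  out = maindiag[:, None] * rhs
  out[:-1] += superdiag[:-1, None] * rhs[1:]  # Last superdiag entry ignored.
  out[1:] += subdiag[1:, None] * rhs[:-1]     # First subdiag entry ignored.
  return out


# Mirrors the docstring example above.
assert np.allclose(
    _np_tridiag_matmul([-1, -1, 0], [2, 2, 2], [0, -1, -1],
                       [[1, 1], [1, 1], [1, 1]]),
    [[1, 1], [0, 0], [1, 1]])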
|
tensorflow-master
|
tensorflow/python/ops/linalg/linalg_impl.py
|
tensorflow-master
|
tensorflow/python/ops/linalg/__init__.py
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `LinearOperator` and sub-classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
class OperatorShapesInfo(object):
"""Object encoding expected shape for a test.
Encodes the expected shape of a matrix for a test. Also
allows additional metadata for the test harness.
"""
def __init__(self, shape, **kwargs):
self.shape = shape
self.__dict__.update(kwargs)
@six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init
class LinearOperatorDerivedClassTest(test.TestCase):
"""Tests for derived classes.
Subclasses should implement every abstractmethod, and this will enable all
test methods to work.
"""
# Absolute/relative tolerance for tests.
_atol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
_rtol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
def assertAC(self, x, y):
"""Derived classes can set _atol, _rtol to get different tolerance."""
dtype = dtypes.as_dtype(x.dtype)
atol = self._atol[dtype]
rtol = self._rtol[dtype]
self.assertAllClose(x, y, atol=atol, rtol=rtol)
@staticmethod
def adjoint_options():
return [False, True]
@staticmethod
def adjoint_arg_options():
return [False, True]
@staticmethod
def dtypes_to_test():
# TODO(langmore) Test tf.float16 once tf.linalg.solve works in 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
@staticmethod
def use_placeholder_options():
return [False, True]
@staticmethod
def operator_shapes_infos():
"""Returns list of OperatorShapesInfo, encapsulating the shape to test."""
raise NotImplementedError("operator_shapes_infos has not been implemented.")
@abc.abstractmethod
def operator_and_matrix(
self, shapes_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
"""Build a batch matrix and an Operator that should have similar behavior.
Every operator acts like a (batch) matrix. This method returns both
together, and is used by tests.
Args:
shapes_info: `OperatorShapesInfo`, encoding shape information about the
operator.
dtype: Numpy dtype. Data type of returned array/operator.
use_placeholder: Python bool. If True, initialize the operator with a
placeholder of undefined shape and correct dtype.
ensure_self_adjoint_and_pd: If `True`,
construct this operator to be Hermitian Positive Definite, as well
as ensuring the hints `is_positive_definite` and `is_self_adjoint`
are set.
This is useful for testing methods such as `cholesky`.
Returns:
operator: `LinearOperator` subclass instance.
mat: `Tensor` representing operator.
"""
# Create a matrix as a numpy array with desired shape/dtype.
# Create a LinearOperator that should have the same behavior as the matrix.
raise NotImplementedError("Not implemented yet.")
@abc.abstractmethod
def make_rhs(self, operator, adjoint, with_batch=True):
"""Make a rhs appropriate for calling operator.solve(rhs).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `rhs` with the same batch
shape as operator, and otherwise create a matrix without any batch
shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("make_rhs is not defined.")
@abc.abstractmethod
def make_x(self, operator, adjoint, with_batch=True):
"""Make an 'x' appropriate for calling operator.matmul(x).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making an 'x' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `x` with the same batch shape
as operator, and otherwise create a matrix without any batch shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("make_x is not defined.")
@staticmethod
def tests_to_skip():
"""List of test names to skip."""
# Subclasses should over-ride if they want to skip some tests.
# To skip "test_foo", add "foo" to this list.
return []
# pylint:disable=missing-docstring
def _test_to_dense(use_placeholder, shapes_info, dtype):
def test_to_dense(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_dense = operator.to_dense()
if not use_placeholder:
self.assertAllEqual(shapes_info.shape, op_dense.get_shape())
op_dense_v, mat_v = sess.run([op_dense, mat])
self.assertAC(op_dense_v, mat_v)
return test_to_dense
def _test_det(use_placeholder, shapes_info, dtype):
def test_det(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_det = operator.determinant()
if not use_placeholder:
self.assertAllEqual(shapes_info.shape[:-2], op_det.get_shape())
op_det_v, mat_det_v = sess.run(
[op_det, linalg_ops.matrix_determinant(mat)])
self.assertAC(op_det_v, mat_det_v)
return test_det
def _test_log_abs_det(use_placeholder, shapes_info, dtype):
def test_log_abs_det(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_log_abs_det = operator.log_abs_determinant()
_, mat_log_abs_det = linalg.slogdet(mat)
if not use_placeholder:
self.assertAllEqual(
shapes_info.shape[:-2], op_log_abs_det.get_shape())
op_log_abs_det_v, mat_log_abs_det_v = sess.run(
[op_log_abs_det, mat_log_abs_det])
self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
return test_log_abs_det
def _test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch):
# If batch dimensions are omitted, but there are
# no batch dimensions for the linear operator, then
# skip the test case. This is already checked with
# with_batch=True.
if not with_batch and len(shapes_info.shape) <= 2:
return
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
x = self.make_x(
operator, adjoint=adjoint, with_batch=with_batch)
# If adjoint_arg, compute A X^H^H = A X.
if adjoint_arg:
op_matmul = operator.matmul(
linalg.adjoint(x),
adjoint=adjoint,
adjoint_arg=adjoint_arg)
else:
op_matmul = operator.matmul(x, adjoint=adjoint)
mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
if not use_placeholder:
self.assertAllEqual(op_matmul.get_shape(),
mat_matmul.get_shape())
op_matmul_v, mat_matmul_v = sess.run(
[op_matmul, mat_matmul])
self.assertAC(op_matmul_v, mat_matmul_v)
def _test_matmul(
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg):
def test_matmul(self):
_test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch=True)
return test_matmul
def _test_matmul_with_broadcast(
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg):
def test_matmul_with_broadcast(self):
_test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch=False)
return test_matmul_with_broadcast
def _test_adjoint(use_placeholder, shapes_info, dtype):
def test_adjoint(self):
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_adjoint = operator.adjoint().to_dense()
op_adjoint_h = operator.H.to_dense()
mat_adjoint = linalg.adjoint(mat)
op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run(
[op_adjoint, op_adjoint_h, mat_adjoint])
self.assertAC(mat_adjoint_v, op_adjoint_v)
self.assertAC(mat_adjoint_v, op_adjoint_h_v)
return test_adjoint
def _test_cholesky(use_placeholder, shapes_info, dtype):
def test_cholesky(self):
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder,
ensure_self_adjoint_and_pd=True)
op_chol = operator.cholesky().to_dense()
mat_chol = linalg_ops.cholesky(mat)
op_chol_v, mat_chol_v = sess.run([op_chol, mat_chol])
self.assertAC(mat_chol_v, op_chol_v)
return test_cholesky
def _test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch):
# If batch dimensions are omitted, but there are
# no batch dimensions for the linear operator, then
# skip the test case. This is already checked with
# with_batch=True.
if not with_batch and len(shapes_info.shape) <= 2:
return
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
rhs = self.make_rhs(
operator, adjoint=adjoint, with_batch=with_batch)
# If adjoint_arg, solve A X = (rhs^H)^H = rhs.
if adjoint_arg:
op_solve = operator.solve(
linalg.adjoint(rhs),
adjoint=adjoint,
adjoint_arg=adjoint_arg)
else:
op_solve = operator.solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
mat_solve = linear_operator_util.matrix_solve_with_broadcast(
mat, rhs, adjoint=adjoint)
if not use_placeholder:
self.assertAllEqual(op_solve.get_shape(),
mat_solve.get_shape())
op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve])
self.assertAC(op_solve_v, mat_solve_v)
def _test_solve(
use_placeholder, shapes_info, dtype, adjoint, adjoint_arg):
def test_solve(self):
_test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch=True)
return test_solve
def _test_solve_with_broadcast(
use_placeholder, shapes_info, dtype, adjoint, adjoint_arg):
def test_solve_with_broadcast(self):
_test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch=False)
return test_solve_with_broadcast
def _test_inverse(use_placeholder, shapes_info, dtype):
def test_inverse(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_inverse_v, mat_inverse_v = sess.run([
operator.inverse().to_dense(), linalg.inv(mat)])
self.assertAC(op_inverse_v, mat_inverse_v)
return test_inverse
def _test_trace(use_placeholder, shapes_info, dtype):
def test_trace(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_trace = operator.trace()
mat_trace = math_ops.trace(mat)
if not use_placeholder:
self.assertAllEqual(op_trace.get_shape(), mat_trace.get_shape())
op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace])
self.assertAC(op_trace_v, mat_trace_v)
return test_trace
def _test_add_to_tensor(use_placeholder, shapes_info, dtype):
def test_add_to_tensor(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_plus_2mat = operator.add_to_tensor(2 * mat)
if not use_placeholder:
self.assertAllEqual(shapes_info.shape, op_plus_2mat.get_shape())
op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat])
self.assertAC(op_plus_2mat_v, 3 * mat_v)
return test_add_to_tensor
def _test_diag_part(use_placeholder, shapes_info, dtype):
def test_diag_part(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_diag_part = operator.diag_part()
mat_diag_part = array_ops.matrix_diag_part(mat)
if not use_placeholder:
self.assertAllEqual(mat_diag_part.get_shape(),
op_diag_part.get_shape())
op_diag_part_, mat_diag_part_ = sess.run(
[op_diag_part, mat_diag_part])
self.assertAC(op_diag_part_, mat_diag_part_)
return test_diag_part
# pylint:enable=missing-docstring
def add_tests(test_cls):
"""Add tests for LinearOperator methods."""
test_name_dict = {
"add_to_tensor": _test_add_to_tensor,
"cholesky": _test_cholesky,
"det": _test_det,
"diag_part": _test_diag_part,
"inverse": _test_inverse,
"log_abs_det": _test_log_abs_det,
"matmul": _test_matmul,
"matmul_with_broadcast": _test_matmul_with_broadcast,
"solve": _test_solve,
"solve_with_broadcast": _test_solve_with_broadcast,
"to_dense": _test_to_dense,
"trace": _test_trace,
}
tests_with_adjoint_args = [
"matmul",
"matmul_with_broadcast",
"solve",
"solve_with_broadcast",
]
for name, test_template_fn in test_name_dict.items():
if name in test_cls.tests_to_skip():
continue
for dtype, use_placeholder, shape_info in itertools.product(
test_cls.dtypes_to_test(),
test_cls.use_placeholder_options(),
test_cls.operator_shapes_infos()):
base_test_name = "_".join([
"test", name, "_shape={},dtype={},use_placeholder={}".format(
shape_info.shape, dtype, use_placeholder)])
if name in tests_with_adjoint_args:
for adjoint in test_cls.adjoint_options():
for adjoint_arg in test_cls.adjoint_arg_options():
test_name = base_test_name + ",adjoint={},adjoint_arg={}".format(
adjoint, adjoint_arg)
if hasattr(test_cls, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(
test_cls,
test_name,
test_util.run_deprecated_v1(test_template_fn(
use_placeholder,
shape_info,
dtype,
adjoint,
adjoint_arg)))
else:
if hasattr(test_cls, base_test_name):
raise RuntimeError("Test %s defined more than once" % base_test_name)
setattr(
test_cls,
base_test_name,
test_util.run_deprecated_v1(test_template_fn(
use_placeholder, shape_info, dtype)))
@six.add_metaclass(abc.ABCMeta)
class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
"""Base test class appropriate for square operators.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@staticmethod
def operator_shapes_infos():
shapes_info = OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shapes_info((0, 0)),
shapes_info((1, 1)),
shapes_info((1, 3, 3)),
shapes_info((3, 4, 4)),
shapes_info((2, 1, 4, 4))]
def make_rhs(self, operator, adjoint, with_batch=True):
# This operator is square, so rhs and x will have same shape.
# adjoint value makes no difference because the operator shape doesn't
# change since it is square, but be pedantic.
return self.make_x(operator, adjoint=not adjoint, with_batch=with_batch)
def make_x(self, operator, adjoint, with_batch=True):
# Value of adjoint makes no difference because the operator is square.
# Return the number of systems to solve, R, equal to 1 or 2.
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
@six.add_metaclass(abc.ABCMeta)
class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
"""Base test class appropriate for generic rectangular operators.
Square shapes are never tested by this class, so if you want to test your
operator with a square shape, create two test classes, the other subclassing
SquareLinearOperatorFullMatrixTest.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@staticmethod
def tests_to_skip():
"""List of test names to skip."""
return [
"cholesky",
"inverse",
"solve",
"solve_with_broadcast",
"det",
"log_abs_det"
]
@staticmethod
def operator_shapes_infos():
shapes_info = OperatorShapesInfo
# non-batch operators (m, n) and batch operators.
return [
shapes_info((2, 1)),
shapes_info((1, 2)),
shapes_info((1, 3, 2)),
shapes_info((3, 3, 4)),
shapes_info((2, 1, 2, 4))]
def make_rhs(self, operator, adjoint, with_batch=True):
# TODO(langmore) Add once we're testing solve_ls.
raise NotImplementedError(
"make_rhs not implemented because we don't test solve")
def make_x(self, operator, adjoint, with_batch=True):
# Return the number of systems for the argument 'x' for .matmul(x)
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
if adjoint:
n = operator.range_dimension.value
else:
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
if adjoint:
n = operator.range_dimension_tensor()
else:
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
"""[batch] positive definite matrix.
Args:
shape: `TensorShape` or Python list. Shape of the returned matrix.
dtype: `TensorFlow` `dtype` or Python dtype.
force_well_conditioned: Python bool. If `True`, returned matrix has
eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are
chi-squared random variables.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
if not tensor_util.is_tensor(shape):
shape = tensor_shape.TensorShape(shape)
# Matrix must be square.
shape.dims[-1].assert_is_compatible_with(shape.dims[-2])
with ops.name_scope("random_positive_definite_matrix"):
tril = random_tril_matrix(
shape, dtype, force_well_conditioned=force_well_conditioned)
return math_ops.matmul(tril, tril, adjoint_b=True)
def random_tril_matrix(shape,
dtype,
force_well_conditioned=False,
remove_upper=True):
"""[batch] lower triangular matrix.
Args:
shape: `TensorShape` or Python `list`. Shape of the returned matrix.
dtype: `TensorFlow` `dtype` or Python dtype
force_well_conditioned: Python `bool`. If `True`, returned matrix will have
eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit
normal random variables.
remove_upper: Python `bool`.
If `True`, zero out the strictly upper triangle.
If `False`, the lower triangle of returned matrix will have desired
properties, but will not have the strictly upper triangle zero'd out.
Returns:
`Tensor` with desired shape and dtype.
"""
with ops.name_scope("random_tril_matrix"):
# Totally random matrix. Has no nice properties.
tril = random_normal(shape, dtype=dtype)
if remove_upper:
tril = array_ops.matrix_band_part(tril, -1, 0)
# Create a diagonal with entries having modulus in [1, 2].
if force_well_conditioned:
maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
diag = random_sign_uniform(
shape[:-1], dtype=dtype, minval=1., maxval=maxval)
tril = array_ops.matrix_set_diag(tril, diag)
return tril
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
"""Tensor with (possibly complex) Gaussian entries.
Samples are distributed like
```
N(mean, stddev^2), if dtype is real,
X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from.
dtype: `TensorFlow` `dtype` or numpy dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_normal"):
samples = random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
if dtype.is_complex:
if seed is not None:
seed += 1234
more_samples = random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
samples = math_ops.complex(samples, more_samples)
return samples
def random_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) Uniform entries.
Samples are distributed like
```
Uniform[minval, maxval], if dtype is real,
X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_uniform"):
samples = random_ops.random_uniform(
shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
if dtype.is_complex:
if seed is not None:
seed += 12345
more_samples = random_ops.random_uniform(
shape,
dtype=dtype.real_dtype,
minval=minval,
maxval=maxval,
seed=seed)
samples = math_ops.complex(samples, more_samples)
return samples
def random_sign_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) random entries from a "sign Uniform".
Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
samples from this `Op` are distributed like
```
Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_sign_uniform"):
unsigned_samples = random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
if seed is not None:
seed += 12
signs = math_ops.sign(
random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
def random_normal_correlated_columns(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
eps=1e-4,
seed=None):
"""Batch matrix with (possibly complex) Gaussian entries and correlated cols.
Returns random batch matrix `A` with specified element-wise `mean`, `stddev`,
living close to an embedded hyperplane.
Suppose `shape[-2:] = (M, N)`.
If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries.
If `M >= N`, then the columns of `A` will be made almost dependent as follows:
```
L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
B = random normal M x N-1 matrix, mean = 0, stddev = stddev.
G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane
E = a random normal M x N matrix, mean = 0, stddev = eps
mu = a constant M x N matrix, equal to the argument "mean"
A = G + E + mu
```
Args:
shape: Python list of integers.
Shape of the returned tensor. Must be at least length two.
mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from.
dtype: `TensorFlow` `dtype` or numpy dtype
eps: Distance each column is perturbed from the low-dimensional subspace.
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
Raises:
ValueError: If `shape` is not at least length 2.
"""
dtype = dtypes.as_dtype(dtype)
if len(shape) < 2:
raise ValueError(
"Argument shape must be at least length 2. Found: %s" % shape)
# Shape is the final shape, e.g. [..., M, N]
shape = list(shape)
batch_shape = shape[:-2]
m, n = shape[-2:]
# If there is only one column, "they" are by definition correlated.
if n < 2 or n < m:
return random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
# Shape of the matrix with only n - 1 columns that we will embed in higher
# dimensional space.
smaller_shape = batch_shape + [m, n - 1]
# Shape of the embedding matrix, mapping batch matrices
# from [..., N-1, M] to [..., N, M]
embedding_mat_shape = batch_shape + [n, n - 1]
# This stddev for the embedding_mat ensures final result has correct stddev.
stddev_mat = 1 / np.sqrt(n - 1)
with ops.name_scope("random_normal_correlated_columns"):
smaller_mat = random_normal(
smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed)
if seed is not None:
seed += 1287
embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed)
embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True)
embedded = array_ops.matrix_transpose(embedded_t)
mean_mat = array_ops.ones_like(embedded) * mean
return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat
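# A minimal NumPy sketch (hypothetical helper, not part of this module) of the
# construction described in the docstring above, for a real dtype and without
# batch dimensions: the columns end up within about `eps` of an (N-1)-dim
# subspace, so the smallest singular value is tiny.
import numpy as np


def _np_correlated_columns(m, n, mean=0.0, stddev=1.0, eps=1e-4):
  l = np.random.normal(0., 1. / np.sqrt(n - 1), size=(n, n - 1))
  b = np.random.normal(0., stddev, size=(m, n - 1))
  g = l.dot(b.T).T  # M x N matrix living close to an (N-1)-dim subspace.
  e = np.random.normal(0., eps, size=(m, n))
  return g + e + mean


np.random.seed(0)
_a = _np_correlated_columns(5, 4)
assert np.linalg.svd(_a, compute_uv=False)[-1] < 1e-2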
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_test_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registration mechanisms for various n-ary operations on LinearOperators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.framework import ops
from tensorflow.python.util import tf_inspect
_ADJOINTS = {}
_CHOLESKY_DECOMPS = {}
_MATMUL = {}
_SOLVE = {}
_INVERSES = {}
def _registered_function(type_list, registry):
"""Given a list of classes, finds the most specific function registered."""
enumerated_hierarchies = [enumerate(tf_inspect.getmro(t)) for t in type_list]
# Get all possible combinations of hierarchies.
cls_combinations = list(itertools.product(*enumerated_hierarchies))
def hierarchy_distance(cls_combination):
candidate_distance = sum(c[0] for c in cls_combination)
if tuple(c[1] for c in cls_combination) in registry:
return candidate_distance
return 10000
registered_combination = min(cls_combinations, key=hierarchy_distance)
return registry.get(tuple(r[1] for r in registered_combination), None)
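# A minimal sketch (hypothetical demo classes and registry, unused by the real
# registrations below) of how the MRO-distance lookup behaves: an
# implementation registered for a base class is found for subclasses, and a
# more specific registration wins because its combined MRO distance is smaller.
class _DemoBase(object):
  pass


class _DemoDerived(_DemoBase):
  pass


_DEMO_REGISTRY = {
    (_DemoBase, _DemoBase): "base_impl",
    (_DemoDerived, _DemoBase): "derived_impl",
}
assert _registered_function([_DemoDerived, _DemoDerived],
                            _DEMO_REGISTRY) == "derived_impl"
assert _registered_function([_DemoBase, _DemoBase],
                            _DEMO_REGISTRY) == "base_impl"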
def _registered_adjoint(type_a):
"""Get the Adjoint function registered for class a."""
return _registered_function([type_a], _ADJOINTS)
def _registered_cholesky(type_a):
"""Get the Cholesky function registered for class a."""
return _registered_function([type_a], _CHOLESKY_DECOMPS)
def _registered_matmul(type_a, type_b):
"""Get the Matmul function registered for classes a and b."""
return _registered_function([type_a, type_b], _MATMUL)
def _registered_solve(type_a, type_b):
"""Get the Solve function registered for classes a and b."""
return _registered_function([type_a, type_b], _SOLVE)
def _registered_inverse(type_a):
"""Get the Cholesky function registered for class a."""
return _registered_function([type_a], _INVERSES)
def adjoint(lin_op_a, name=None):
"""Get the adjoint associated to lin_op_a.
Args:
lin_op_a: The LinearOperator to take the adjoint of.
name: Name to use for this operation.
Returns:
A LinearOperator that represents the adjoint of `lin_op_a`.
Raises:
ValueError: If no adjoint is registered for the LinearOperator type of
`lin_op_a`.
"""
adjoint_fn = _registered_adjoint(type(lin_op_a))
if adjoint_fn is None:
raise ValueError("No adjoint registered for {}".format(
type(lin_op_a)))
with ops.name_scope(name, "Adjoint"):
return adjoint_fn(lin_op_a)
def cholesky(lin_op_a, name=None):
"""Get the Cholesky factor associated to lin_op_a.
Args:
lin_op_a: The LinearOperator to decompose.
name: Name to use for this operation.
Returns:
A LinearOperator that represents the lower Cholesky factor of `lin_op_a`.
Raises:
ValueError: If no Cholesky decomposition is registered for the LinearOperator
type of `lin_op_a`.
"""
cholesky_fn = _registered_cholesky(type(lin_op_a))
if cholesky_fn is None:
raise ValueError("No cholesky decomposition registered for {}".format(
type(lin_op_a)))
with ops.name_scope(name, "Cholesky"):
return cholesky_fn(lin_op_a)
def matmul(lin_op_a, lin_op_b, name=None):
"""Compute lin_op_a.matmul(lin_op_b).
Args:
lin_op_a: The LinearOperator on the left.
lin_op_b: The LinearOperator on the right.
name: Name to use for this operation.
Returns:
A LinearOperator that represents the matmul between `lin_op_a` and
`lin_op_b`.
Raises:
ValueError: If no matmul is registered between the types of
`lin_op_a` and `lin_op_b`.
"""
matmul_fn = _registered_matmul(type(lin_op_a), type(lin_op_b))
if matmul_fn is None:
raise ValueError("No matmul registered for {}.matmul({})".format(
type(lin_op_a), type(lin_op_b)))
with ops.name_scope(name, "Matmul"):
return matmul_fn(lin_op_a, lin_op_b)
def solve(lin_op_a, lin_op_b, name=None):
"""Compute lin_op_a.solve(lin_op_b).
Args:
lin_op_a: The LinearOperator on the left.
lin_op_b: The LinearOperator on the right.
name: Name to use for this operation.
Returns:
A LinearOperator that represents the solve between `lin_op_a` and
`lin_op_b`.
Raises:
ValueError: If no solve is registered between the types of
`lin_op_a` and `lin_op_b`.
"""
solve_fn = _registered_solve(type(lin_op_a), type(lin_op_b))
if solve_fn is None:
raise ValueError("No solve registered for {}.solve({})".format(
type(lin_op_a), type(lin_op_b)))
with ops.name_scope(name, "Solve"):
return solve_fn(lin_op_a, lin_op_b)
def inverse(lin_op_a, name=None):
"""Get the Inverse associated to lin_op_a.
Args:
lin_op_a: The LinearOperator to invert.
name: Name to use for this operation.
Returns:
A LinearOperator that represents the inverse of `lin_op_a`.
Raises:
ValueError: If no inverse is registered for the LinearOperator type of
`lin_op_a`.
"""
inverse_fn = _registered_inverse(type(lin_op_a))
if inverse_fn is None:
raise ValueError("No inverse registered for {}".format(
type(lin_op_a)))
with ops.name_scope(name, "Inverse"):
return inverse_fn(lin_op_a)
class RegisterAdjoint(object):
"""Decorator to register an Adjoint implementation function.
Usage:
@linear_operator_algebra.RegisterAdjoint(lin_op.LinearOperatorIdentity)
def _adjoint_identity(lin_op_a):
# Return the identity matrix.
"""
def __init__(self, lin_op_cls_a):
"""Initialize the LinearOperator registrar.
Args:
lin_op_cls_a: the class of the LinearOperator to take the adjoint of.
"""
self._key = (lin_op_cls_a,)
def __call__(self, adjoint_fn):
"""Perform the Adjoint registration.
Args:
adjoint_fn: The function to use for the Adjoint.
Returns:
adjoint_fn
Raises:
TypeError: if adjoint_fn is not a callable.
ValueError: if an Adjoint function has already been registered for
the given argument classes.
"""
if not callable(adjoint_fn):
raise TypeError(
"adjoint_fn must be callable, received: {}".format(adjoint_fn))
if self._key in _ADJOINTS:
raise ValueError("Adjoint({}) has already been registered to: {}".format(
self._key[0].__name__, _ADJOINTS[self._key]))
_ADJOINTS[self._key] = adjoint_fn
return adjoint_fn
class RegisterCholesky(object):
"""Decorator to register a Cholesky implementation function.
Usage:
@linear_operator_algebra.RegisterCholesky(lin_op.LinearOperatorIdentity)
def _cholesky_identity(lin_op_a):
# Return the identity matrix.
"""
def __init__(self, lin_op_cls_a):
"""Initialize the LinearOperator registrar.
Args:
lin_op_cls_a: the class of the LinearOperator to decompose.
"""
self._key = (lin_op_cls_a,)
def __call__(self, cholesky_fn):
"""Perform the Cholesky registration.
Args:
cholesky_fn: The function to use for the Cholesky.
Returns:
cholesky_fn
Raises:
TypeError: if cholesky_fn is not a callable.
ValueError: if a Cholesky function has already been registered for
the given argument classes.
"""
if not callable(cholesky_fn):
raise TypeError(
"cholesky_fn must be callable, received: {}".format(cholesky_fn))
if self._key in _CHOLESKY_DECOMPS:
raise ValueError("Cholesky({}) has already been registered to: {}".format(
self._key[0].__name__, _CHOLESKY_DECOMPS[self._key]))
_CHOLESKY_DECOMPS[self._key] = cholesky_fn
return cholesky_fn
class RegisterMatmul(object):
"""Decorator to register a Matmul implementation function.
Usage:
@linear_operator_algebra.RegisterMatmul(
lin_op.LinearOperatorIdentity,
lin_op.LinearOperatorIdentity)
def _matmul_identity(a, b):
# Return the identity matrix.
"""
def __init__(self, lin_op_cls_a, lin_op_cls_b):
"""Initialize the LinearOperator registrar.
Args:
lin_op_cls_a: the class of the LinearOperator to multiply.
lin_op_cls_b: the class of the second LinearOperator to multiply.
"""
self._key = (lin_op_cls_a, lin_op_cls_b)
def __call__(self, matmul_fn):
"""Perform the Matmul registration.
Args:
matmul_fn: The function to use for the Matmul.
Returns:
matmul_fn
Raises:
TypeError: if matmul_fn is not a callable.
ValueError: if a Matmul function has already been registered for
the given argument classes.
"""
if not callable(matmul_fn):
raise TypeError(
"matmul_fn must be callable, received: {}".format(matmul_fn))
if self._key in _MATMUL:
raise ValueError("Matmul({}, {}) has already been registered.".format(
self._key[0].__name__,
self._key[1].__name__))
_MATMUL[self._key] = matmul_fn
return matmul_fn
class RegisterSolve(object):
"""Decorator to register a Solve implementation function.
Usage:
@linear_operator_algebra.RegisterSolve(
lin_op.LinearOperatorIdentity,
lin_op.LinearOperatorIdentity)
def _solve_identity(a, b):
# Return the identity matrix.
"""
def __init__(self, lin_op_cls_a, lin_op_cls_b):
"""Initialize the LinearOperator registrar.
Args:
lin_op_cls_a: the class of the LinearOperator that is computing solve.
lin_op_cls_b: the class of the second LinearOperator to solve.
"""
self._key = (lin_op_cls_a, lin_op_cls_b)
def __call__(self, solve_fn):
"""Perform the Solve registration.
Args:
solve_fn: The function to use for the Solve.
Returns:
solve_fn
Raises:
TypeError: if solve_fn is not a callable.
ValueError: if a Solve function has already been registered for
the given argument classes.
"""
if not callable(solve_fn):
raise TypeError(
"solve_fn must be callable, received: {}".format(solve_fn))
if self._key in _SOLVE:
raise ValueError("Solve({}, {}) has already been registered.".format(
self._key[0].__name__,
self._key[1].__name__))
_SOLVE[self._key] = solve_fn
return solve_fn
class RegisterInverse(object):
"""Decorator to register an Inverse implementation function.
Usage:
@linear_operator_algebra.RegisterInverse(lin_op.LinearOperatorIdentity)
def _inverse_identity(lin_op_a):
# Return the identity matrix.
"""
def __init__(self, lin_op_cls_a):
"""Initialize the LinearOperator registrar.
Args:
lin_op_cls_a: the class of the LinearOperator to decompose.
"""
self._key = (lin_op_cls_a,)
def __call__(self, inverse_fn):
"""Perform the Inverse registration.
Args:
inverse_fn: The function to use for the Inverse.
Returns:
inverse_fn
Raises:
TypeError: if inverse_fn is not a callable.
ValueError: if an Inverse function has already been registered for
the given argument classes.
"""
if not callable(inverse_fn):
raise TypeError(
"inverse_fn must be callable, received: {}".format(inverse_fn))
if self._key in _INVERSES:
raise ValueError("Inverse({}) has already been registered to: {}".format(
self._key[0].__name__, _INVERSES[self._key]))
_INVERSES[self._key] = inverse_fn
return inverse_fn
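# Illustrative sketch (not part of the library): binary registrations such as
# `RegisterMatmul` and `RegisterSolve` are keyed on an ordered *pair* of
# operator classes. `_ExampleLeft`, `_ExampleRight` and `_solve_example` are
# hypothetical names; the registered function receives the two operator
# instances in that order.


class _ExampleLeft(object):
  """Hypothetical stand-in for the left-hand LinearOperator class."""
  pass


class _ExampleRight(object):
  """Hypothetical stand-in for the right-hand LinearOperator class."""
  pass


@RegisterSolve(_ExampleLeft, _ExampleRight)
def _solve_example(lin_op_a, lin_op_b):
  # Purely illustrative placeholder rule, not a real algebra rule: pretend
  # the solve against the right operand returns it unchanged.
  return lin_op_b

# `solve(_ExampleLeft(), _ExampleRight())` now dispatches to `_solve_example`;
# the reversed order `(_ExampleRight(), _ExampleLeft())` is a separate key and
# remains unregistered.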
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_algebra.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registrations for LinearOperator.inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_block_diag
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_householder
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_inversion
from tensorflow.python.ops.linalg import linear_operator_kronecker
# By default, return LinearOperatorInversion, which swaps the .matmul
# and .solve methods.
@linear_operator_algebra.RegisterInverse(linear_operator.LinearOperator)
def _inverse_linear_operator(linop):
return linear_operator_inversion.LinearOperatorInversion(
linop,
is_non_singular=linop.is_non_singular,
is_self_adjoint=linop.is_self_adjoint,
is_positive_definite=linop.is_positive_definite,
is_square=linop.is_square)
@linear_operator_algebra.RegisterInverse(
linear_operator_inversion.LinearOperatorInversion)
def _inverse_inverse_linear_operator(linop_inversion):
return linop_inversion.operator
@linear_operator_algebra.RegisterInverse(
linear_operator_diag.LinearOperatorDiag)
def _inverse_diag(diag_operator):
return linear_operator_diag.LinearOperatorDiag(
1. / diag_operator.diag,
is_non_singular=diag_operator.is_non_singular,
is_self_adjoint=diag_operator.is_self_adjoint,
is_positive_definite=diag_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterInverse(
linear_operator_identity.LinearOperatorIdentity)
def _inverse_identity(identity_operator):
return identity_operator
@linear_operator_algebra.RegisterInverse(
linear_operator_identity.LinearOperatorScaledIdentity)
def _inverse_scaled_identity(identity_operator):
return linear_operator_identity.LinearOperatorScaledIdentity(
num_rows=identity_operator._num_rows, # pylint: disable=protected-access
multiplier=1. / identity_operator.multiplier,
is_non_singular=identity_operator.is_non_singular,
is_self_adjoint=True,
is_positive_definite=identity_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterInverse(
linear_operator_block_diag.LinearOperatorBlockDiag)
def _inverse_block_diag(block_diag_operator):
# We take the inverse of each block on the diagonal.
return linear_operator_block_diag.LinearOperatorBlockDiag(
operators=[
operator.inverse() for operator in block_diag_operator.operators],
is_non_singular=block_diag_operator.is_non_singular,
is_self_adjoint=block_diag_operator.is_self_adjoint,
is_positive_definite=block_diag_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterInverse(
linear_operator_kronecker.LinearOperatorKronecker)
def _inverse_kronecker(kronecker_operator):
# Inverse decomposition of a Kronecker product is the Kronecker product
# of inverse decompositions.
return linear_operator_kronecker.LinearOperatorKronecker(
operators=[
operator.inverse() for operator in kronecker_operator.operators],
is_non_singular=kronecker_operator.is_non_singular,
is_self_adjoint=kronecker_operator.is_self_adjoint,
is_positive_definite=kronecker_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterInverse(
linear_operator_circulant.LinearOperatorCirculant)
def _inverse_circulant(circulant_operator):
# Inverting the spectrum is sufficient to get the inverse.
return linear_operator_circulant.LinearOperatorCirculant(
spectrum=1. / circulant_operator.spectrum,
is_non_singular=circulant_operator.is_non_singular,
is_self_adjoint=circulant_operator.is_self_adjoint,
is_positive_definite=circulant_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterInverse(
linear_operator_householder.LinearOperatorHouseholder)
def _inverse_householder(householder_operator):
return householder_operator
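# Illustrative usage sketch (not part of the library): with the registrations
# above in place, calling `.inverse()` on a diagonal operator dispatches to
# `_inverse_diag` and returns another `LinearOperatorDiag` holding the
# reciprocal diagonal, so no dense matrix inverse is ever formed.
def _example_diag_inverse():
  diag_op = linear_operator_diag.LinearOperatorDiag(
      [2., 4.], is_non_singular=True)
  inv_op = diag_op.inverse()  # LinearOperatorDiag with diagonal [0.5, 0.25].
  # Composing the two recovers (a Tensor for) the 2 x 2 identity matrix.
  return inv_op.matmul(diag_op.to_dense())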
|
tensorflow-master
|
tensorflow/python/ops/linalg/inverse_registrations.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for tf.linalg namespace."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.linalg import adjoint_registrations as _adjoint_registrations
from tensorflow.python.ops.linalg import cholesky_registrations as _cholesky_registrations
from tensorflow.python.ops.linalg import inverse_registrations as _inverse_registrations
from tensorflow.python.ops.linalg import linear_operator_algebra as _linear_operator_algebra
from tensorflow.python.ops.linalg import matmul_registrations as _matmul_registrations
from tensorflow.python.ops.linalg import solve_registrations as _solve_registrations
from tensorflow.python.ops.linalg.linalg_impl import *
from tensorflow.python.ops.linalg.linear_operator import *
from tensorflow.python.ops.linalg.linear_operator_block_diag import *
from tensorflow.python.ops.linalg.linear_operator_circulant import *
from tensorflow.python.ops.linalg.linear_operator_composition import *
from tensorflow.python.ops.linalg.linear_operator_diag import *
from tensorflow.python.ops.linalg.linear_operator_full_matrix import *
from tensorflow.python.ops.linalg.linear_operator_identity import *
from tensorflow.python.ops.linalg.linear_operator_kronecker import *
from tensorflow.python.ops.linalg.linear_operator_low_rank_update import *
from tensorflow.python.ops.linalg.linear_operator_lower_triangular import *
from tensorflow.python.ops.linalg.linear_operator_toeplitz import *
from tensorflow.python.ops.linalg.linear_operator_zeros import *
# pylint: enable=wildcard-import
# Seal API.
# pylint: disable=undefined-variable
del absolute_import
del division
del print_function
del ops
del array_ops
del gen_linalg_ops
del linalg_ops
del math_ops
del special_math_ops
del tf_export
# pylint: enable=undefined-variable
|
tensorflow-master
|
tensorflow/python/ops/linalg/linalg.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a zero matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorZeros",
]
@tf_export("linalg.LinearOperatorZeros")
class LinearOperatorZeros(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] zero matrix.
This operator acts like a [batch] zero matrix `A` with shape
`[B1,...,Bb, N, M]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x M` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorZeros` is initialized with `num_rows`, and optionally
`num_columns`, `batch_shape`, and `dtype` arguments. If `num_columns` is
`None`, then this operator will be initialized as a square matrix. If
`batch_shape` is `None`, this operator has no leading batch dimensions. If
`batch_shape` is provided, broadcasting may occur, which will require making
copies.
```python
# Create a 2 x 2 zero matrix.
operator = LinearOperatorZeros(num_rows=2, dtype=tf.float32)
operator.to_dense()
==> [[0., 0.]
[0., 0.]]
operator.shape
==> [2, 2]
operator.determinant()
==> 0.
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor, same as tf.zeros_like(x).
# Create a 2-batch of 2x2 zero matrices
operator = LinearOperatorZeros(num_rows=2, batch_shape=[2])
operator.to_dense()
==> [[[0., 0.]
[0., 0.]],
[[0., 0.]
[0., 0.]]]
# Here, even though the operator has a batch shape, the output has the same
# shape as the input, so no broadcast (and hence no copy) is necessary. The
# operator is able to detect this because both x and the operator have
# statically defined shape.
x = ... Shape [2, 2, 3]
operator.matmul(x)
==> Shape [2, 2, 3] Tensor, same as tf.zeros_like(x)
# Here the operator and x have different batch_shape, and are broadcast.
# This requires a copy, since the output is a different size than the input.
x = ... Shape [1, 2, 3]
operator.matmul(x)
==> Shape [2, 2, 3] zero Tensor.
```
### Shape compatibility
This operator acts on a [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, M], with b >= 0
x.shape = [C1,...,Cc] + [M, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
num_rows,
num_columns=None,
batch_shape=None,
dtype=None,
is_non_singular=False,
is_self_adjoint=True,
is_positive_definite=False,
is_square=True,
assert_proper_shapes=False,
name="LinearOperatorZeros"):
r"""Initialize a `LinearOperatorZeros`.
The `LinearOperatorZeros` is initialized with arguments defining `dtype`
and shape.
This operator is able to broadcast the leading (batch) dimensions, which
sometimes requires copying data. If `batch_shape` is `None`, the operator
can take arguments of any batch shape without copying. See examples.
Args:
num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
corresponding zero matrix.
num_columns: Scalar non-negative integer `Tensor`. Number of columns in
the corresponding zero matrix. If `None`, defaults to the value of
`num_rows`.
batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading
dimensions. If `None`, this operator has no leading dimensions.
dtype: Data type of the matrix that this operator represents.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
assert_proper_shapes: Python `bool`. If `False`, only perform static
checks that initialization and method arguments have proper shape.
If `True`, and static checks are inconclusive, add asserts to the graph.
name: A name for this `LinearOperator`
Raises:
ValueError: If `num_rows` is determined statically to be non-scalar, or
negative.
ValueError: If `num_columns` is determined statically to be non-scalar,
or negative.
ValueError: If `batch_shape` is determined statically to not be 1-D, or
negative.
ValueError: If `is_non_singular` or `is_positive_definite` is `True`, or if
`is_self_adjoint` is `False` for a square operator.
"""
dtype = dtype or dtypes.float32
self._assert_proper_shapes = assert_proper_shapes
with ops.name_scope(name):
dtype = dtypes.as_dtype(dtype)
if not is_self_adjoint and is_square:
raise ValueError("A zero operator is always self adjoint.")
if is_non_singular:
raise ValueError("A zero operator is always singular.")
if is_positive_definite:
raise ValueError("A zero operator is never positive definite.")
super(LinearOperatorZeros, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
self._num_rows = linear_operator_util.shape_tensor(
num_rows, name="num_rows")
self._num_rows_static = tensor_util.constant_value(self._num_rows)
if num_columns is None:
num_columns = num_rows
self._num_columns = linear_operator_util.shape_tensor(
num_columns, name="num_columns")
self._num_columns_static = tensor_util.constant_value(self._num_columns)
self._check_domain_range_possibly_add_asserts()
if (self._num_rows_static is not None and
self._num_columns_static is not None):
if is_square and self._num_rows_static != self._num_columns_static:
raise ValueError(
"LinearOperatorZeros initialized as is_square=True, but got "
"num_rows({}) != num_columns({})".format(
self._num_rows_static,
self._num_columns_static))
if batch_shape is None:
self._batch_shape_arg = None
else:
self._batch_shape_arg = linear_operator_util.shape_tensor(
batch_shape, name="batch_shape_arg")
self._batch_shape_static = tensor_util.constant_value(
self._batch_shape_arg)
self._check_batch_shape_possibly_add_asserts()
def _shape(self):
matrix_shape = tensor_shape.TensorShape((self._num_rows_static,
self._num_columns_static))
if self._batch_shape_arg is None:
return matrix_shape
batch_shape = tensor_shape.TensorShape(self._batch_shape_static)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
matrix_shape = array_ops.stack((self._num_rows, self._num_columns), axis=0)
if self._batch_shape_arg is None:
return matrix_shape
return array_ops.concat((self._batch_shape_arg, matrix_shape), 0)
def _assert_non_singular(self):
raise errors.InvalidArgumentError(
node_def=None, op=None, message="Zero operators are always "
"non-invertible.")
def _assert_positive_definite(self):
raise errors.InvalidArgumentError(
node_def=None, op=None, message="Zero operators are always "
"non-positive definite.")
def _assert_self_adjoint(self):
return control_flow_ops.no_op("assert_self_adjoint")
def _possibly_broadcast_batch_shape(self, x):
"""Return 'x', possibly after broadcasting the leading dimensions."""
# If we have no batch shape, our batch shape broadcasts with everything!
if self._batch_shape_arg is None:
return x
# Static attempt:
# If we determine that no broadcast is necessary, pass x through
# If we need a broadcast, add to an array of zeros.
#
# special_shape is the shape that, when broadcast with x's shape, will give
# the correct broadcast_shape. Note that
# We have already verified the second to last dimension of self.shape
# matches x's shape in assert_compatible_matrix_dimensions.
# Also, the final dimension of 'x' can have any shape.
# Therefore, the final two dimensions of special_shape are 1's.
special_shape = self.batch_shape.concatenate([1, 1])
bshape = array_ops.broadcast_static_shape(x.get_shape(), special_shape)
if special_shape.is_fully_defined():
# bshape.is_fully_defined iff special_shape.is_fully_defined.
if bshape == x.get_shape():
return x
# Use the built in broadcasting of addition.
zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
return x + zeros
# Dynamic broadcast:
# Always add to an array of zeros, rather than using a "cond", since a
# cond would require copying data from GPU --> CPU.
special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)
zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
return x + zeros
def _matmul(self, x, adjoint=False, adjoint_arg=False):
if self._assert_proper_shapes:
x = linalg.adjoint(x) if adjoint_arg else x
aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
x = control_flow_ops.with_dependencies([aps], x)
if self.is_square:
# Note that adjoint has no effect since this matrix is self-adjoint.
if adjoint_arg:
output_shape = array_ops.concat([
array_ops.shape(x)[:-2],
[array_ops.shape(x)[-1], array_ops.shape(x)[-2]]], axis=0)
else:
output_shape = array_ops.shape(x)
return self._possibly_broadcast_batch_shape(
array_ops.zeros(shape=output_shape, dtype=x.dtype))
x_shape = array_ops.shape(x)
n = self._num_columns if adjoint else self._num_rows
m = x_shape[-2] if adjoint_arg else x_shape[-1]
output_shape = array_ops.concat([x_shape[:-2], [n, m]], axis=0)
zeros = array_ops.zeros(shape=output_shape, dtype=x.dtype)
return self._possibly_broadcast_batch_shape(zeros)
def _determinant(self):
if self.batch_shape.is_fully_defined():
return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype)
else:
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _trace(self):
# Get Tensor of all zeros of same shape as self.batch_shape.
if self.batch_shape.is_fully_defined():
return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype)
else:
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _diag_part(self):
return self._zeros_diag()
def add_to_tensor(self, mat, name="add_to_tensor"):
"""Add matrix represented by this operator to `mat`. Equiv to `0 + mat`.
Args:
mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
return self._possibly_broadcast_batch_shape(mat)
def _check_domain_range_possibly_add_asserts(self):
"""Static check of init arg `num_rows`, possibly add asserts."""
# Possibly add asserts.
if self._assert_proper_shapes:
self._num_rows = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._num_rows,
0,
message="Argument num_rows must be a 0-D Tensor."),
check_ops.assert_non_negative(
self._num_rows,
message="Argument num_rows must be non-negative."),
], self._num_rows)
self._num_columns = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._num_columns,
0,
message="Argument num_columns must be a 0-D Tensor."),
check_ops.assert_non_negative(
self._num_columns,
message="Argument num_columns must be non-negative."),
], self._num_columns)
# Static checks.
if not self._num_rows.dtype.is_integer:
raise TypeError("Argument num_rows must be integer type. Found:"
" %s" % self._num_rows)
if not self._num_columns.dtype.is_integer:
raise TypeError("Argument num_columns must be integer type. Found:"
" %s" % self._num_columns)
num_rows_static = self._num_rows_static
num_columns_static = self._num_columns_static
if num_rows_static is not None:
if num_rows_static.ndim != 0:
raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
" %s" % num_rows_static)
if num_rows_static < 0:
raise ValueError("Argument num_rows must be non-negative. Found:"
" %s" % num_rows_static)
if num_columns_static is not None:
if num_columns_static.ndim != 0:
raise ValueError("Argument num_columns must be a 0-D Tensor. Found:"
" %s" % num_columns_static)
if num_columns_static < 0:
raise ValueError("Argument num_columns must be non-negative. Found:"
" %s" % num_columns_static)
def _check_batch_shape_possibly_add_asserts(self):
"""Static check of init arg `batch_shape`, possibly add asserts."""
if self._batch_shape_arg is None:
return
# Possibly add asserts
if self._assert_proper_shapes:
self._batch_shape_arg = control_flow_ops.with_dependencies([
check_ops.assert_rank(
self._batch_shape_arg,
1,
message="Argument batch_shape must be a 1-D Tensor."),
check_ops.assert_non_negative(
self._batch_shape_arg,
message="Argument batch_shape must be non-negative."),
], self._batch_shape_arg)
# Static checks
if not self._batch_shape_arg.dtype.is_integer:
raise TypeError("Argument batch_shape must be integer type. Found:"
" %s" % self._batch_shape_arg)
if self._batch_shape_static is None:
return # Cannot do any other static checks.
if self._batch_shape_static.ndim != 1:
raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:"
" %s" % self._batch_shape_static)
if np.any(self._batch_shape_static < 0):
raise ValueError("Argument batch_shape must be non-negative. Found:"
"%s" % self._batch_shape_static)
def _min_matrix_dim(self):
"""Minimum of domain/range dimension, if statically available, else None."""
domain_dim = tensor_shape.dimension_value(self.domain_dimension)
range_dim = tensor_shape.dimension_value(self.range_dimension)
if domain_dim is None or range_dim is None:
return None
return min(domain_dim, range_dim)
def _min_matrix_dim_tensor(self):
"""Minimum of domain/range dimension, as a tensor."""
return math_ops.reduce_min(self.shape_tensor()[-2:])
def _zeros_diag(self):
"""Returns the diagonal of this operator as all zeros."""
if self.shape.is_fully_defined():
d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
else:
d_shape = array_ops.concat(
[self.batch_shape_tensor(),
[self._min_matrix_dim_tensor()]], axis=0)
return array_ops.zeros(shape=d_shape, dtype=self.dtype)
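# Illustrative usage sketch (not part of the library): a batched zero operator
# broadcasting against an input with a smaller batch shape, mirroring the
# class docstring example above.
def _example_zeros_broadcast():
  operator = LinearOperatorZeros(num_rows=2, batch_shape=[2])
  x = array_ops.ones(shape=[1, 2, 3])
  # The leading [1] batch dimension of x broadcasts against the operator's
  # [2] batch shape, so the result is a zero Tensor of shape [2, 2, 3].
  return operator.matmul(x)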
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_zeros.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Add one or more `LinearOperators` efficiently."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_full_matrix
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_lower_triangular
__all__ = []
def add_operators(operators,
operator_name=None,
addition_tiers=None,
name=None):
"""Efficiently add one or more linear operators.
Given operators `[A1, A2,...]`, this `Op` returns a possibly shorter list of
operators `[B1, B2,...]` such that
```sum_k Ak.matmul(x) = sum_k Bk.matmul(x).```
The operators `Bk` result by adding some of the `Ak`, as allowed by
`addition_tiers`.
Example of efficiently adding diagonal operators.
```python
A1 = LinearOperatorDiag(diag=[1., 1.], name="A1")
A2 = LinearOperatorDiag(diag=[2., 2.], name="A2")
# Use two tiers, the first contains an Adder that returns Diag. Since both
# A1 and A2 are Diag, they can use this Adder. The second tier will not be
# used.
addition_tiers = [
[_AddAndReturnDiag()],
[_AddAndReturnMatrix()]]
B_list = add_operators([A1, A2], addition_tiers=addition_tiers)
len(B_list)
==> 1
B_list[0].__class__.__name__
==> 'LinearOperatorDiag'
B_list[0].to_dense()
==> [[3., 0.],
[0., 3.]]
B_list[0].name
==> 'Add/A1__A2/'
```
Args:
operators: Iterable of `LinearOperator` objects with same `dtype`, domain
and range dimensions, and broadcastable batch shapes.
operator_name: String name for returned `LinearOperator`. Defaults to
concatenation of "Add/A__B/" that indicates the order of addition steps.
addition_tiers: List tiers, like `[tier_0, tier_1, ...]`, where `tier_i`
is a list of `Adder` objects. This function attempts to do all additions
in tier `i` before trying tier `i + 1`.
name: A name for this `Op`. Defaults to `add_operators`.
Returns:
A list of `LinearOperator` instances, possibly shorter than the input list.
The classes and order of addition may change as new (and better) addition
strategies emerge.
Raises:
ValueError: If `operators` argument is empty.
ValueError: If shapes are incompatible.
"""
# Default setting
if addition_tiers is None:
addition_tiers = _DEFAULT_ADDITION_TIERS
# Argument checking.
check_ops.assert_proper_iterable(operators)
operators = list(reversed(operators))
if len(operators) < 1:
raise ValueError(
"Argument 'operators' must contain at least one operator. "
"Found: %s" % operators)
if not all(
isinstance(op, linear_operator.LinearOperator) for op in operators):
raise TypeError(
"Argument 'operators' must contain only LinearOperator instances. "
"Found: %s" % operators)
_static_check_for_same_dimensions(operators)
_static_check_for_broadcastable_batch_shape(operators)
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
with ops.name_scope(name or "add_operators", values=graph_parents):
# Additions done in one of the tiers. Try tier 0, 1,...
ops_to_try_at_next_tier = list(operators)
for tier in addition_tiers:
ops_to_try_at_this_tier = ops_to_try_at_next_tier
ops_to_try_at_next_tier = []
while ops_to_try_at_this_tier:
op1 = ops_to_try_at_this_tier.pop()
op2, adder = _pop_a_match_at_tier(op1, ops_to_try_at_this_tier, tier)
if op2 is not None:
# Will try to add the result of this again at this same tier.
new_operator = adder.add(op1, op2, operator_name)
ops_to_try_at_this_tier.append(new_operator)
else:
ops_to_try_at_next_tier.append(op1)
return ops_to_try_at_next_tier
def _pop_a_match_at_tier(op1, operator_list, tier):
# Search from the back of list to the front in order to create nice default
# order of operations.
for i in range(1, len(operator_list) + 1):
op2 = operator_list[-i]
for adder in tier:
if adder.can_add(op1, op2):
return operator_list.pop(-i), adder
return None, None
def _infer_hints_allowing_override(op1, op2, hints):
"""Infer hints from op1 and op2. hints argument is an override.
Args:
op1: LinearOperator
op2: LinearOperator
hints: _Hints object holding "is_X" boolean hints to use for returned
operator.
If some hint is None, try to set using op1 and op2. If the
hint is provided, ignore op1 and op2 hints. This allows an override
of previous hints, but does not allow forbidden hints (e.g. you still
cannot say a real diagonal operator is not self-adjoint).
Returns:
_Hints object.
"""
hints = hints or _Hints()
# If A, B are self-adjoint, then so is A + B.
if hints.is_self_adjoint is None:
is_self_adjoint = op1.is_self_adjoint and op2.is_self_adjoint
else:
is_self_adjoint = hints.is_self_adjoint
# If A, B are positive definite, then so is A + B.
if hints.is_positive_definite is None:
is_positive_definite = op1.is_positive_definite and op2.is_positive_definite
else:
is_positive_definite = hints.is_positive_definite
# A positive definite operator is always non-singular.
if is_positive_definite and hints.is_non_singular is None:
is_non_singular = True
else:
is_non_singular = hints.is_non_singular
return _Hints(
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite)
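# Illustrative sketch (not part of the library): hint inference in action.
# Adding two positive-definite, self-adjoint diagonal operators with no
# caller-supplied hints yields hints that are self-adjoint, positive definite
# and, by implication, non-singular.
def _example_infer_hints():
  op1 = linear_operator_diag.LinearOperatorDiag(
      [1., 2.], is_self_adjoint=True, is_positive_definite=True)
  op2 = linear_operator_diag.LinearOperatorDiag(
      [3., 4.], is_self_adjoint=True, is_positive_definite=True)
  hints = _infer_hints_allowing_override(op1, op2, hints=None)
  # hints.is_self_adjoint, hints.is_positive_definite and
  # hints.is_non_singular are all True here.
  return hints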
def _static_check_for_same_dimensions(operators):
"""ValueError if operators determined to have different dimensions."""
if len(operators) < 2:
return
domain_dimensions = [
(op.name, tensor_shape.dimension_value(op.domain_dimension))
for op in operators
if tensor_shape.dimension_value(op.domain_dimension) is not None]
if len(set(value for name, value in domain_dimensions)) > 1:
raise ValueError("Operators must have the same domain dimension. Found: %s"
% domain_dimensions)
range_dimensions = [
(op.name, tensor_shape.dimension_value(op.range_dimension))
for op in operators
if tensor_shape.dimension_value(op.range_dimension) is not None]
if len(set(value for name, value in range_dimensions)) > 1:
raise ValueError("Operators must have the same range dimension. Found: %s" %
range_dimensions)
def _static_check_for_broadcastable_batch_shape(operators):
"""ValueError if operators determined to have non-broadcastable shapes."""
if len(operators) < 2:
return
# This will fail if they cannot be broadcast together.
batch_shape = operators[0].batch_shape
for op in operators[1:]:
batch_shape = array_ops.broadcast_static_shape(batch_shape, op.batch_shape)
class _Hints(object):
"""Holds 'is_X' flags that every LinearOperator is initialized with."""
def __init__(self,
is_non_singular=None,
is_positive_definite=None,
is_self_adjoint=None):
self.is_non_singular = is_non_singular
self.is_positive_definite = is_positive_definite
self.is_self_adjoint = is_self_adjoint
################################################################################
# Classes to add two linear operators.
################################################################################
@six.add_metaclass(abc.ABCMeta)
class _Adder(object):
"""Abstract base class to add two operators.
Each `Adder` acts independently, adding everything it can, paying no attention
to whether another `Adder` could have done the addition more efficiently.
"""
@property
def name(self):
return self.__class__.__name__
@abc.abstractmethod
def can_add(self, op1, op2):
"""Returns `True` if this `Adder` can add `op1` and `op2`. Else `False`."""
pass
@abc.abstractmethod
def _add(self, op1, op2, operator_name, hints):
# Derived classes can assume op1 and op2 have been validated, e.g. they have
# the same dtype, and their domain/range dimensions match.
pass
def add(self, op1, op2, operator_name, hints=None):
"""Return new `LinearOperator` acting like `op1 + op2`.
Args:
op1: `LinearOperator`
op2: `LinearOperator`, with `shape` and `dtype` such that adding to
`op1` is allowed.
operator_name: `String` name to give to returned `LinearOperator`
hints: `_Hints` object. Returned `LinearOperator` will be created with
these hints.
Returns:
`LinearOperator`
"""
updated_hints = _infer_hints_allowing_override(op1, op2, hints)
if operator_name is None:
operator_name = "Add/" + op1.name + "__" + op2.name + "/"
values = op1.graph_parents + op2.graph_parents
scope_name = self.name
if scope_name.startswith("_"):
scope_name = scope_name[1:]
with ops.name_scope(scope_name, values=values):
return self._add(op1, op2, operator_name, updated_hints)
class _AddAndReturnScaledIdentity(_Adder):
"""Handles additions resulting in an Identity family member.
The Identity (`LinearOperatorScaledIdentity`, `LinearOperatorIdentity`) family
is closed under addition. This `Adder` respects that, and returns an Identity
family member.
"""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_IDENTITY_FAMILY)
def _add(self, op1, op2, operator_name, hints):
# Will build a LinearOperatorScaledIdentity.
if _type(op1) == _SCALED_IDENTITY:
multiplier_1 = op1.multiplier
else:
multiplier_1 = array_ops.ones(op1.batch_shape_tensor(), dtype=op1.dtype)
if _type(op2) == _SCALED_IDENTITY:
multiplier_2 = op2.multiplier
else:
multiplier_2 = array_ops.ones(op2.batch_shape_tensor(), dtype=op2.dtype)
return linear_operator_identity.LinearOperatorScaledIdentity(
num_rows=op1.range_dimension_tensor(),
multiplier=multiplier_1 + multiplier_2,
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnDiag(_Adder):
"""Handles additions resulting in a Diag operator."""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_DIAG_LIKE)
def _add(self, op1, op2, operator_name, hints):
return linear_operator_diag.LinearOperatorDiag(
diag=op1.diag_part() + op2.diag_part(),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnTriL(_Adder):
"""Handles additions resulting in a TriL operator."""
def can_add(self, op1, op2):
types = {_type(op1), _type(op2)}
return not types.difference(_DIAG_LIKE.union({_TRIL}))
def _add(self, op1, op2, operator_name, hints):
if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
op_add_to_tensor, op_other = op1, op2
else:
op_add_to_tensor, op_other = op2, op1
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
tril=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
class _AddAndReturnMatrix(_Adder):
"""Handles additions resulting in a `LinearOperatorFullMatrix`."""
def can_add(self, op1, op2): # pylint: disable=unused-argument
return isinstance(op1, linear_operator.LinearOperator) and isinstance(
op2, linear_operator.LinearOperator)
def _add(self, op1, op2, operator_name, hints):
if _type(op1) in _EFFICIENT_ADD_TO_TENSOR:
op_add_to_tensor, op_other = op1, op2
else:
op_add_to_tensor, op_other = op2, op1
return linear_operator_full_matrix.LinearOperatorFullMatrix(
matrix=op_add_to_tensor.add_to_tensor(op_other.to_dense()),
is_non_singular=hints.is_non_singular,
is_self_adjoint=hints.is_self_adjoint,
is_positive_definite=hints.is_positive_definite,
name=operator_name)
################################################################################
# Constants designating types of LinearOperators
################################################################################
# Type name constants for LinearOperator classes.
_IDENTITY = "identity"
_SCALED_IDENTITY = "scaled_identity"
_DIAG = "diag"
_TRIL = "tril"
_MATRIX = "matrix"
# Groups of operators.
_DIAG_LIKE = {_DIAG, _IDENTITY, _SCALED_IDENTITY}
_IDENTITY_FAMILY = {_IDENTITY, _SCALED_IDENTITY}
# operators with an efficient .add_to_tensor() method.
_EFFICIENT_ADD_TO_TENSOR = _DIAG_LIKE
def _type(operator):
"""Returns the type name constant (e.g. _TRIL) for operator."""
if isinstance(operator, linear_operator_diag.LinearOperatorDiag):
return _DIAG
if isinstance(operator,
linear_operator_lower_triangular.LinearOperatorLowerTriangular):
return _TRIL
if isinstance(operator, linear_operator_full_matrix.LinearOperatorFullMatrix):
return _MATRIX
if isinstance(operator, linear_operator_identity.LinearOperatorIdentity):
return _IDENTITY
if isinstance(operator,
linear_operator_identity.LinearOperatorScaledIdentity):
return _SCALED_IDENTITY
raise TypeError("Operator type unknown: %s" % operator)
################################################################################
# Addition tiers:
# We attempt to use Adders in tier K before K+1.
#
# Organize tiers to
# (i) reduce O(..) complexity of forming final operator, and
# (ii) produce the "most efficient" final operator.
# Dev notes:
# * Results of addition at tier K will be added at tier K or higher.
# * Tiers may change, and we warn the user that they may change.
################################################################################
# Note that the final tier, _AddAndReturnMatrix, will convert everything to a
# dense matrix. So it is sometimes very inefficient.
_DEFAULT_ADDITION_TIERS = [
[_AddAndReturnScaledIdentity()],
[_AddAndReturnDiag()],
[_AddAndReturnTriL()],
[_AddAndReturnMatrix()],
]
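# Illustrative usage sketch (not part of the library): with the default tiers,
# adding a scaled identity and a diagonal operator falls through the identity
# tier (the types differ) and is then handled by `_AddAndReturnDiag`, so the
# result is a single `LinearOperatorDiag`.
def _example_add_default_tiers():
  op1 = linear_operator_identity.LinearOperatorScaledIdentity(
      num_rows=2, multiplier=3.)
  op2 = linear_operator_diag.LinearOperatorDiag([1., 2.])
  result = add_operators([op1, op2])
  # len(result) == 1, and result[0] is a LinearOperatorDiag whose diagonal
  # is [4., 5.].
  return result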
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_addition.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Perturb a `LinearOperator` with a rank `K` update."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorLowRankUpdate",
]
@tf_export("linalg.LinearOperatorLowRankUpdate")
class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
"""Perturb a `LinearOperator` with a rank `K` update.
This operator acts like a [batch] matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
`LinearOperatorLowRankUpdate` represents `A = L + U D V^H`, where
```
L, is a LinearOperator representing [batch] M x N matrices
U, is a [batch] M x K matrix. Typically K << M.
D, is a [batch] K x K matrix.
V, is a [batch] N x K matrix. Typically K << N.
V^H is the Hermitian transpose (adjoint) of V.
```
If `M = N`, determinants and solves are done using the matrix determinant
lemma and Woodbury identities, and thus require L and D to be non-singular.
Solves and determinants will be attempted unless the "is_non_singular"
property of L and D is False.
In the event that L and D are positive-definite, and U = V, solves and
determinants can be done using a Cholesky factorization.
```python
# Create a 3 x 3 diagonal linear operator.
diag_operator = LinearOperatorDiag(
diag=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True,
is_positive_definite=True)
# Perturb with a rank 2 perturbation
operator = LinearOperatorLowRankUpdate(
operator=diag_operator,
u=[[1., 2.], [-1., 3.], [0., 0.]],
diag_update=[11., 12.],
v=[[1., 2.], [-1., 3.], [10., 10.]])
operator.shape
==> [3, 3]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
==> Shape [3, 4] Tensor
```
### Shape compatibility
This operator acts on a [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
### Performance
Suppose `operator` is a `LinearOperatorLowRankUpdate` of shape `[M, N]`,
made from a rank `K` update of `base_operator` which performs `.matmul(x)` on
`x` having `x.shape = [N, R]` with `O(L_matmul*N*R)` complexity (and similarly
for `solve`, `determinant`). Then, if `x.shape = [N, R]`,
* `operator.matmul(x)` is `O(L_matmul*N*R + K*N*R)`
and if `M = N`,
* `operator.solve(x)` is `O(L_matmul*N*R + N*K*R + K^2*R + K^3)`
* `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)`
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular`, `self_adjoint`, `positive_definite`,
`diag_update_positive` and `square`. These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
base_operator,
u,
diag_update=None,
v=None,
is_diag_update_positive=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorLowRankUpdate"):
"""Initialize a `LinearOperatorLowRankUpdate`.
This creates a `LinearOperator` of the form `A = L + U D V^H`, with
`L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
diagonal matrix.
If `L` is non-singular, solves and determinants are available.
Solves/determinants both involve a solve/determinant of a `K x K` system.
In the event that L and D are self-adjoint positive-definite, and U = V,
this can be done using a Cholesky factorization. The user should set the
`is_X` matrix property hints, which will trigger the appropriate code path.
Args:
base_operator: Shape `[B1,...,Bb, M, N]`.
u: Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
This is `U` above.
diag_update: Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype`
as `base_operator`. This is the diagonal of `D` above.
Defaults to `D` being the identity operator.
v: Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`
Defaults to `v = u`, in which case the perturbation is symmetric.
If `M != N`, then `v` must be set since the perturbation is not square.
is_diag_update_positive: Python `bool`.
If `True`, expect `diag_update > 0`.
is_non_singular: Expect that this operator is non-singular.
Default is `None`, unless `is_positive_definite` is auto-set to be
`True` (see below).
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. Default is `None`, unless `base_operator` is self-adjoint
and `v = None` (meaning `u=v`), in which case this defaults to `True`.
is_positive_definite: Expect that this operator is positive definite.
Default is `None`, unless `base_operator` is positive-definite
`v = None` (meaning `u=v`), and `is_diag_update_positive`, in which case
this defaults to `True`.
Note that we say an operator is positive definite when the quadratic
form `x^H A x` has positive real part for all nonzero `x`.
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If `is_X` flags are set in an inconsistent way.
"""
dtype = base_operator.dtype
if diag_update is not None:
if is_diag_update_positive and dtype.is_complex:
logging.warn("Note: setting is_diag_update_positive with a complex "
"dtype means that diagonal is real and positive.")
if diag_update is None:
if is_diag_update_positive is False:
raise ValueError(
"Default diagonal is the identity, which is positive. However, "
"user set 'is_diag_update_positive' to False.")
is_diag_update_positive = True
# In this case, we can use a Cholesky decomposition to help us solve/det.
self._use_cholesky = (
base_operator.is_positive_definite and base_operator.is_self_adjoint
and is_diag_update_positive
and v is None)
# Possibly auto-set some characteristic flags from None to True.
# If the Flags were set (by the user) incorrectly to False, then raise.
if base_operator.is_self_adjoint and v is None and not dtype.is_complex:
if is_self_adjoint is False:
raise ValueError(
"A = L + UDU^H, with L self-adjoint and D real diagonal. Since"
" UDU^H is self-adjoint, this must be a self-adjoint operator.")
is_self_adjoint = True
# The condition for using a cholesky is sufficient for SPD, and
# no weaker choice of these hints leads to SPD. Therefore,
# the following line reads "if hints indicate SPD..."
if self._use_cholesky:
if (
is_positive_definite is False
or is_self_adjoint is False
or is_non_singular is False):
raise ValueError(
"Arguments imply this is self-adjoint positive-definite operator.")
is_positive_definite = True
is_self_adjoint = True
values = base_operator.graph_parents + [u, diag_update, v]
with ops.name_scope(name, values=values):
# Create U and V.
self._u = ops.convert_to_tensor(u, name="u")
if v is None:
self._v = self._u
else:
self._v = ops.convert_to_tensor(v, name="v")
if diag_update is None:
self._diag_update = None
else:
self._diag_update = ops.convert_to_tensor(
diag_update, name="diag_update")
# Create base_operator L.
self._base_operator = base_operator
graph_parents = base_operator.graph_parents + [
self.u, self._diag_update, self.v]
graph_parents = [p for p in graph_parents if p is not None]
super(LinearOperatorLowRankUpdate, self).__init__(
dtype=self._base_operator.dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
# Create the diagonal operator D.
self._set_diag_operators(diag_update, is_diag_update_positive)
self._is_diag_update_positive = is_diag_update_positive
self._check_shapes()
# Pre-compute the so-called "capacitance" matrix
# C := D^{-1} + V^H L^{-1} U
self._capacitance = self._make_capacitance()
if self._use_cholesky:
self._chol_capacitance = linalg_ops.cholesky(self._capacitance)
def _check_shapes(self):
"""Static check that shapes are compatible."""
# Broadcast shape also checks that u and v are compatible.
uv_shape = array_ops.broadcast_static_shape(
self.u.get_shape(), self.v.get_shape())
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape, uv_shape[:-2])
tensor_shape.Dimension(
self.base_operator.domain_dimension).assert_is_compatible_with(
uv_shape[-2])
if self._diag_update is not None:
tensor_shape.dimension_at_index(uv_shape, -1).assert_is_compatible_with(
self._diag_update.get_shape()[-1])
array_ops.broadcast_static_shape(
batch_shape, self._diag_update.get_shape()[:-1])
def _set_diag_operators(self, diag_update, is_diag_update_positive):
"""Set attributes self._diag_update and self._diag_operator."""
if diag_update is not None:
self._diag_operator = linear_operator_diag.LinearOperatorDiag(
self._diag_update, is_positive_definite=is_diag_update_positive)
self._diag_inv_operator = linear_operator_diag.LinearOperatorDiag(
1. / self._diag_update, is_positive_definite=is_diag_update_positive)
else:
if tensor_shape.dimension_value(self.u.shape[-1]) is not None:
r = tensor_shape.dimension_value(self.u.shape[-1])
else:
r = array_ops.shape(self.u)[-1]
self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
num_rows=r, dtype=self.dtype)
self._diag_inv_operator = self._diag_operator
@property
def u(self):
"""If this operator is `A = L + U D V^H`, this is the `U`."""
return self._u
@property
def v(self):
"""If this operator is `A = L + U D V^H`, this is the `V`."""
return self._v
@property
def is_diag_update_positive(self):
"""If this operator is `A = L + U D V^H`, this hints `D > 0` elementwise."""
return self._is_diag_update_positive
@property
def diag_update(self):
"""If this operator is `A = L + U D V^H`, this is the diagonal of `D`."""
return self._diag_update
@property
def diag_operator(self):
"""If this operator is `A = L + U D V^H`, this is `D`."""
return self._diag_operator
@property
def base_operator(self):
"""If this operator is `A = L + U D V^H`, this is the `L`."""
return self._base_operator
def _shape(self):
batch_shape = array_ops.broadcast_static_shape(
self.base_operator.batch_shape,
self.u.get_shape()[:-2])
return batch_shape.concatenate(self.base_operator.shape[-2:])
def _shape_tensor(self):
batch_shape = array_ops.broadcast_dynamic_shape(
self.base_operator.batch_shape_tensor(),
array_ops.shape(self.u)[:-2])
return array_ops.concat(
[batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
u = self.u
v = self.v
l = self.base_operator
d = self.diag_operator
leading_term = l.matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
if adjoint:
uh_x = math_ops.matmul(u, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_uh_x = d.matmul(uh_x, adjoint=adjoint)
v_d_uh_x = math_ops.matmul(v, d_uh_x)
return leading_term + v_d_uh_x
else:
vh_x = math_ops.matmul(v, x, adjoint_a=True, adjoint_b=adjoint_arg)
d_vh_x = d.matmul(vh_x, adjoint=adjoint)
u_d_vh_x = math_ops.matmul(u, d_vh_x)
return leading_term + u_d_vh_x
def _determinant(self):
if self.is_positive_definite:
return math_ops.exp(self.log_abs_determinant())
# The matrix determinant lemma gives
# https://en.wikipedia.org/wiki/Matrix_determinant_lemma
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
# where C is sometimes known as the capacitance matrix,
# C := D^{-1} + V^H L^{-1} U
det_c = linalg_ops.matrix_determinant(self._capacitance)
det_d = self.diag_operator.determinant()
det_l = self.base_operator.determinant()
return det_c * det_d * det_l
def _log_abs_determinant(self):
# Recall
# det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
# = det(C) det(D) det(L)
log_abs_det_d = self.diag_operator.log_abs_determinant()
log_abs_det_l = self.base_operator.log_abs_determinant()
if self._use_cholesky:
chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
log_abs_det_c = 2 * math_ops.reduce_sum(
math_ops.log(chol_cap_diag), axis=[-1])
else:
det_c = linalg_ops.matrix_determinant(self._capacitance)
log_abs_det_c = math_ops.log(math_ops.abs(det_c))
if self.dtype.is_complex:
log_abs_det_c = math_ops.cast(log_abs_det_c, dtype=self.dtype)
return log_abs_det_c + log_abs_det_d + log_abs_det_l
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
if self.base_operator.is_non_singular is False:
raise ValueError(
"Solve not implemented unless this is a perturbation of a "
"non-singular LinearOperator.")
# The Woodbury formula gives:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
# (L + UDV^H)^{-1}
# = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
# = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
# where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
# Note also that, with ^{-H} being the inverse of the adjoint,
# (L + UDV^H)^{-H}
# = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
l = self.base_operator
if adjoint:
v = self.u
u = self.v
else:
v = self.v
u = self.u
# L^{-1} rhs
linv_rhs = l.solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
# V^H L^{-1} rhs
vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True)
# C^{-1} V^H L^{-1} rhs
if self._use_cholesky:
capinv_vh_linv_rhs = linear_operator_util.cholesky_solve_with_broadcast(
self._chol_capacitance, vh_linv_rhs)
else:
capinv_vh_linv_rhs = linear_operator_util.matrix_solve_with_broadcast(
self._capacitance, vh_linv_rhs, adjoint=adjoint)
# U C^{-1} V^H L^{-1} rhs
u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs)
# L^{-1} U C^{-1} V^H L^{-1} rhs
linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)
# L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
return linv_rhs - linv_u_capinv_vh_linv_rhs
def _make_capacitance(self):
# C := D^{-1} + V^H L^{-1} U
# which is sometimes known as the "capacitance" matrix.
# L^{-1} U
linv_u = self.base_operator.solve(self.u)
# V^H L^{-1} U
vh_linv_u = math_ops.matmul(self.v, linv_u, adjoint_a=True)
# D^{-1} + V^H L^{-1} U
capacitance = self._diag_inv_operator.add_to_tensor(vh_linv_u)
return capacitance
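# Illustrative usage sketch (not part of the library): a rank-1 update of a
# diagonal operator. Because the base operator and the diagonal update are
# positive definite and u == v, solves go through the Cholesky of the K x K
# capacitance matrix rather than a dense N x N factorization.
def _example_low_rank_update_solve():
  base = linear_operator_diag.LinearOperatorDiag(
      [2., 3., 4.], is_self_adjoint=True, is_positive_definite=True,
      is_non_singular=True)
  operator = LinearOperatorLowRankUpdate(
      base_operator=base,
      u=[[1.], [0.], [1.]],
      diag_update=[5.],
      is_diag_update_positive=True)
  rhs = array_ops.ones(shape=[3, 2])
  # Solves (base + 5 * u u^H) x = rhs using the Woodbury identity internally.
  return operator.solve(rhs)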
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_low_rank_update.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` coming from a [[nested] block] circulant matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"LinearOperatorCirculant",
"LinearOperatorCirculant2D",
"LinearOperatorCirculant3D",
]
# Different FFT Ops will be used for different block depths.
_FFT_OP = {1: fft_ops.fft, 2: fft_ops.fft2d, 3: fft_ops.fft3d}
_IFFT_OP = {1: fft_ops.ifft, 2: fft_ops.ifft2d, 3: fft_ops.ifft3d}
# TODO(langmore) Add transformations that create common spectra, e.g.
# starting with the convolution kernel,
# starting with half a spectrum and creating a Hermitian one,
# common filters.
# TODO(langmore) Support rectangular Toeplitz matrices.
class _BaseLinearOperatorCirculant(linear_operator.LinearOperator):
"""Base class for circulant operators. Not user facing.
`LinearOperator` acting like a [batch] [[nested] block] circulant matrix.
"""
def __init__(self,
spectrum,
block_depth,
input_output_dtype=dtypes.complex64,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
name="LinearOperatorCirculant"):
r"""Initialize an `_BaseLinearOperatorCirculant`.
Args:
spectrum: Shape `[B1,...,Bb, N]` `Tensor`. Allowed dtypes: `float16`,
`float32`, `float64`, `complex64`, `complex128`. Type can be different
than `input_output_dtype`
block_depth: Python integer, either 1, 2, or 3. Will be 1 for circulant,
2 for block circulant, and 3 for nested block circulant.
input_output_dtype: `dtype` for input/output.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `spectrum` is real, this will always be true.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name to prepend to all ops created by this class.
Raises:
ValueError: If `block_depth` is not an allowed value.
TypeError: If `spectrum` is not an allowed type.
"""
allowed_block_depths = [1, 2, 3]
self._name = name
if block_depth not in allowed_block_depths:
raise ValueError("Expected block_depth to be in %s. Found: %s." %
(allowed_block_depths, block_depth))
self._block_depth = block_depth
with ops.name_scope(name, values=[spectrum]):
self._spectrum = self._check_spectrum_and_return_tensor(spectrum)
# Check and auto-set hints.
if not self.spectrum.dtype.is_complex:
if is_self_adjoint is False:
raise ValueError(
"A real spectrum always corresponds to a self-adjoint operator.")
is_self_adjoint = True
if is_square is False:
raise ValueError(
"A [[nested] block] circulant operator is always square.")
is_square = True
# If spectrum.shape = [s0, s1, s2], and block_depth = 2,
# block_shape = [s1, s2]
s_shape = array_ops.shape(self.spectrum)
self._block_shape_tensor = s_shape[-self.block_depth:]
# Add common variants of spectrum to the graph.
self._spectrum_complex = _to_complex(self.spectrum)
self._abs_spectrum = math_ops.abs(self.spectrum)
self._conj_spectrum = math_ops.conj(self._spectrum_complex)
super(_BaseLinearOperatorCirculant, self).__init__(
dtype=dtypes.as_dtype(input_output_dtype),
graph_parents=[self.spectrum],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_spectrum_and_return_tensor(self, spectrum):
"""Static check of spectrum. Then return `Tensor` version."""
spectrum = ops.convert_to_tensor(spectrum, name="spectrum")
if spectrum.get_shape().ndims is not None:
if spectrum.get_shape().ndims < self.block_depth:
raise ValueError(
"Argument spectrum must have at least %d dimensions. Found: %s" %
(self.block_depth, spectrum))
return spectrum
@property
def block_depth(self):
"""Depth of recursively defined circulant blocks defining this `Operator`.
With `A` the dense representation of this `Operator`,
`block_depth = 1` means `A` is symmetric circulant. For example,
```
A = |w z y x|
|x w z y|
|y x w z|
|z y x w|
```
`block_depth = 2` means `A` is block symmetric circulant with symmetric
circulant blocks. For example, with `W`, `X`, `Y`, `Z` symmetric circulant,
```
A = |W Z Y X|
|X W Z Y|
|Y X W Z|
|Z Y X W|
```
`block_depth = 3` means `A` is block symmetric circulant with block
symmetric circulant blocks.
Returns:
Python `integer`.
"""
return self._block_depth
def block_shape_tensor(self):
"""Shape of the block dimensions of `self.spectrum`."""
return self._block_shape_tensor
@property
def block_shape(self):
return self.spectrum.get_shape()[-self.block_depth:]
@property
def spectrum(self):
return self._spectrum
def _vectorize_then_blockify(self, matrix):
"""Shape batch matrix to batch vector, then blockify trailing dimensions."""
# Suppose
# matrix.shape = [m0, m1, m2, m3],
# and matrix is a matrix because the final two dimensions are matrix dims.
# self.block_depth = 2,
# self.block_shape = [b0, b1] (note b0 * b1 = m2).
# We will reshape matrix to
# [m3, m0, m1, b0, b1].
# Vectorize: Reshape to batch vector.
# [m0, m1, m2, m3] --> [m3, m0, m1, m2]
# This is called "vectorize" because we have taken the final two matrix dims
# and turned this into a size m3 batch of vectors.
vec = distribution_util.rotate_transpose(matrix, shift=1)
# Blockify: Blockify trailing dimensions.
# [m3, m0, m1, m2] --> [m3, m0, m1, b0, b1]
if (vec.get_shape().is_fully_defined() and
self.block_shape.is_fully_defined()):
# vec_leading_shape = [m3, m0, m1],
# the parts of vec that will not be blockified.
vec_leading_shape = vec.get_shape()[:-1]
final_shape = vec_leading_shape.concatenate(self.block_shape)
else:
vec_leading_shape = array_ops.shape(vec)[:-1]
final_shape = array_ops.concat(
(vec_leading_shape, self.block_shape_tensor()), 0)
return array_ops.reshape(vec, final_shape)
def _unblockify_then_matricize(self, vec):
"""Flatten the block dimensions then reshape to a batch matrix."""
# Suppose
# vec.shape = [v0, v1, v2, v3],
# self.block_depth = 2.
# Then
# leading shape = [v0, v1]
# block shape = [v2, v3].
# We will reshape vec to
# [v1, v2*v3, v0].
# Un-blockify: Flatten block dimensions. Reshape
# [v0, v1, v2, v3] --> [v0, v1, v2*v3].
if vec.get_shape().is_fully_defined():
# vec_shape = [v0, v1, v2, v3]
vec_shape = vec.get_shape().as_list()
# vec_leading_shape = [v0, v1]
vec_leading_shape = vec_shape[:-self.block_depth]
# vec_block_shape = [v2, v3]
vec_block_shape = vec_shape[-self.block_depth:]
# flat_shape = [v0, v1, v2*v3]
flat_shape = vec_leading_shape + [np.prod(vec_block_shape)]
else:
vec_shape = array_ops.shape(vec)
vec_leading_shape = vec_shape[:-self.block_depth]
vec_block_shape = vec_shape[-self.block_depth:]
flat_shape = array_ops.concat(
(vec_leading_shape, [math_ops.reduce_prod(vec_block_shape)]), 0)
vec_flat = array_ops.reshape(vec, flat_shape)
# Matricize: Reshape to batch matrix.
# [v0, v1, v2*v3] --> [v1, v2*v3, v0],
# representing a shape [v1] batch of [v2*v3, v0] matrices.
matrix = distribution_util.rotate_transpose(vec_flat, shift=-1)
return matrix
def _fft(self, x):
"""FFT along the last self.block_depth dimensions of x.
Args:
x: `Tensor` with floating or complex `dtype`.
Should be in the form returned by self._vectorize_then_blockify.
Returns:
`Tensor` with `dtype` `complex64`.
"""
x_complex = _to_complex(x)
return _FFT_OP[self.block_depth](x_complex)
def _ifft(self, x):
"""IFFT along the last self.block_depth dimensions of x.
Args:
x: `Tensor` with floating or complex dtype. Should be in the form
returned by self._vectorize_then_blockify.
Returns:
`Tensor` with `dtype` `complex64`.
"""
x_complex = _to_complex(x)
return _IFFT_OP[self.block_depth](x_complex)
def convolution_kernel(self, name="convolution_kernel"):
"""Convolution kernel corresponding to `self.spectrum`.
The `D` dimensional DFT of this kernel is the frequency domain spectrum of
this operator.
Args:
name: A name to give this `Op`.
Returns:
`Tensor` with `dtype` `self.dtype`.
"""
with self._name_scope(name):
h = self._ifft(self._spectrum_complex)
return math_ops.cast(h, self.dtype)
def _shape(self):
s_shape = self._spectrum.get_shape()
# Suppose spectrum.shape = [a, b, c, d]
# block_depth = 2
# Then:
# batch_shape = [a, b]
# N = c*d
# and we want to return
# [a, b, c*d, c*d]
batch_shape = s_shape[:-self.block_depth]
# trailing_dims = [c, d]
trailing_dims = s_shape[-self.block_depth:]
if trailing_dims.is_fully_defined():
n = np.prod(trailing_dims.as_list())
else:
n = None
n_x_n = tensor_shape.TensorShape([n, n])
return batch_shape.concatenate(n_x_n)
def _shape_tensor(self):
# See self.shape for explanation of steps
s_shape = array_ops.shape(self._spectrum)
batch_shape = s_shape[:-self.block_depth]
trailing_dims = s_shape[-self.block_depth:]
n = math_ops.reduce_prod(trailing_dims)
n_x_n = [n, n]
return array_ops.concat((batch_shape, n_x_n), 0)
def assert_hermitian_spectrum(self, name="assert_hermitian_spectrum"):
"""Returns an `Op` that asserts this operator has Hermitian spectrum.
This operator corresponds to a real-valued matrix if and only if its
spectrum is Hermitian.
Args:
name: A name to give this `Op`.
Returns:
An `Op` that asserts this operator has Hermitian spectrum.
"""
eps = np.finfo(self.dtype.real_dtype.as_numpy_dtype).eps
with self._name_scope(name):
# Assume linear accumulation of error.
max_err = eps * self.domain_dimension_tensor()
imag_convolution_kernel = math_ops.imag(self.convolution_kernel())
return check_ops.assert_less(
math_ops.abs(imag_convolution_kernel),
max_err,
message="Spectrum was not Hermitian")
def _assert_non_singular(self):
return linear_operator_util.assert_no_entries_with_modulus_zero(
self.spectrum,
message="Singular operator: Spectrum contained zero values.")
def _assert_positive_definite(self):
# This operator has the action Ax = F^H D F x,
# where D is the diagonal matrix with self.spectrum on the diag. Therefore,
# <x, Ax> = <Fx, DFx>,
# Since F is bijective, the condition for positive definite is the same as
# for a diagonal matrix, i.e. real part of spectrum is positive.
message = (
"Not positive definite: Real part of spectrum was not all positive.")
return check_ops.assert_positive(
math_ops.real(self.spectrum), message=message)
def _assert_self_adjoint(self):
# Recall correspondence between symmetry and real transforms. See docstring
return linear_operator_util.assert_zero_imag_part(
self.spectrum,
message=(
"Not self-adjoint: The spectrum contained non-zero imaginary part."
))
def _broadcast_batch_dims(self, x, spectrum):
"""Broadcast batch dims of batch matrix `x` and spectrum."""
# spectrum.shape = batch_shape + block_shape
# First make spectrum a batch matrix with
# spectrum.shape = batch_shape + [prod(block_shape), 1]
spec_mat = array_ops.reshape(
spectrum, array_ops.concat(
(self.batch_shape_tensor(), [-1, 1]), axis=0))
# Second, broadcast, possibly requiring an addition of array of zeros.
x, spec_mat = linear_operator_util.broadcast_matrix_batch_dims((x,
spec_mat))
# Third, put the block shape back into spectrum.
batch_shape = array_ops.shape(x)[:-2]
spectrum = array_ops.reshape(
spec_mat,
array_ops.concat((batch_shape, self.block_shape_tensor()), axis=0))
return x, spectrum
def _matmul(self, x, adjoint=False, adjoint_arg=False):
x = linalg.adjoint(x) if adjoint_arg else x
# With F the matrix of a DFT, and F^{-1}, F^H the inverse and Hermitian
# transpose, one can show that F^{-1} = F^{H} is the IDFT matrix. Therefore
# matmul(x) = F^{-1} diag(spectrum) F x,
# = F^{H} diag(spectrum) F x,
# so that
# matmul(x, adjoint=True) = F^{H} diag(conj(spectrum)) F x.
spectrum = self._conj_spectrum if adjoint else self._spectrum_complex
x = math_ops.cast(x, spectrum.dtype)
x, spectrum = self._broadcast_batch_dims(x, spectrum)
x_vb = self._vectorize_then_blockify(x)
fft_x_vb = self._fft(x_vb)
block_vector_result = self._ifft(spectrum * fft_x_vb)
y = self._unblockify_then_matricize(block_vector_result)
return math_ops.cast(y, self.dtype)
def _determinant(self):
axis = [-(i + 1) for i in range(self.block_depth)]
det = math_ops.reduce_prod(self.spectrum, axis=axis)
return math_ops.cast(det, self.dtype)
def _log_abs_determinant(self):
axis = [-(i + 1) for i in range(self.block_depth)]
lad = math_ops.reduce_sum(math_ops.log(self._abs_spectrum), axis=axis)
return math_ops.cast(lad, self.dtype)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
spectrum = self._conj_spectrum if adjoint else self._spectrum_complex
rhs, spectrum = self._broadcast_batch_dims(rhs, spectrum)
rhs_vb = self._vectorize_then_blockify(rhs)
fft_rhs_vb = self._fft(rhs_vb)
solution_vb = self._ifft(fft_rhs_vb / spectrum)
x = self._unblockify_then_matricize(solution_vb)
return math_ops.cast(x, self.dtype)
def _diag_part(self):
# Get ones in shape of diag, which is [B1,...,Bb, N]
# Also get the size of the diag, "N".
if self.shape.is_fully_defined():
diag_shape = self.shape[:-1]
diag_size = self.domain_dimension.value
else:
diag_shape = self.shape_tensor()[:-1]
diag_size = self.domain_dimension_tensor()
ones_diag = array_ops.ones(diag_shape, dtype=self.dtype)
# As proved in comments in self._trace, the value on the diag is constant,
# repeated N times. This value is the trace divided by N.
# The handling of self.shape = (0, 0) is tricky, and is the reason we choose
# to compute trace and use that to compute diag_part, rather than computing
# the value on the diagonal ("diag_value") directly. Both result in a 0/0,
# but in different places, and the current method gives the right result in
# the end.
# Here, if self.shape = (0, 0), then self.trace() = 0., and then
# diag_value = 0. / 0. = NaN.
diag_value = self.trace() / math_ops.cast(diag_size, self.dtype)
# If self.shape = (0, 0), then ones_diag = [] (empty tensor), and then
# the following line is NaN * [] = [], as needed.
return diag_value[..., array_ops.newaxis] * ones_diag
def _trace(self):
# The diagonal of the [[nested] block] circulant operator is the mean of
# the spectrum.
# Proof: For the [0,...,0] element, this follows from the IDFT formula.
# Then the result follows since all diagonal elements are the same.
# Therefore, the trace is the sum of the spectrum.
# Get shape of diag along with the axis over which to reduce the spectrum.
# We will reduce the spectrum over all block indices.
if self.spectrum.get_shape().is_fully_defined():
spec_rank = self.spectrum.get_shape().ndims
axis = np.arange(spec_rank - self.block_depth, spec_rank, dtype=np.int32)
else:
spec_rank = array_ops.rank(self.spectrum)
axis = math_ops.range(spec_rank - self.block_depth, spec_rank)
# Real diag part "re_d".
# Suppose spectrum.shape = [B1,...,Bb, N1, N2]
# self.shape = [B1,...,Bb, N, N], with N1 * N2 = N.
# re_d_value.shape = [B1,...,Bb]
re_d_value = math_ops.reduce_sum(math_ops.real(self.spectrum), axis=axis)
if not self.dtype.is_complex:
return math_ops.cast(re_d_value, self.dtype)
# Imaginary part, "im_d".
if self.is_self_adjoint:
im_d_value = array_ops.zeros_like(re_d_value)
else:
im_d_value = math_ops.reduce_sum(math_ops.imag(self.spectrum), axis=axis)
return math_ops.cast(math_ops.complex(re_d_value, im_d_value), self.dtype)
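# A minimal NumPy sketch (illustrative only; the kernel is made up) of the
# identity behind `_trace` and `_diag_part`: for A[m, n] = h[(m - n) % N] with
# spectrum H = DFT(h), every diagonal entry equals h[0], hence
# trace(A) = N * h[0] = sum(H) and each diagonal entry equals mean(H).
if __name__ == "__main__":
  h = np.array([4., 1., 0., 1.])
  spectrum = np.fft.fft(h)
  dense = np.array([[h[(m - n) % 4] for n in range(4)] for m in range(4)])
  assert np.isclose(np.trace(dense), spectrum.sum().real)
  assert np.allclose(np.diag(dense), spectrum.sum().real / 4.)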
@tf_export("linalg.LinearOperatorCirculant")
class LinearOperatorCirculant(_BaseLinearOperatorCirculant):
"""`LinearOperator` acting like a circulant matrix.
This operator acts like a circulant matrix `A` with
shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
#### Description in terms of circulant matrices
Circulant means the entries of `A` are generated by a single vector, the
convolution kernel `h`: `A_{mn} := h_{m-n mod N}`. With `h = [w, x, y, z]`,
```
A = |w z y x|
|x w z y|
|y x w z|
|z y x w|
```
This means that the result of matrix multiplication `v = Au` has its `lth`
column given by the circular convolution of `h` with the `lth` column of `u`.
See http://ee.stanford.edu/~gray/toeplitz.pdf
#### Description in terms of the frequency spectrum
There is an equivalent description in terms of the [batch] spectrum `H` and
Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch
dimensions. Define the discrete Fourier transform (DFT) and its inverse by
```
DFT[ h[n] ] = H[k] := sum_{n = 0}^{N - 1} h_n e^{-i 2pi k n / N}
IDFT[ H[k] ] = h[n] = N^{-1} sum_{k = 0}^{N - 1} H_k e^{i 2pi k n / N}
```
From these definitions, we see that
```
H[0] = sum_{n = 0}^{N - 1} h_n
H[1] = "the first positive frequency"
H[N - 1] = "the first negative frequency"
```
Loosely speaking, with `*` element-wise multiplication, matrix multiplication
is equal to the action of a Fourier multiplier: `A u = IDFT[ H * DFT[u] ]`.
Precisely speaking, given `[N, R]` matrix `u`, let `DFT[u]` be the `[N, R]`
matrix with `rth` column equal to the DFT of the `rth` column of `u`.
Define the `IDFT` similarly.
Matrix multiplication may be expressed columnwise:
```(A u)_r = IDFT[ H * (DFT[u])_r ]```
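For instance, a NumPy sketch of this identity for a single column (the kernel
below is made up and unrelated to any particular instance):
```python
import numpy as np
h = np.array([1., 2., 0., 3.])  # Convolution kernel.
H = np.fft.fft(h)  # Spectrum.
A = np.array([[h[(m - n) % 4] for n in range(4)] for m in range(4)])
u = np.random.randn(4)
np.testing.assert_allclose(A @ u, np.fft.ifft(H * np.fft.fft(u)).real, atol=1e-8)
```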
#### Operator properties deduced from the spectrum.
Letting `u` be the `kth` Euclidean basis vector, and `U = IDFT[u]`, the above
formulas show that `A U = H_k * U`. We conclude that the elements
of `H` are the eigenvalues of this operator. Therefore
* This operator is positive definite if and only if `Real{H} > 0`.
A general property of Fourier transforms is the correspondence between
Hermitian functions and real valued transforms.
Suppose `H.shape = [B1,...,Bb, N]`. We say that `H` is a Hermitian spectrum
if, with `%` meaning modulus division,
```H[..., n % N] = ComplexConjugate[ H[..., (-n) % N] ]```
* This operator corresponds to a real matrix if and only if `H` is Hermitian.
* This operator is self-adjoint if and only if `H` is real.
See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer.
#### Example of a self-adjoint positive definite operator
```python
# spectrum is real ==> operator is self-adjoint
# spectrum is positive ==> operator is positive definite
spectrum = [6., 4, 2]
operator = LinearOperatorCirculant(spectrum)
# IFFT[spectrum]
operator.convolution_kernel()
==> [4 + 0j, 1 + 0.58j, 1 - 0.58j]
operator.to_dense()
==> [[4 + 0.0j, 1 - 0.6j, 1 + 0.6j],
[1 + 0.6j, 4 + 0.0j, 1 - 0.6j],
[1 - 0.6j, 1 + 0.6j, 4 + 0.0j]]
```
#### Example of defining in terms of a real convolution kernel
```python
# convolution_kernel is real ==> spectrum is Hermitian.
convolution_kernel = [1., 2., 1.]
spectrum = tf.signal.fft(tf.cast(convolution_kernel, tf.complex64))
# spectrum is Hermitian ==> operator is real.
# spectrum is shape [3] ==> operator is shape [3, 3]
# We force the input/output type to be real, which allows this to operate
# like a real matrix.
operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32)
operator.to_dense()
==> [[ 1, 1, 2],
[ 2, 1, 1],
[ 1, 2, 1]]
```
#### Example of Hermitian spectrum
```python
# spectrum is shape [3] ==> operator is shape [3, 3]
# spectrum is Hermitian ==> operator is real.
spectrum = [1, 1j, -1j]
operator = LinearOperatorCirculant(spectrum)
operator.to_dense()
==> [[ 0.33 + 0j, 0.91 + 0j, -0.24 + 0j],
[-0.24 + 0j, 0.33 + 0j, 0.91 + 0j],
[ 0.91 + 0j, -0.24 + 0j, 0.33 + 0j]]
```
#### Example of forcing real `dtype` when spectrum is Hermitian
```python
# spectrum is shape [4] ==> operator is shape [4, 4]
# spectrum is real ==> operator is self-adjoint
# spectrum is Hermitian ==> operator is real
# spectrum has positive real part ==> operator is positive-definite.
spectrum = [6., 4, 2, 4]
# Force the input dtype to be float32.
# Cast the output to float32. This is fine because the operator will be
# real due to Hermitian spectrum.
operator = LinearOperatorCirculant(spectrum, input_output_dtype=tf.float32)
operator.shape
==> [4, 4]
operator.to_dense()
==> [[4, 1, 0, 1],
[1, 4, 1, 0],
[0, 1, 4, 1],
[1, 0, 1, 4]]
# convolution_kernel = tf.signal.ifft(spectrum)
operator.convolution_kernel()
==> [4, 1, 0, 1]
```
#### Performance
Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(R*N*Log[N])`
* `operator.solve(x)` is `O(R*N*Log[N])`
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
spectrum,
input_output_dtype=dtypes.complex64,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
name="LinearOperatorCirculant"):
r"""Initialize an `LinearOperatorCirculant`.
This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]`
by providing `spectrum`, a `[B1,...,Bb, N]` `Tensor`.
If `input_output_dtype = DTYPE`:
* Arguments to methods such as `matmul` or `solve` must be `DTYPE`.
* Values returned by all methods, such as `matmul` or `determinant` will be
cast to `DTYPE`.
Note that if the spectrum is not Hermitian, then this operator corresponds
to a complex matrix with non-zero imaginary part. In this case, setting
`input_output_dtype` to a real type will forcibly cast the output to be
real, resulting in incorrect results!
If on the other hand the spectrum is Hermitian, then this operator
corresponds to a real-valued matrix, and setting `input_output_dtype` to
a real type is fine.
Args:
spectrum: Shape `[B1,...,Bb, N]` `Tensor`. Allowed dtypes: `float16`,
`float32`, `float64`, `complex64`, `complex128`. Type can be different
than `input_output_dtype`
input_output_dtype: `dtype` for input/output.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `spectrum` is real, this will always be true.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name to prepend to all ops created by this class.
"""
super(LinearOperatorCirculant, self).__init__(
spectrum,
block_depth=1,
input_output_dtype=input_output_dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@tf_export("linalg.LinearOperatorCirculant2D")
class LinearOperatorCirculant2D(_BaseLinearOperatorCirculant):
"""`LinearOperator` acting like a block circulant matrix.
This operator acts like a block circulant matrix `A` with
shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
#### Description in terms of block circulant matrices
If `A` is block circulant, with block sizes `N0, N1` (`N0 * N1 = N`):
`A` has a block circulant structure, composed of `N0 x N0` blocks, with each
block an `N1 x N1` circulant matrix.
For example, with `W`, `X`, `Y`, `Z` each circulant,
```
A = |W Z Y X|
|X W Z Y|
|Y X W Z|
|Z Y X W|
```
Note that `A` itself will not in general be circulant.
#### Description in terms of the frequency spectrum
There is an equivalent description in terms of the [batch] spectrum `H` and
Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch
dimensions.
If `H.shape = [N0, N1]`, (`N0 * N1 = N`):
Loosely speaking, matrix multiplication is equal to the action of a
Fourier multiplier: `A u = IDFT2[ H DFT2[u] ]`.
Precisely speaking, given `[N, R]` matrix `u`, let `DFT2[u]` be the
`[N0, N1, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, R]` and taking
a two dimensional DFT across the first two dimensions. Let `IDFT2` be the
inverse of `DFT2`. Matrix multiplication may be expressed columnwise:
```(A u)_r = IDFT2[ H * (DFT2[u])_r ]```
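As a NumPy sketch of this recipe for a single column (the kernel and shapes
below are made up):
```python
import numpy as np
H = np.fft.fft2(np.array([[1., 2., 1.], [5., -1., 1.]]))  # Spectrum, shape [2, 3].
u = np.random.randn(6)  # A single column, N = 2 * 3 = 6.
A_u = np.fft.ifft2(H * np.fft.fft2(u.reshape(2, 3))).real.reshape(6)
```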
#### Operator properties deduced from the spectrum.
* This operator is positive definite if and only if `Real{H} > 0`.
A general property of Fourier transforms is the correspondence between
Hermitian functions and real valued transforms.
Suppose `H.shape = [B1,...,Bb, N0, N1]`, we say that `H` is a Hermitian
spectrum if, with `%` indicating modulus division,
```
H[..., n0 % N0, n1 % N1] = ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1] ].
```
* This operator corresponds to a real matrix if and only if `H` is Hermitian.
* This operator is self-adjoint if and only if `H` is real.
See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer.
#### Example of a self-adjoint positive definite operator
```python
# spectrum is real ==> operator is self-adjoint
# spectrum is positive ==> operator is positive definite
spectrum = [[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]]
operator = LinearOperatorCirculant2D(spectrum)
# IFFT[spectrum]
operator.convolution_kernel()
==> [[5.0+0.0j, -0.5-.3j, -0.5+.3j],
[-1.5-.9j, 0, 0],
[-1.5+.9j, 0, 0]]
operator.to_dense()
==> Complex self adjoint 9 x 9 matrix.
```
#### Example of defining in terms of a real convolution kernel
```python
# convolution_kernel is real ==> spectrum is Hermitian.
convolution_kernel = [[1., 2., 1.], [5., -1., 1.]]
spectrum = tf.signal.fft2d(tf.cast(convolution_kernel, tf.complex64))
# spectrum is shape [2, 3] ==> operator is shape [6, 6]
# spectrum is Hermitian ==> operator is real.
operator = LinearOperatorCirculant2D(spectrum, input_output_dtype=tf.float32)
```
#### Performance
Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(R*N*Log[N])`
* `operator.solve(x)` is `O(R*N*Log[N])`
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
spectrum,
input_output_dtype=dtypes.complex64,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
name="LinearOperatorCirculant2D"):
r"""Initialize an `LinearOperatorCirculant2D`.
This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]`
by providing `spectrum`, a `[B1,...,Bb, N0, N1]` `Tensor` with `N0*N1 = N`.
If `input_output_dtype = DTYPE`:
* Arguments to methods such as `matmul` or `solve` must be `DTYPE`.
* Values returned by all methods, such as `matmul` or `determinant` will be
cast to `DTYPE`.
Note that if the spectrum is not Hermitian, then this operator corresponds
to a complex matrix with non-zero imaginary part. In this case, setting
`input_output_dtype` to a real type will forcibly cast the output to be
real, resulting in incorrect results!
If on the other hand the spectrum is Hermitian, then this operator
corresponds to a real-valued matrix, and setting `input_output_dtype` to
a real type is fine.
Args:
spectrum: Shape `[B1,...,Bb, N0, N1]` `Tensor`. Allowed dtypes: `float16`,
`float32`, `float64`, `complex64`, `complex128`. Type can be different
than `input_output_dtype`
input_output_dtype: `dtype` for input/output.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `spectrum` is real, this will always be true.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name to prepend to all ops created by this class.
"""
super(LinearOperatorCirculant2D, self).__init__(
spectrum,
block_depth=2,
input_output_dtype=input_output_dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@tf_export("linalg.LinearOperatorCirculant3D")
class LinearOperatorCirculant3D(_BaseLinearOperatorCirculant):
"""`LinearOperator` acting like a nested block circulant matrix.
This operator acts like a block circulant matrix `A` with
shape `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
#### Description in terms of block circulant matrices
If `A` is nested block circulant, with block sizes `N0, N1, N2`
(`N0 * N1 * N2 = N`):
`A` has a block structure, composed of `N0 x N0` blocks, with each
block an `N1 x N1` block circulant matrix.
For example, with `W`, `X`, `Y`, `Z` each block circulant,
```
A = |W Z Y X|
|X W Z Y|
|Y X W Z|
|Z Y X W|
```
Note that `A` itself will not in general be circulant.
#### Description in terms of the frequency spectrum
There is an equivalent description in terms of the [batch] spectrum `H` and
Fourier transforms. Here we consider `A.shape = [N, N]` and ignore batch
dimensions.
If `H.shape = [N0, N1, N2]`, (`N0 * N1 * N2 = N`):
Loosely speaking, matrix multiplication is equal to the action of a
Fourier multiplier: `A u = IDFT3[ H DFT3[u] ]`.
Precisely speaking, given `[N, R]` matrix `u`, let `DFT3[u]` be the
`[N0, N1, N2, R]` `Tensor` defined by re-shaping `u` to `[N0, N1, N2, R]` and
taking a three dimensional DFT across the first three dimensions. Let `IDFT3`
be the inverse of `DFT3`. Matrix multiplication may be expressed columnwise:
```(A u)_r = IDFT3[ H * (DFT3[u])_r ]```
#### Operator properties deduced from the spectrum.
* This operator is positive definite if and only if `Real{H} > 0`.
A general property of Fourier transforms is the correspondence between
Hermitian functions and real valued transforms.
Suppose `H.shape = [B1,...,Bb, N0, N1, N2]`, we say that `H` is a Hermitian
spectrum if, with `%` meaning modulus division,
```
H[..., n0 % N0, n1 % N1, n2 % N2]
= ComplexConjugate[ H[..., (-n0) % N0, (-n1) % N1, (-n2) % N2] ].
```
* This operator corresponds to a real matrix if and only if `H` is Hermitian.
* This operator is self-adjoint if and only if `H` is real.
See e.g. "Discrete-Time Signal Processing", Oppenheim and Schafer.
#### Examples
See `LinearOperatorCirculant` and `LinearOperatorCirculant2D` for examples.
#### Performance
Suppose `operator` is a `LinearOperatorCirculant` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(R*N*Log[N])`
* `operator.solve(x)` is `O(R*N*Log[N])`
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
spectrum,
input_output_dtype=dtypes.complex64,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
name="LinearOperatorCirculant3D"):
"""Initialize an `LinearOperatorCirculant`.
This `LinearOperator` is initialized to have shape `[B1,...,Bb, N, N]`
by providing `spectrum`, a `[B1,...,Bb, N0, N1, N2]` `Tensor`
with `N0*N1*N2 = N`.
If `input_output_dtype = DTYPE`:
* Arguments to methods such as `matmul` or `solve` must be `DTYPE`.
* Values returned by all methods, such as `matmul` or `determinant` will be
cast to `DTYPE`.
Note that if the spectrum is not Hermitian, then this operator corresponds
to a complex matrix with non-zero imaginary part. In this case, setting
`input_output_dtype` to a real type will forcibly cast the output to be
real, resulting in incorrect results!
If on the other hand the spectrum is Hermitian, then this operator
corresponds to a real-valued matrix, and setting `input_output_dtype` to
a real type is fine.
Args:
spectrum: Shape `[B1,...,Bb, N0, N1, N2]` `Tensor`. Allowed dtypes: `float16`,
`float32`, `float64`, `complex64`, `complex128`. Type can be different
than `input_output_dtype`
input_output_dtype: `dtype` for input/output.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `spectrum` is real, this will always be true.
is_positive_definite: Expect that this operator is positive definite,
meaning the real part of all eigenvalues is positive. We do not require
the operator to be self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name to prepend to all ops created by this class.
"""
super(LinearOperatorCirculant3D, self).__init__(
spectrum,
block_depth=3,
input_output_dtype=input_output_dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _to_complex(x):
if x.dtype.is_complex:
return x
dtype = dtypes.complex64
if x.dtype == dtypes.float64:
dtype = dtypes.complex128
return math_ops.cast(x, dtype)
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_circulant.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities for registering LinearOperator methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Note: only use this method in the commuting case.
def combined_commuting_self_adjoint_hint(operator_a, operator_b):
"""Get combined hint for self-adjoint-ness."""
# The property is preserved under composition when the operators commute.
if operator_a.is_self_adjoint and operator_b.is_self_adjoint:
return True
# The property is not preserved when an operator with the property is composed
# with an operator without the property.
# pylint:disable=g-bool-id-comparison
if ((operator_a.is_self_adjoint is True and
operator_b.is_self_adjoint is False) or
(operator_a.is_self_adjoint is False and
operator_b.is_self_adjoint is True)):
return False
# pylint:enable=g-bool-id-comparison
# The property is not known when operators are not known to have the property
# or both operators don't have the property (the property for the complement
# class is not closed under composition).
return None
def is_square(operator_a, operator_b):
"""Return a hint to whether the composition is square."""
if operator_a.is_square and operator_b.is_square:
return True
if operator_a.is_square is False and operator_b.is_square is False: # pylint:disable=g-bool-id-comparison
# Let A have shape [B, M, N], B have shape [B, N, L].
m = operator_a.range_dimension
l = operator_b.domain_dimension
if m is not None and l is not None:
return m == l
if (operator_a.is_square != operator_b.is_square) and (
operator_a.is_square is not None and operator_b.is_square is not None):
return False
return None
# Note: Positive definiteness is only guaranteed to be preserved
# when the operators commute and are symmetric. Only use this method in
# commuting cases.
def combined_commuting_positive_definite_hint(operator_a, operator_b):
"""Get combined PD hint for compositions."""
# pylint:disable=g-bool-id-comparison
if (operator_a.is_positive_definite is True and
operator_a.is_self_adjoint is True and
operator_b.is_positive_definite is True and
operator_b.is_self_adjoint is True):
return True
# pylint:enable=g-bool-id-comparison
return None
def combined_non_singular_hint(operator_a, operator_b):
"""Get combined hint for when ."""
# If either operator is not-invertible the composition isn't.
# pylint:disable=g-bool-id-comparison
if (operator_a.is_non_singular is False or
operator_b.is_non_singular is False):
return False
# pylint:enable=g-bool-id-comparison
return operator_a.is_non_singular and operator_b.is_non_singular
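# A minimal usage sketch (illustrative only): the combinators above read only
# the `is_*` hint properties, so simple stand-in objects are enough to see the
# three-valued True / False / None behavior.
if __name__ == "__main__":
  import collections
  _Op = collections.namedtuple(
      "_Op", ["is_self_adjoint", "is_positive_definite", "is_non_singular"])
  a = _Op(is_self_adjoint=True, is_positive_definite=True, is_non_singular=True)
  b = _Op(is_self_adjoint=True, is_positive_definite=None, is_non_singular=None)
  print(combined_commuting_self_adjoint_hint(a, b))  # ==> True
  print(combined_commuting_positive_definite_hint(a, b))  # ==> None
  print(combined_non_singular_hint(a, b))  # ==> None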
|
tensorflow-master
|
tensorflow/python/ops/linalg/registrations_util.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registrations for LinearOperator.adjoint."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_adjoint
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_block_diag
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_householder
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_kronecker
# By default, return a LinearOperatorAdjoint, which defers .matmul and .solve
# to the wrapped operator with the adjoint flag flipped.
@linear_operator_algebra.RegisterAdjoint(linear_operator.LinearOperator)
def _adjoint_linear_operator(linop):
return linear_operator_adjoint.LinearOperatorAdjoint(
linop,
is_non_singular=linop.is_non_singular,
is_self_adjoint=linop.is_self_adjoint,
is_positive_definite=linop.is_positive_definite,
is_square=linop.is_square)
@linear_operator_algebra.RegisterAdjoint(
linear_operator_adjoint.LinearOperatorAdjoint)
def _adjoint_adjoint_linear_operator(linop):
return linop.operator
@linear_operator_algebra.RegisterAdjoint(
linear_operator_identity.LinearOperatorIdentity)
def _adjoint_identity(identity_operator):
return identity_operator
@linear_operator_algebra.RegisterAdjoint(
linear_operator_identity.LinearOperatorScaledIdentity)
def _adjoint_scaled_identity(identity_operator):
multiplier = identity_operator.multiplier
if multiplier.dtype.is_complex:
multiplier = math_ops.conj(multiplier)
return linear_operator_identity.LinearOperatorScaledIdentity(
num_rows=identity_operator._num_rows, # pylint: disable=protected-access
multiplier=multiplier,
is_non_singular=identity_operator.is_non_singular,
is_self_adjoint=identity_operator.is_self_adjoint,
is_positive_definite=identity_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterAdjoint(
linear_operator_diag.LinearOperatorDiag)
def _adjoint_diag(diag_operator):
diag = diag_operator.diag
if diag.dtype.is_complex:
diag = math_ops.conj(diag)
return linear_operator_diag.LinearOperatorDiag(
diag=diag,
is_non_singular=diag_operator.is_non_singular,
is_self_adjoint=diag_operator.is_self_adjoint,
is_positive_definite=diag_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterAdjoint(
linear_operator_block_diag.LinearOperatorBlockDiag)
def _adjoint_block_diag(block_diag_operator):
# We take the adjoint of each block on the diagonal.
return linear_operator_block_diag.LinearOperatorBlockDiag(
operators=[
operator.adjoint() for operator in block_diag_operator.operators],
is_non_singular=block_diag_operator.is_non_singular,
is_self_adjoint=block_diag_operator.is_self_adjoint,
is_positive_definite=block_diag_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterAdjoint(
linear_operator_kronecker.LinearOperatorKronecker)
def _adjoint_kronecker(kronecker_operator):
# Adjoint of a Kronecker product is the Kronecker product
# of adjoints.
return linear_operator_kronecker.LinearOperatorKronecker(
operators=[
operator.adjoint() for operator in kronecker_operator.operators],
is_non_singular=kronecker_operator.is_non_singular,
is_self_adjoint=kronecker_operator.is_self_adjoint,
is_positive_definite=kronecker_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterAdjoint(
linear_operator_circulant.LinearOperatorCirculant)
def _adjoint_circulant(circulant_operator):
spectrum = circulant_operator.spectrum
if spectrum.dtype.is_complex:
spectrum = math_ops.conj(spectrum)
# Conjugating the spectrum is sufficient to get the adjoint.
return linear_operator_circulant.LinearOperatorCirculant(
spectrum=spectrum,
is_non_singular=circulant_operator.is_non_singular,
is_self_adjoint=circulant_operator.is_self_adjoint,
is_positive_definite=circulant_operator.is_positive_definite,
is_square=True)
@linear_operator_algebra.RegisterAdjoint(
linear_operator_householder.LinearOperatorHouseholder)
def _adjoint_householder(householder_operator):
return householder_operator
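# A minimal usage sketch (illustrative only; the values are made up). With
# these registrations imported, `LinearOperator.adjoint()` dispatches to the
# most specific rule above, e.g. the diag rule returns another diag operator
# whose diagonal is conjugated.
if __name__ == "__main__":
  import tensorflow as tf
  op = tf.linalg.LinearOperatorDiag([1. + 2.j, 3. - 1.j])
  adj = op.adjoint()
  print(type(adj).__name__)  # ==> LinearOperatorDiag
  print(adj.diag_part())  # ==> [1.-2.j, 3.+1.j] when executing eagerly.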
|
tensorflow-master
|
tensorflow/python/ops/linalg/adjoint_registrations.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registrations for LinearOperator.solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_composition
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_inversion
from tensorflow.python.ops.linalg import linear_operator_lower_triangular
from tensorflow.python.ops.linalg import registrations_util
# By default, use a LinearOperatorComposition to delay the computation.
@linear_operator_algebra.RegisterSolve(
linear_operator.LinearOperator, linear_operator.LinearOperator)
def _solve_linear_operator(linop_a, linop_b):
"""Generic solve of two `LinearOperator`s."""
is_square = registrations_util.is_square(linop_a, linop_b)
is_non_singular = None
is_self_adjoint = None
is_positive_definite = None
if is_square:
is_non_singular = registrations_util.combined_non_singular_hint(
linop_a, linop_b)
elif is_square is False: # pylint:disable=g-bool-id-comparison
is_non_singular = False
is_self_adjoint = False
is_positive_definite = False
return linear_operator_composition.LinearOperatorComposition(
operators=[
linear_operator_inversion.LinearOperatorInversion(linop_a),
linop_b
],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
)
@linear_operator_algebra.RegisterSolve(
linear_operator_inversion.LinearOperatorInversion,
linear_operator.LinearOperator)
def _solve_inverse_linear_operator(linop_a, linop_b):
"""Solve inverse of generic `LinearOperator`s."""
return linop_a.operator.matmul(linop_b)
# Identity
@linear_operator_algebra.RegisterSolve(
linear_operator_identity.LinearOperatorIdentity,
linear_operator.LinearOperator)
def _solve_linear_operator_identity_left(identity, linop):
del identity
return linop
# Diag.
@linear_operator_algebra.RegisterSolve(
linear_operator_diag.LinearOperatorDiag,
linear_operator_diag.LinearOperatorDiag)
def _solve_linear_operator_diag(linop_a, linop_b):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_b.diag / linop_a.diag,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_a, linop_b),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_a, linop_b),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_a, linop_b)),
is_square=True)
@linear_operator_algebra.RegisterSolve(
linear_operator_diag.LinearOperatorDiag,
linear_operator_identity.LinearOperatorScaledIdentity)
def _solve_linear_operator_diag_scaled_identity_right(
linop_diag, linop_scaled_identity):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_scaled_identity.multiplier / linop_diag.diag,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_scaled_identity),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_scaled_identity),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_diag, linop_scaled_identity)),
is_square=True)
@linear_operator_algebra.RegisterSolve(
linear_operator_identity.LinearOperatorScaledIdentity,
linear_operator_diag.LinearOperatorDiag)
def _solve_linear_operator_diag_scaled_identity_left(
linop_scaled_identity, linop_diag):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_diag.diag / linop_scaled_identity.multiplier,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_scaled_identity),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_scaled_identity),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_diag, linop_scaled_identity)),
is_square=True)
@linear_operator_algebra.RegisterSolve(
linear_operator_diag.LinearOperatorDiag,
linear_operator_lower_triangular.LinearOperatorLowerTriangular)
def _solve_linear_operator_diag_tril(linop_diag, linop_triangular):
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
tril=linop_triangular.to_dense() / linop_diag.diag[..., None],
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_triangular),
# This is safe to do since the Triangular matrix is only self-adjoint
# when it is a diagonal matrix, and hence commutes.
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_triangular),
is_positive_definite=None,
is_square=True)
# Circulant.
@linear_operator_algebra.RegisterSolve(
linear_operator_circulant.LinearOperatorCirculant,
linear_operator_circulant.LinearOperatorCirculant)
def _solve_linear_operator_circulant_circulant(linop_a, linop_b):
return linear_operator_circulant.LinearOperatorCirculant(
spectrum=linop_b.spectrum / linop_a.spectrum,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_a, linop_b),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_a, linop_b),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_a, linop_b)),
is_square=True)
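# A minimal usage sketch (illustrative only; the values are made up). With
# these registrations imported, solving one operator against another returns a
# structured operator; e.g. diag against diag stays diagonal with an
# element-wise ratio, per `_solve_linear_operator_diag` above.
if __name__ == "__main__":
  import tensorflow as tf
  a = tf.linalg.LinearOperatorDiag([2., 4.])
  b = tf.linalg.LinearOperatorDiag([6., 8.])
  solved = a.solve(b)
  print(type(solved).__name__)  # ==> LinearOperatorDiag
  print(solved.diag_part())  # ==> [3., 2.] when executing eagerly.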
|
tensorflow-master
|
tensorflow/python/ops/linalg/solve_registrations.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorFullMatrix"]
@tf_export("linalg.LinearOperatorFullMatrix")
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
"""`LinearOperator` that wraps a [batch] matrix.
This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `M x N` matrix.
```python
# Create a 2 x 2 linear operator.
matrix = [[1., 2.], [3., 4.]]
operator = LinearOperatorFullMatrix(matrix)
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix = tf.random.normal(shape=[2, 3, 4, 4])
operator = LinearOperatorFullMatrix(matrix)
```
#### Shape compatibility
This operator acts on [batch] matrices with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
`LinearOperatorFullMatrix` has exactly the same performance as would be
achieved by using standard `TensorFlow` matrix ops. Intelligent choices are
made based on the following initialization hints.
* If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
Cholesky factorization is used for the determinant and solve.
In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape
`[M, N]`, and `x.shape = [N, R]`. Then
* `operator.matmul(x)` is `O(M * N * R)`.
* If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
* If `M=N`, `operator.determinant()` is `O(N^3)`.
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
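For example, a real symmetric positive definite matrix can be wrapped with
hints so that the Cholesky-based path described above can be used (the matrix
below is illustrative):
```python
matrix = [[4., 1.], [1., 3.]]  # Symmetric positive definite.
operator = LinearOperatorFullMatrix(
    matrix, is_self_adjoint=True, is_positive_definite=True)
# determinant() and solve() may now use a Cholesky factorization internally.
```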
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorFullMatrix"):
r"""Initialize a `LinearOperatorFullMatrix`.
Args:
matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
TypeError: If `matrix.dtype` is not an allowed type.
"""
with ops.name_scope(name, values=[matrix]):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
self._check_matrix(self._matrix)
super(LinearOperatorFullMatrix, self).__init__(
dtype=self._matrix.dtype,
graph_parents=[self._matrix],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
def _check_matrix(self, matrix):
"""Static check of the `matrix` argument."""
allowed_dtypes = [
dtypes.float16,
dtypes.float32,
dtypes.float64,
dtypes.complex64,
dtypes.complex128,
]
matrix = ops.convert_to_tensor(matrix, name="matrix")
dtype = matrix.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
if matrix.get_shape().ndims is not None and matrix.get_shape().ndims < 2:
raise ValueError(
"Argument matrix must have at least 2 dimensions. Found: %s"
% matrix)
def _shape(self):
return self._matrix.get_shape()
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return math_ops.matmul(
self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
def _to_dense(self):
return self._matrix
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_full_matrix.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Construct the Kronecker product of one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorKronecker"]
def _vec(x):
"""Stacks column of matrix to form a single column."""
return array_ops.reshape(
array_ops.matrix_transpose(x),
array_ops.concat(
[array_ops.shape(x)[:-2], [-1]], axis=0))
def _unvec_by(y, num_col):
"""Unstack vector to form a matrix, with a specified amount of columns."""
return array_ops.matrix_transpose(
array_ops.reshape(
y,
array_ops.concat(
[array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))
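# For example, for x = [[1., 2.], [3., 4.]], _vec(x) stacks the columns to give
# [1., 3., 2., 4.], and _unvec_by(_vec(x), num_col=2) recovers the original
# matrix.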
def _rotate_last_dim(x, rotate_right=False):
"""Rotate the last dimension either left or right."""
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat(
[[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
else:
transpose_perm = array_ops.concat(
[math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm)
@tf_export("linalg.LinearOperatorKronecker")
class LinearOperatorKronecker(linear_operator.LinearOperator):
"""Kronecker product between two `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` representing the Kronecker product:
  `op1 x op2 x ... x opJ` (we omit parentheses as the Kronecker product is
associative).
If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator
will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,
where the product is over all operators.
```python
# Create a 4 x 4 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])
operator = LinearOperatorKronecker([operator_1, operator_2])
operator.to_dense()
==> [[1., 2., 0., 0.],
[3., 4., 0., 0.],
[2., 4., 1., 2.],
[6., 8., 3., 4.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [4, 2] Tensor
operator.matmul(x)
==> Shape [4, 2] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random.normal(shape=[2, 3, 4, 5])
  operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random.normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 20 x 30 operators.
operator_large = LinearOperatorKronecker([operator_45, operator_56])
  # Create a shape [2, 3, 30, 2] Tensor (a batch of matrices with 2 columns).
  x = tf.random.normal(shape=[2, 3, 30, 2])
  operator_large.matmul(x)
  ==> Shape [2, 3, 20, 2] Tensor
```
#### Performance
  The performance of `LinearOperatorKronecker` on any operation is roughly
  equal to the sum of the costs of the same operation on the individual
  factors; the dense Kronecker product is never materialized.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
        #Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a list of >=1 operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
    # A Kronecker product is invertible if and only if all factors are
# invertible.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The Kronecker product of non-singular operators is always "
"non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The Kronecker product of self-adjoint operators is always "
"self-adjoint.")
is_self_adjoint = True
    # The eigenvalues of a Kronecker product are the products of the
    # eigenvalues of the corresponding factors.
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError("The Kronecker product of positive-definite operators "
"is always positive-definite.")
is_positive_definite = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = operators[0].name
for operator in operators[1:]:
name += "_x_" + operator.name
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension
range_dimension = self.operators[0].range_dimension
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension
matrix_shape = tensor_shape.TensorShape([
range_dimension, domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
domain_dimension = self.operators[0].domain_dimension_tensor()
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension_tensor()
range_dimension = self.operators[0].range_dimension_tensor()
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension_tensor()
matrix_shape = [range_dimension, domain_dimension]
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape_tensor()
for operator in self.operators[1:]:
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape, operator.batch_shape_tensor())
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Here we heavily rely on Roth's column Lemma [1]:
# (A x B) * vec X = vec BXA^T,
# where vec stacks all the columns of the matrix under each other. In our
# case, x represents a batch of vec X (i.e. we think of x as a batch of
# column vectors, rather than a matrix). Each member of the batch can be
# reshaped to a matrix (hence we get a batch of matrices).
# We can iteratively apply this lemma by noting that if B is a Kronecker
# product, then we can apply the lemma again.
# [1] W. E. Roth, "On direct product matrices,"
# Bulletin of the American Mathematical Society, vol. 40, pp. 461-468,
# 1934
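    # As an illustration of the lemma: with A = [[1, 0], [0, 2]], B = I_2 and
    # X = [[a, b], [c, d]], vec X = [a, c, b, d], and both (A x B) * vec X and
    # vec BXA^T equal [a, c, 2b, 2d].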
    # Efficiency
    # Naively doing the Kronecker product, by calculating the dense matrix and
    # applying it, can take cubic time in the size of domain_dimension
    # (assuming a square matrix). The other issue is that calculating the dense
    # matrix can be prohibitively expensive, in that it can require a large
    # amount of memory.
#
# This implementation avoids this memory blow up by only computing matmuls
# with the factors. In this way, we don't have to realize the dense matrix.
# In terms of complexity, if we have Kronecker Factors of size:
# (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \prod n_i, and we
# have as input a [N, M] matrix, the naive approach would take O(N^2 M).
# With this approach (ignoring reshaping of tensors and transposes for now),
# the time complexity can be O(M * (\sum n_i) * N). There is also the
    # benefit of batched multiplication (in this example, the batch size is
    # roughly M * N), so this can be much faster. However, not factored in is
    # the cost of the several tensor transposes, which can affect cache
    # behavior.
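    # As a rough data point: with two square factors of size n1 = n2 = 100
    # (so N = 10,000) and M = 1, the naive dense approach costs on the order
    # of N^2 = 10^8 multiplies, while the factored approach costs on the order
    # of (n1 + n2) * N = 2 * 10^6.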
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
if adjoint_arg:
x = linalg.adjoint(x)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)
    # x has shape [B, R, C], where B represents some number of batch dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(x, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^T) = (AX^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].matvec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if x.shape.is_fully_defined():
column_dim = x.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
x.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _determinant(self):
# Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m
# matrix, and X2 is an n x n matrix. We can iteratively apply this property
# to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the
# domain dimension of all operators, then we have:
# |X1 x X2 x X3 ...| =
# |X1| ** (T / m) * |X2 x X3 ... | ** m =
# |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... =
# |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n)
# And by doing induction we have product(|X_i| ** (T / dim(X_i))).
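    # For example, with X1 = [[2.]] (m = 1) and X2 = 3 * I_2 (n = 2), T = 2 and
    # the formula gives |X1| ** (T / m) * |X2| ** (T / n) = 2 ** 2 * 9 ** 1 = 36,
    # which matches det(X1 x X2) = det(diag(6, 6)) = 36.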
total = self.domain_dimension_tensor()
determinant = 1.
for operator in self.operators:
determinant *= operator.determinant() ** math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return determinant
def _log_abs_determinant(self):
# This will be sum((total / dim(x_i)) * log |X_i|)
total = self.domain_dimension_tensor()
log_abs_det = 0.
for operator in self.operators:
log_abs_det += operator.log_abs_determinant() * math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return log_abs_det
def _trace(self):
# tr(A x B) = tr(A) * tr(B)
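    # For example, tr(I_2 x B) = 2 * tr(B) for any square B.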
trace = 1.
for operator in self.operators:
trace *= operator.trace()
return trace
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# Here we follow the same use of Roth's column lemma as in `matmul`, with
# the key difference that we replace all `matmul` instances with `solve`.
# This follows from the property that inv(A x B) = inv(A) x inv(B).
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
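    # Since inv(A x B) = inv(A) x inv(B), solving (A x B) z = vec X amounts to
    # computing vec(inv(B) X inv(A)^T), so each factor is only ever solved
    # against much smaller right-hand sides.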
if adjoint_arg:
rhs = linalg.adjoint(rhs)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)
    # rhs has shape [B, R, C], where B represents some number of batch dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(rhs, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
      # We are computing (X (A^-1)^T) = (A^-1 X^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].solvevec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if rhs.shape.is_fully_defined():
column_dim = rhs.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
rhs.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _diag_part(self):
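    # For square factors, diag(A x B) is the flattened outer product of diag(A)
    # and diag(B); the broadcasting below computes this factor by factor.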
diag_part = self.operators[0].diag_part()
for operator in self.operators[1:]:
diag_part = diag_part[..., :, array_ops.newaxis]
op_diag_part = operator.diag_part()[..., array_ops.newaxis, :]
diag_part *= op_diag_part
diag_part = array_ops.reshape(
diag_part,
shape=array_ops.concat(
[array_ops.shape(diag_part)[:-2], [-1]], axis=0))
if self.range_dimension > self.domain_dimension:
diag_dimension = self.domain_dimension
else:
diag_dimension = self.range_dimension
diag_part.set_shape(
self.batch_shape.concatenate(diag_dimension))
return diag_part
def _to_dense(self):
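    # (A x B)[i * R2 + k, j * C2 + l] = A[i, j] * B[k, l]. The newaxis /
    # broadcast pattern below builds exactly this block structure before
    # flattening it to a [B, R1 * R2, C1 * C2] matrix.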
product = self.operators[0].to_dense()
for operator in self.operators[1:]:
# Product has shape [B, R1, 1, C1].
product = product[
..., :, array_ops.newaxis, :, array_ops.newaxis]
# Operator has shape [B, 1, R2, 1, C2].
op_to_mul = operator.to_dense()[
..., array_ops.newaxis, :, array_ops.newaxis, :]
# This is now [B, R1, R2, C1, C2].
product *= op_to_mul
# Now merge together dimensions to get [B, R1 * R2, C1 * C2].
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
product.set_shape(self.shape)
return product
def _assert_non_singular(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_non_singular() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be invertible.")
def _assert_self_adjoint(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_self_adjoint() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be self adjoint.")
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_kronecker.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for linear operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperator"]
# TODO(langmore) Use matrix_solve_ls for singular or non-square matrices.
@tf_export("linalg.LinearOperator")
@six.add_metaclass(abc.ABCMeta)
class LinearOperator(object):
"""Base class defining a [batch of] linear operator[s].
Subclasses of `LinearOperator` provide access to common methods on a
(batch) matrix, without the need to materialize the matrix. This allows:
* Matrix free computations
* Operators that take advantage of special structure, while providing a
consistent API to users.
#### Subclassing
To enable a public method, subclasses should implement the leading-underscore
version of the method. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable
`matmul(x, adjoint=False, name="matmul")` a subclass should implement
`_matmul(x, adjoint=False)`.
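  As a minimal illustrative sketch (the class below is hypothetical, not part
  of the API), a subclass wrapping a dense [batch] matrix might look like:

  ```python
  class MyDenseOperator(LinearOperator):
    # Acts like the [batch] matrix `m` (illustrative only).

    def __init__(self, m):
      self._m = ops.convert_to_tensor(m, name="m")
      super(MyDenseOperator, self).__init__(dtype=self._m.dtype)

    def _shape(self):
      return self._m.get_shape()

    def _shape_tensor(self):
      return array_ops.shape(self._m)

    def _matmul(self, x, adjoint=False, adjoint_arg=False):
      return math_ops.matmul(
          self._m, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
  ```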
#### Performance contract
Subclasses should only implement the assert methods
(e.g. `assert_non_singular`) if they can be done in less than `O(N^3)`
time.
Class docstrings should contain an explanation of computational complexity.
Since this is a high-performance library, attention should be paid to detail,
and explanations can include constants as well as Big-O notation.
#### Shape compatibility
`LinearOperator` subclasses should operate on a [batch] matrix with
compatible shape. Class docstrings should define what is meant by compatible
shape. Some subclasses may not support batching.
Examples:
`x` is a batch matrix with compatible shape for `matmul` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
x.shape = [B1,...,Bb] + [N, R]
```
`rhs` is a batch matrix with compatible shape for `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], b >= 0,
rhs.shape = [B1,...,Bb] + [M, R]
```
#### Example docstring for subclasses.
This operator acts like a (batch) matrix `A` with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `m x n` matrix. Again, this matrix `A` may not be materialized, but for
purposes of identifying and working with compatible arguments the shape is
relevant.
Examples:
```python
some_tensor = ... shape = ????
operator = MyLinOp(some_tensor)
operator.shape()
==> [2, 4, 4]
operator.log_abs_determinant()
==> Shape [2] Tensor
x = ... Shape [2, 4, 5] Tensor
operator.matmul(x)
==> Shape [2, 4, 5] Tensor
```
#### Shape compatibility
This operator acts on batch matrices with compatible shape.
FILL IN WHAT IS MEANT BY COMPATIBLE SHAPE
#### Performance
FILL THIS IN
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
dtype,
graph_parents=None,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize the `LinearOperator`.
**This is a private method for subclass use.**
**Subclasses should copy-paste this `__init__` documentation.**
Args:
      dtype: The type of this `LinearOperator`. Arguments to `matmul` and
`solve` will have to be this type.
      graph_parents: Python list of graph prerequisites of this `LinearOperator`.
        Typically tensors that are passed during initialization.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `dtype` is real, this is equivalent to being symmetric.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
ValueError: If any member of graph_parents is `None` or not a `Tensor`.
ValueError: If hints are set incorrectly.
"""
# Check and auto-set flags.
if is_positive_definite:
if is_non_singular is False:
raise ValueError("A positive definite matrix is always non-singular.")
is_non_singular = True
if is_non_singular:
if is_square is False:
raise ValueError("A non-singular matrix is always square.")
is_square = True
if is_self_adjoint:
if is_square is False:
raise ValueError("A self-adjoint matrix is always square.")
is_square = True
self._is_square_set_or_implied_by_hints = is_square
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._graph_parents = graph_parents
self._is_non_singular = is_non_singular
self._is_self_adjoint = is_self_adjoint
self._is_positive_definite = is_positive_definite
self._name = name or type(self).__name__
@contextlib.contextmanager
def _name_scope(self, name=None):
"""Helper function to standardize op scope."""
full_name = self.name
if name is not None:
full_name += "/" + name
with ops.name_scope(full_name) as scope:
yield scope
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `LinearOperator`."""
return self._dtype
@property
def name(self):
"""Name prepended to all ops created by this `LinearOperator`."""
return self._name
@property
def graph_parents(self):
"""List of graph dependencies of this `LinearOperator`."""
return self._graph_parents
@property
def is_non_singular(self):
return self._is_non_singular
@property
def is_self_adjoint(self):
return self._is_self_adjoint
@property
def is_positive_definite(self):
return self._is_positive_definite
@property
def is_square(self):
"""Return `True/False` depending on if this operator is square."""
# Static checks done after __init__. Why? Because domain/range dimension
# sometimes requires lots of work done in the derived class after init.
auto_square_check = self.domain_dimension == self.range_dimension
if self._is_square_set_or_implied_by_hints is False and auto_square_check:
raise ValueError(
"User set is_square hint to False, but the operator was square.")
if self._is_square_set_or_implied_by_hints is None:
return auto_square_check
return self._is_square_set_or_implied_by_hints
@abc.abstractmethod
def _shape(self):
# Write this in derived class to enable all static shape methods.
raise NotImplementedError("_shape is not implemented.")
@property
def shape(self):
"""`TensorShape` of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb, M, N])`, equivalent to `A.get_shape()`.
Returns:
`TensorShape`, statically determined, may be undefined.
"""
return self._shape()
@abc.abstractmethod
def _shape_tensor(self):
raise NotImplementedError("_shape_tensor is not implemented.")
def shape_tensor(self, name="shape_tensor"):
"""Shape of this `LinearOperator`, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb, M, N]`, equivalent to `tf.shape(A)`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.shape.is_fully_defined():
return linear_operator_util.shape_tensor(self.shape.as_list())
else:
return self._shape_tensor()
@property
def batch_shape(self):
"""`TensorShape` of batch dimensions of this `LinearOperator`.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns
`TensorShape([B1,...,Bb])`, equivalent to `A.get_shape()[:-2]`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
# Derived classes get this "for free" once .shape is implemented.
return self.shape[:-2]
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of batch dimensions of this operator, determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns a `Tensor` holding
`[B1,...,Bb]`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.batch_shape.is_fully_defined():
return linear_operator_util.shape_tensor(
self.batch_shape.as_list(), name="batch_shape")
else:
return self.shape_tensor()[:-2]
@property
def tensor_rank(self, name="tensor_rank"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
Python integer, or None if the tensor rank is undefined.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
return self.shape.ndims
def tensor_rank_tensor(self, name="tensor_rank_tensor"):
"""Rank (in the sense of tensors) of matrix corresponding to this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `b + 2`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`, determined at runtime.
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
if self.tensor_rank is not None:
return ops.convert_to_tensor(self.tensor_rank)
else:
return array_ops.size(self.shape_tensor())
@property
def domain_dimension(self):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.rank is None:
return tensor_shape.Dimension(None)
else:
return self.shape.dims[-1]
def domain_dimension_tensor(self, name="domain_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the domain of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `N`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
return self.shape_tensor()[-1]
@property
def range_dimension(self):
"""Dimension (in the sense of vector spaces) of the range of this operator.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Returns:
`Dimension` object.
"""
# Derived classes get this "for free" once .shape is implemented.
if self.shape.dims:
return self.shape.dims[-2]
else:
return tensor_shape.Dimension(None)
def range_dimension_tensor(self, name="range_dimension_tensor"):
"""Dimension (in the sense of vector spaces) of the range of this operator.
Determined at runtime.
If this operator acts like the batch matrix `A` with
`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.
Args:
name: A name for this `Op`.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with self._name_scope(name):
# Prefer to use statically defined shape if available.
dim_value = tensor_shape.dimension_value(self.range_dimension)
if dim_value is not None:
return ops.convert_to_tensor(dim_value)
else:
return self.shape_tensor()[-2]
def _assert_non_singular(self):
"""Private default implementation of _assert_non_singular."""
logging.warn(
"Using (possibly slow) default implementation of assert_non_singular."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return self.assert_positive_definite()
else:
singular_values = linalg_ops.svd(self.to_dense(), compute_uv=False)
# TODO(langmore) Add .eig and .cond as methods.
cond = (math_ops.reduce_max(singular_values, axis=-1) /
math_ops.reduce_min(singular_values, axis=-1))
return check_ops.assert_less(
cond,
self._max_condition_number_to_be_non_singular(),
message="Singular matrix up to precision epsilon.")
def _max_condition_number_to_be_non_singular(self):
"""Return the maximum condition number that we consider nonsingular."""
with ops.name_scope("max_nonsingular_condition_number"):
dtype_eps = np.finfo(self.dtype.as_numpy_dtype).eps
eps = math_ops.cast(
math_ops.reduce_max([
100.,
math_ops.cast(self.range_dimension_tensor(), self.dtype),
math_ops.cast(self.domain_dimension_tensor(), self.dtype)
]), self.dtype) * dtype_eps
return 1. / eps
def assert_non_singular(self, name="assert_non_singular"):
"""Returns an `Op` that asserts this operator is non singular.
This operator is considered non-singular if
```
    ConditionNumber < 1 / (max{100, range_dimension, domain_dimension} * eps),
eps := np.finfo(self.dtype.as_numpy_dtype).eps
```
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is singular.
"""
with self._name_scope(name):
return self._assert_non_singular()
def _assert_positive_definite(self):
"""Default implementation of _assert_positive_definite."""
logging.warn(
"Using (possibly slow) default implementation of "
"assert_positive_definite."
" Requires conversion to a dense matrix and O(N^3) operations.")
# If the operator is self-adjoint, then checking that
# Cholesky decomposition succeeds + results in positive diag is necessary
# and sufficient.
if self.is_self_adjoint:
return check_ops.assert_positive(
array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense())),
message="Matrix was not positive definite.")
# We have no generic check for positive definite.
raise NotImplementedError("assert_positive_definite is not implemented.")
def assert_positive_definite(self, name="assert_positive_definite"):
"""Returns an `Op` that asserts this operator is positive definite.
Here, positive definite means that the quadratic form `x^H A x` has positive
real part for all nonzero `x`. Note that we do not require the operator to
be self-adjoint to be positive definite.
Args:
name: A name to give this `Op`.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not positive definite.
"""
with self._name_scope(name):
return self._assert_positive_definite()
def _assert_self_adjoint(self):
dense = self.to_dense()
logging.warn(
"Using (possibly slow) default implementation of assert_self_adjoint."
" Requires conversion to a dense matrix.")
return check_ops.assert_equal(
dense,
linalg.adjoint(dense),
message="Matrix was not equal to its adjoint.")
def assert_self_adjoint(self, name="assert_self_adjoint"):
"""Returns an `Op` that asserts this operator is self-adjoint.
Here we check that this operator is *exactly* equal to its hermitian
transpose.
Args:
name: A string name to prepend to created ops.
Returns:
An `Assert` `Op`, that, when run, will raise an `InvalidArgumentError` if
the operator is not self-adjoint.
"""
with self._name_scope(name):
return self._assert_self_adjoint()
def _check_input_dtype(self, arg):
"""Check that arg.dtype == self.dtype."""
if arg.dtype != self.dtype:
raise TypeError(
"Expected argument to have dtype %s. Found: %s in tensor %s" %
(self.dtype, arg.dtype, arg))
@abc.abstractmethod
def _matmul(self, x, adjoint=False, adjoint_arg=False):
raise NotImplementedError("_matmul is not implemented.")
def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"):
"""Transform [batch] matrix `x` with left multiplication: `x --> Ax`.
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
X = ... # shape [..., N, R], batch matrix, R > 0.
Y = operator.matmul(X)
Y.shape
==> [..., M, R]
    Y[..., :, r] = sum_j A[..., :, j] X[..., j, r]
```
Args:
x: `LinearOperator` or `Tensor` with compatible shape and same `dtype` as
`self`. See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
adjoint_arg: Python `bool`. If `True`, compute `A x^H` where `x^H` is
the hermitian transpose (transposition and complex conjugation).
name: A name for this `Op`.
Returns:
A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`
as `self`.
"""
if isinstance(x, LinearOperator):
left_operator = self.adjoint() if adjoint else self
right_operator = x.adjoint() if adjoint_arg else x
if (right_operator.range_dimension is not None and
left_operator.domain_dimension is not None and
right_operator.range_dimension != left_operator.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `x` to have dimension"
" {} but got {}.".format(
left_operator.domain_dimension, right_operator.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.matmul(left_operator, right_operator)
with self._name_scope(name):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
x.get_shape()[arg_dim])
return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _matvec(self, x, adjoint=False):
x_mat = array_ops.expand_dims(x, axis=-1)
y_mat = self.matmul(x_mat, adjoint=adjoint)
return array_ops.squeeze(y_mat, axis=-1)
def matvec(self, x, adjoint=False, name="matvec"):
"""Transform [batch] vector `x` with left multiplication: `x --> Ax`.
```python
    # Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
X = ... # shape [..., N], batch vector
Y = operator.matvec(X)
Y.shape
==> [..., M]
Y[..., :] = sum_j A[..., :, j] X[..., j]
```
Args:
x: `Tensor` with compatible shape and same `dtype` as `self`.
`x` is treated as a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.
name: A name for this `Op`.
Returns:
A `Tensor` with shape `[..., M]` and same `dtype` as `self`.
"""
with self._name_scope(name):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
self_dim = -2 if adjoint else -1
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(x.get_shape()[-1])
return self._matvec(x, adjoint=adjoint)
def _determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
return math_ops.exp(self.log_abs_determinant())
return linalg_ops.matrix_determinant(self.to_dense())
def determinant(self, name="det"):
"""Determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._determinant()
def _log_abs_determinant(self):
logging.warn(
"Using (possibly slow) default implementation of determinant."
" Requires conversion to a dense matrix and O(N^3) operations.")
if self._can_use_cholesky():
diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
_, log_abs_det = linalg.slogdet(self.to_dense())
return log_abs_det
def log_abs_determinant(self, name="log_abs_det"):
"""Log absolute value of determinant for every batch member.
Args:
name: A name for this `Op`.
Returns:
`Tensor` with shape `self.batch_shape` and same `dtype` as `self`.
Raises:
NotImplementedError: If `self.is_square` is `False`.
"""
if self.is_square is False:
raise NotImplementedError(
"Determinant not implemented for an operator that is expected to "
"not be square.")
with self._name_scope(name):
return self._log_abs_determinant()
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
"""Default implementation of _solve."""
if self.is_square is False:
raise NotImplementedError(
"Solve is not yet implemented for non-square operators.")
logging.warn(
"Using (possibly slow) default implementation of solve."
" Requires conversion to a dense matrix and O(N^3) operations.")
rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
if self._can_use_cholesky():
return linear_operator_util.cholesky_solve_with_broadcast(
linalg_ops.cholesky(self.to_dense()), rhs)
return linear_operator_util.matrix_solve_with_broadcast(
self.to_dense(), rhs, adjoint=adjoint)
def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
"""Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve R > 0 linear systems for every member of the batch.
RHS = ... # shape [..., M, R]
X = operator.solve(RHS)
# X[..., :, r] is the solution to the r'th linear system
# sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]
operator.matmul(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator and compatible shape.
`rhs` is treated like a [batch] matrix meaning for every set of leading
dimensions, the last two dimensions defines a matrix.
See class docstring for definition of compatibility.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
adjoint_arg: Python `bool`. If `True`, solve `A X = rhs^H` where `rhs^H`
is the hermitian transpose (transposition and complex conjugation).
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N, R]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
if self.is_non_singular is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"be singular.")
if self.is_square is False:
raise NotImplementedError(
"Exact solve not implemented for an operator that is expected to "
"not be square.")
if isinstance(rhs, LinearOperator):
left_operator = self.adjoint() if adjoint else self
right_operator = rhs.adjoint() if adjoint_arg else rhs
if (right_operator.range_dimension is not None and
left_operator.domain_dimension is not None and
right_operator.range_dimension != left_operator.domain_dimension):
raise ValueError(
"Operators are incompatible. Expected `rhs` to have dimension"
" {} but got {}.".format(
left_operator.domain_dimension, right_operator.range_dimension))
with self._name_scope(name):
return linear_operator_algebra.solve(left_operator, right_operator)
with self._name_scope(name):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
arg_dim = -1 if adjoint_arg else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.get_shape()[arg_dim])
return self._solve(rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
def _solvevec(self, rhs, adjoint=False):
"""Default implementation of _solvevec."""
rhs_mat = array_ops.expand_dims(rhs, axis=-1)
solution_mat = self.solve(rhs_mat, adjoint=adjoint)
return array_ops.squeeze(solution_mat, axis=-1)
def solvevec(self, rhs, adjoint=False, name="solve"):
"""Solve single equation with best effort: `A X = rhs`.
The returned `Tensor` will be close to an exact solution if `A` is well
conditioned. Otherwise closeness will vary. See class docstring for details.
Examples:
```python
# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]
operator = LinearOperator(...)
operator.shape = [..., M, N]
# Solve one linear system for every member of the batch.
RHS = ... # shape [..., M]
X = operator.solvevec(RHS)
# X is the solution to the linear system
# sum_j A[..., :, j] X[..., j] = RHS[..., :]
operator.matvec(X)
==> RHS
```
Args:
rhs: `Tensor` with same `dtype` as this operator.
`rhs` is treated like a [batch] vector meaning for every set of leading
dimensions, the last dimension defines a vector. See class docstring
for definition of compatibility regarding batch dimensions.
adjoint: Python `bool`. If `True`, solve the system involving the adjoint
of this `LinearOperator`: `A^H X = rhs`.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[...,N]` and same `dtype` as `rhs`.
Raises:
NotImplementedError: If `self.is_non_singular` or `is_square` is False.
"""
with self._name_scope(name):
rhs = ops.convert_to_tensor(rhs, name="rhs")
self._check_input_dtype(rhs)
self_dim = -1 if adjoint else -2
tensor_shape.dimension_at_index(
self.shape, self_dim).assert_is_compatible_with(
rhs.get_shape()[-1])
return self._solvevec(rhs, adjoint=adjoint)
def adjoint(self, name="adjoint"):
"""Returns the adjoint of the current `LinearOperator`.
Given `A` representing this `LinearOperator`, return `A*`.
Note that calling `self.adjoint()` and `self.H` are equivalent.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the adjoint of this `LinearOperator`.
"""
if self.is_self_adjoint is True: # pylint: disable=g-bool-id-comparison
return self
with self._name_scope(name):
return linear_operator_algebra.adjoint(self)
# self.H is equivalent to self.adjoint().
H = property(adjoint, None)
def inverse(self, name="inverse"):
"""Returns the Inverse of this `LinearOperator`.
Given `A` representing this `LinearOperator`, return a `LinearOperator`
representing `A^-1`.
Args:
name: A name scope to use for ops added by this method.
Returns:
`LinearOperator` representing inverse of this matrix.
Raises:
ValueError: When the `LinearOperator` is not hinted to be `non_singular`.
"""
if self.is_square is False: # pylint: disable=g-bool-id-comparison
raise ValueError("Cannot take the Inverse: This operator represents "
"a non square matrix.")
if self.is_non_singular is False: # pylint: disable=g-bool-id-comparison
raise ValueError("Cannot take the Inverse: This operator represents "
"a singular matrix.")
with self._name_scope(name):
return linear_operator_algebra.inverse(self)
def cholesky(self, name="cholesky"):
"""Returns a Cholesky factor as a `LinearOperator`.
Given `A` representing this `LinearOperator`, if `A` is positive definite
    self-adjoint, return `L`, where `A = L L^T`, i.e. the Cholesky
decomposition.
Args:
name: A name for this `Op`.
Returns:
`LinearOperator` which represents the lower triangular matrix
in the Cholesky decomposition.
Raises:
ValueError: When the `LinearOperator` is not hinted to be positive
definite and self adjoint.
"""
if not self._can_use_cholesky():
raise ValueError("Cannot take the Cholesky decomposition: "
"Not a positive definite self adjoint matrix.")
with self._name_scope(name):
return linear_operator_algebra.cholesky(self)
def _to_dense(self):
"""Generic and often inefficient implementation. Override often."""
logging.warn("Using (possibly slow) default implementation of to_dense."
" Converts by self.matmul(identity).")
if self.batch_shape.is_fully_defined():
batch_shape = self.batch_shape
else:
batch_shape = self.batch_shape_tensor()
dim_value = tensor_shape.dimension_value(self.domain_dimension)
if dim_value is not None:
n = dim_value
else:
n = self.domain_dimension_tensor()
eye = linalg_ops.eye(num_rows=n, batch_shape=batch_shape, dtype=self.dtype)
return self.matmul(eye)
def to_dense(self, name="to_dense"):
"""Return a dense (batch) matrix representing this operator."""
with self._name_scope(name):
return self._to_dense()
def _diag_part(self):
"""Generic and often inefficient implementation. Override often."""
return array_ops.matrix_diag_part(self.to_dense())
def diag_part(self, name="diag_part"):
"""Efficiently get the [batch] diagonal part of this operator.
If this operator has shape `[B1,...,Bb, M, N]`, this returns a
`Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
`diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.
```
my_operator = LinearOperatorDiag([1., 2.])
# Efficiently get the diagonal
my_operator.diag_part()
==> [1., 2.]
# Equivalent, but inefficient method
tf.linalg.diag_part(my_operator.to_dense())
==> [1., 2.]
```
Args:
name: A name for this `Op`.
Returns:
diag_part: A `Tensor` of same `dtype` as self.
"""
with self._name_scope(name):
return self._diag_part()
def _trace(self):
return math_ops.reduce_sum(self.diag_part(), axis=-1)
def trace(self, name="trace"):
"""Trace of the linear operator, equal to sum of `self.diag_part()`.
If the operator is square, this is also the sum of the eigenvalues.
Args:
name: A name for this `Op`.
Returns:
Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`.
"""
with self._name_scope(name):
return self._trace()
def _add_to_tensor(self, x):
# Override if a more efficient implementation is available.
return self.to_dense() + x
def add_to_tensor(self, x, name="add_to_tensor"):
"""Add matrix represented by this operator to `x`. Equivalent to `A + x`.
Args:
x: `Tensor` with same `dtype` and shape broadcastable to `self.shape`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with self._name_scope(name):
x = ops.convert_to_tensor(x, name="x")
self._check_input_dtype(x)
return self._add_to_tensor(x)
def _can_use_cholesky(self):
return self.is_self_adjoint and self.is_positive_definite
# Overrides for tf.linalg functions. This allows a LinearOperator to be used in
# place of a Tensor.
# For instance tf.trace(linop) and linop.trace() both work.
@dispatch.dispatch_for_types(linalg.adjoint, LinearOperator)
def _adjoint(matrix, name=None):
return matrix.adjoint(name)
@dispatch.dispatch_for_types(linalg.cholesky, LinearOperator)
def _cholesky(input, name=None): # pylint:disable=redefined-builtin
return input.cholesky(name)
# The signature has to match the one in python/ops/array_ops.py,
# so we have k and padding_value even though we don't use them here.
@dispatch.dispatch_for_types(linalg.diag_part, LinearOperator)
def _diag_part(input, name="diag_part", k=0, padding_value=0): # pylint:disable=redefined-builtin, unused-argument
return input.diag_part(name)
@dispatch.dispatch_for_types(linalg.det, LinearOperator)
def _det(input, name=None): # pylint:disable=redefined-builtin
return input.determinant(name)
@dispatch.dispatch_for_types(linalg.inv, LinearOperator)
def _inverse(input, adjoint=False, name=None): # pylint:disable=redefined-builtin
inv = input.inverse(name)
if adjoint:
inv = inv.adjoint()
return inv
@dispatch.dispatch_for_types(linalg.logdet, LinearOperator)
def _logdet(matrix, name=None):
if matrix.is_positive_definite and matrix.is_self_adjoint:
return matrix.log_abs_determinant(name)
raise ValueError("Expected matrix to be self-adjoint positive definite.")
@dispatch.dispatch_for_types(math_ops.matmul, LinearOperator)
def _matmul( # pylint:disable=missing-docstring
a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
if transpose_a or transpose_b:
raise ValueError("Transposing not supported at this time.")
if a_is_sparse or b_is_sparse:
raise ValueError("Sparse methods not supported at this time.")
if not isinstance(a, LinearOperator):
# We use the identity (B^HA^H)^H = AB
adjoint_matmul = b.matmul(
a,
adjoint=(not adjoint_b),
adjoint_arg=(not adjoint_a),
name=name)
return linalg.adjoint(adjoint_matmul)
return a.matmul(
b, adjoint=adjoint_a, adjoint_arg=adjoint_b, name=name)
@dispatch.dispatch_for_types(linalg.solve, LinearOperator)
def _solve(
matrix,
rhs,
adjoint=False,
name=None):
if not isinstance(matrix, LinearOperator):
raise ValueError("Passing in `matrix` as a Tensor and `rhs` as a "
"LinearOperator is not supported.")
return matrix.solve(rhs, adjoint=adjoint, name=name)
@dispatch.dispatch_for_types(linalg.trace, LinearOperator)
def _trace(x, name=None):
return x.trace(name)
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Internal utilities for `LinearOperator` classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
def assert_no_entries_with_modulus_zero(
x, message=None, name="assert_no_entries_with_modulus_zero"):
"""Returns `Op` that asserts Tensor `x` has no entries with modulus zero.
Args:
x: Numeric `Tensor`, real, integer, or complex.
message: A string message to prepend to failure message.
name: A name to give this `Op`.
Returns:
An `Op` that asserts `x` has no entries with modulus zero.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype.base_dtype
should_be_nonzero = math_ops.abs(x)
zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype)
return check_ops.assert_less(zero, should_be_nonzero, message=message)
def assert_zero_imag_part(x, message=None, name="assert_zero_imag_part"):
"""Returns `Op` that asserts Tensor `x` has no non-zero imaginary parts.
Args:
x: Numeric `Tensor`, real, integer, or complex.
message: A string message to prepend to failure message.
name: A name to give this `Op`.
Returns:
    An `Op` that asserts `x` has no non-zero imaginary parts.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype.base_dtype
if dtype.is_floating:
return control_flow_ops.no_op()
zero = ops.convert_to_tensor(0, dtype=dtype.real_dtype)
return check_ops.assert_equal(zero, math_ops.imag(x), message=message)
def assert_compatible_matrix_dimensions(operator, x):
"""Assert that an argument to solve/matmul has proper domain dimension.
If `operator.shape[-2:] = [M, N]`, and `x.shape[-2:] = [Q, R]`, then
`operator.matmul(x)` is defined only if `N = Q`. This `Op` returns an
`Assert` that "fires" if this is not the case. Static checks are already
done by the base class `LinearOperator`.
Args:
operator: `LinearOperator`.
x: `Tensor`.
Returns:
`Assert` `Op`.
"""
# Static checks are done in the base class. Only tensor asserts here.
assert_same_dd = check_ops.assert_equal(
array_ops.shape(x)[-2],
operator.domain_dimension_tensor(),
message=("Incompatible matrix dimensions. "
"shape[-2] of argument to be the same as this operator"))
return assert_same_dd
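# A small illustrative sketch (not part of the original file) of the
# compatibility rule documented above: with `operator.shape[-2:] = [M, N]`
# and `x.shape[-2:] = [Q, R]`, `operator.matmul(x)` requires `N == Q`.
# Shapes below are assumptions chosen for illustration.
def _example_compatible_matrix_dimensions():  # pragma: no cover
  import tensorflow as tf
  operator = tf.linalg.LinearOperatorFullMatrix(tf.ones([2, 3]))  # M=2, N=3
  x = tf.ones([3, 4])                                             # Q=3, R=4
  return operator.matmul(x)  # Valid since N == Q == 3; result shape [2, 4].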
def assert_is_batch_matrix(tensor):
"""Static assert that `tensor` has rank `2` or higher."""
sh = tensor.get_shape()
if sh.ndims is not None and sh.ndims < 2:
raise ValueError(
"Expected [batch] matrix to have at least two dimensions. Found: "
"%s" % tensor)
def shape_tensor(shape, name=None):
"""Convert Tensor using default type, unless empty list or tuple."""
# Works just like random_ops._ShapeTensor.
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int32
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name=name)
################################################################################
# Broadcasting versions of common linear algebra functions.
# TODO(b/77519145) Do this more efficiently in some special cases.
################################################################################
def broadcast_matrix_batch_dims(batch_matrices, name=None):
"""Broadcast leading dimensions of zero or more [batch] matrices.
Example broadcasting one batch dim of two simple matrices.
```python
x = [[1, 2],
[3, 4]] # Shape [2, 2], no batch dims
y = [[[1]]] # Shape [1, 1, 1], 1 batch dim of shape [1]
x_bc, y_bc = broadcast_matrix_batch_dims([x, y])
x_bc
==> [[[1, 2],
[3, 4]]] # Shape [1, 2, 2], 1 batch dim of shape [1].
y_bc
==> same as y
```
Example broadcasting many batch dims
```python
x = tf.random.normal(shape=(2, 3, 1, 4, 4))
y = tf.random.normal(shape=(1, 3, 2, 5, 5))
x_bc, y_bc = broadcast_matrix_batch_dims([x, y])
x_bc.shape
==> (2, 3, 2, 4, 4)
y_bc.shape
==> (2, 3, 2, 5, 5)
```
Args:
batch_matrices: Iterable of `Tensor`s, each having two or more dimensions.
name: A string name to prepend to created ops.
Returns:
    bcast_matrices: List of `Tensor`s, with `bcast_matrices[i]` containing
the values from `batch_matrices[i]`, with possibly broadcast batch dims.
Raises:
ValueError: If any input `Tensor` is statically determined to have less
than two dimensions.
"""
with ops.name_scope(
name or "broadcast_matrix_batch_dims", values=batch_matrices):
check_ops.assert_proper_iterable(batch_matrices)
batch_matrices = list(batch_matrices)
for i, mat in enumerate(batch_matrices):
batch_matrices[i] = ops.convert_to_tensor(mat)
assert_is_batch_matrix(batch_matrices[i])
if len(batch_matrices) < 2:
return batch_matrices
# Try static broadcasting.
# bcast_batch_shape is the broadcast batch shape of ALL matrices.
# E.g. if batch_matrices = [x, y], with
# x.shape = [2, j, k] (batch shape = [2])
# y.shape = [3, 1, l, m] (batch shape = [3, 1])
# ==> bcast_batch_shape = [3, 2]
bcast_batch_shape = batch_matrices[0].get_shape()[:-2]
for mat in batch_matrices[1:]:
bcast_batch_shape = array_ops.broadcast_static_shape(
bcast_batch_shape,
mat.get_shape()[:-2])
if bcast_batch_shape.is_fully_defined():
for i, mat in enumerate(batch_matrices):
if mat.get_shape()[:-2] != bcast_batch_shape:
bcast_shape = array_ops.concat(
[bcast_batch_shape.as_list(), array_ops.shape(mat)[-2:]], axis=0)
batch_matrices[i] = array_ops.broadcast_to(mat, bcast_shape)
return batch_matrices
# Since static didn't work, do dynamic, which always copies data.
bcast_batch_shape = array_ops.shape(batch_matrices[0])[:-2]
for mat in batch_matrices[1:]:
bcast_batch_shape = array_ops.broadcast_dynamic_shape(
bcast_batch_shape,
array_ops.shape(mat)[:-2])
for i, mat in enumerate(batch_matrices):
batch_matrices[i] = array_ops.broadcast_to(
mat,
array_ops.concat(
[bcast_batch_shape, array_ops.shape(mat)[-2:]], axis=0))
return batch_matrices
def cholesky_solve_with_broadcast(chol, rhs, name=None):
"""Solve systems of linear equations."""
with ops.name_scope(name, "CholeskySolveWithBroadcast", [chol, rhs]):
chol, rhs = broadcast_matrix_batch_dims([chol, rhs])
return linalg_ops.cholesky_solve(chol, rhs)
def matrix_solve_with_broadcast(matrix, rhs, adjoint=False, name=None):
"""Solve systems of linear equations."""
with ops.name_scope(name, "MatrixSolveWithBroadcast", [matrix, rhs]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
rhs = ops.convert_to_tensor(rhs, name="rhs", dtype=matrix.dtype)
# If either matrix/rhs has extra dims, we can reshape to get rid of them.
matrix, rhs, reshape_inv, still_need_to_transpose = _reshape_for_efficiency(
matrix, rhs, adjoint_a=adjoint)
# This will broadcast by brute force if we still need to.
matrix, rhs = broadcast_matrix_batch_dims([matrix, rhs])
solution = linalg_ops.matrix_solve(
matrix, rhs, adjoint=adjoint and still_need_to_transpose)
return reshape_inv(solution)
def matrix_triangular_solve_with_broadcast(matrix,
rhs,
lower=True,
adjoint=False,
name=None):
"""Solves triangular systems of linear equations with by backsubstitution.
Works identically to `tf.linalg.triangular_solve`, but broadcasts batch dims
of `matrix` and `rhs` (by replicating) if they are determined statically to be
different, or if static shapes are not fully defined. Thus, this may result
in an inefficient replication of data.
Args:
matrix: A Tensor. Must be one of the following types:
`float64`, `float32`, `complex64`, `complex128`. Shape is `[..., M, M]`.
rhs: A `Tensor`. Must have the same `dtype` as `matrix`.
Shape is `[..., M, K]`.
lower: An optional `bool`. Defaults to `True`. Indicates whether the
innermost matrices in `matrix` are lower or upper triangular.
adjoint: An optional `bool`. Defaults to `False`. Indicates whether to solve
with matrix or its (block-wise) adjoint.
name: A name for the operation (optional).
Returns:
`Tensor` with same `dtype` as `matrix` and shape `[..., M, K]`.
"""
with ops.name_scope(name, "MatrixTriangularSolve", [matrix, rhs]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
rhs = ops.convert_to_tensor(rhs, name="rhs", dtype=matrix.dtype)
# If either matrix/rhs has extra dims, we can reshape to get rid of them.
matrix, rhs, reshape_inv, still_need_to_transpose = _reshape_for_efficiency(
matrix, rhs, adjoint_a=adjoint)
    # `lower` indicates whether the matrix is lower triangular. If we have
    # manually taken the adjoint inside _reshape_for_efficiency, it is now
    # upper triangular.
if not still_need_to_transpose and adjoint:
lower = not lower
# This will broadcast by brute force if we still need to.
matrix, rhs = broadcast_matrix_batch_dims([matrix, rhs])
solution = linalg_ops.matrix_triangular_solve(
matrix,
rhs,
lower=lower,
adjoint=adjoint and still_need_to_transpose)
return reshape_inv(solution)
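# A hedged usage sketch (not part of the original file) of the broadcasting
# behaviour described in the docstring above: batch dims of `matrix` and
# `rhs` are replicated to a common shape before the triangular solve.
# Shapes are assumptions for illustration only.
def _example_matrix_triangular_solve_with_broadcast():  # pragma: no cover
  import tensorflow as tf
  # Lower-triangular [2, 3, 3] batch of matrices, and a batch-less [3, 1] rhs.
  matrix = tf.linalg.band_part(tf.ones([2, 3, 3]), -1, 0)
  rhs = tf.ones([3, 1])
  # The rhs is broadcast against the batch shape [2]; result shape [2, 3, 1].
  return matrix_triangular_solve_with_broadcast(matrix, rhs, lower=True)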
def _reshape_for_efficiency(a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False):
"""Maybe reshape a, b, and return an inverse map. For matmul/solve."""
def identity(x):
return x
# At this point, we have not taken transpose/adjoint of a/b.
still_need_to_transpose = True
if a.shape.ndims is None or b.shape.ndims is None:
return a, b, identity, still_need_to_transpose
# This could be handled in the future, but seems less common.
if a.shape.ndims >= b.shape.ndims:
return a, b, identity, still_need_to_transpose
# From now on, we might modify b, but will not modify a.
# Suppose:
  #   a.shape =     C + [m, n]
  #   b.shape = S + C + [n, r]
b_extra_ndims = b.shape.ndims - a.shape.ndims
# b_extra_sh = S, b_main_sh = C + [n, r]
b_extra_sh = array_ops.shape(b)[:b_extra_ndims]
b_main_sh = array_ops.shape(b)[b_extra_ndims:]
# No reason to flip unless the extra dims of b are big enough. Why?
# Assume adjoint/transpose = False. Then...
# By not flipping, we have to replicate a to shape
# b_extra_sh + a.shape,
# which could use extra memory. But in all cases, the final output has shape
# b_extra_sh + a.shape[:-1] + [b.shape[-1]]
# So we only end up creating a larger object if the end dim of b is smaller
# than the end dim of a. This often happens, e.g. if b was a vector that was
# expanded to a matrix (by appending a singleton).
# Since adjoint/transpose may not be False, we must make adjustments here.
  # The domain dimension of a, and the dim of b that holds the multiple
  # equations (the final dim, unless transposed/adjointed).
a_domain_sz_ = a.shape[-2 if adjoint_a or transpose_a else -1]
b_eq_sz_ = b.shape[-2 if adjoint_b or transpose_b else -1]
b_extra_sz_ = (
np.prod(b.shape[:b_extra_ndims].as_list())
if b.shape[:b_extra_ndims].is_fully_defined() else None)
if (a_domain_sz_ is not None and b_eq_sz_ is not None and
b_extra_sz_ is not None):
if b_extra_sz_ < 2 or a_domain_sz_ <= b_eq_sz_:
return a, b, identity, still_need_to_transpose
# At this point, we're flipping for sure!
# Any transposes/adjoints will happen here explicitly, rather than in calling
# code. Why? To avoid having to write separate complex code for each case.
if adjoint_a:
a = linalg.adjoint(a)
elif transpose_a:
a = linalg.transpose(a)
if adjoint_b:
b = linalg.adjoint(b)
elif transpose_b:
b = linalg.transpose(b)
still_need_to_transpose = False
# Recompute shapes, since the transpose/adjoint may have changed them.
b_extra_sh = array_ops.shape(b)[:b_extra_ndims]
b_main_sh = array_ops.shape(b)[b_extra_ndims:]
# Permutation to put the extra dims at the end.
perm = (
np.concatenate(
(np.arange(b_extra_ndims, b.shape.ndims),
np.arange(0, b_extra_ndims)), 0))
b_extra_on_end = array_ops.transpose(b, perm=perm)
# Now squash this end into one long dim.
b_squashed_end = array_ops.reshape(
b_extra_on_end, array_ops.concat((b_main_sh[:-1], [-1]), 0))
def reshape_inv(y):
# Expand the extra dims hanging off the end, "b_extra_sh".
# Note we use y_sh[:-1] + [b_main_sh[-1]] rather than b_main_sh, because y
    # could have different batch dims than a and b, because of broadcasting.
y_extra_shape = array_ops.concat(
(array_ops.shape(y)[:-1], [b_main_sh[-1]], b_extra_sh), 0)
y_extra_on_end = array_ops.reshape(y, y_extra_shape)
inverse_perm = np.argsort(perm)
return array_ops.transpose(y_extra_on_end, perm=inverse_perm)
return a, b_squashed_end, reshape_inv, still_need_to_transpose
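# An illustrative sketch (not part of the original file) of the case
# `_reshape_for_efficiency` targets: `a` has no batch dims while `b` carries
# an extra leading dim, so that dim is folded into `b`'s last axis instead of
# replicating `a`. Shapes are assumptions for illustration.
def _example_reshape_for_efficiency():  # pragma: no cover
  import tensorflow as tf
  a = tf.ones([4, 3])     # [m, n] = [4, 3], no batch dims.
  b = tf.ones([5, 3, 1])  # S = [5], [n, r] = [3, 1].
  a2, b2, reshape_inv, _ = _reshape_for_efficiency(a, b)
  # a2 is unchanged ([4, 3]); b2 is squashed to shape [3, 5].
  y = tf.matmul(a2, b2)   # One matmul instead of 5 replicated ones.
  return reshape_inv(y)   # Shape [5, 4, 1], as if `a` had been replicated.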
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registrations for LinearOperator.cholesky."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_block_diag
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_kronecker
from tensorflow.python.ops.linalg import linear_operator_lower_triangular
# By default, compute the Cholesky of the dense matrix, and return a
# LowerTriangular operator. Methods below specialize this registration.
@linear_operator_algebra.RegisterCholesky(linear_operator.LinearOperator)
def _cholesky_linear_operator(linop):
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
linalg_ops.cholesky(linop.to_dense()),
is_non_singular=True,
is_self_adjoint=False,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_diag.LinearOperatorDiag)
def _cholesky_diag(diag_operator):
return linear_operator_diag.LinearOperatorDiag(
math_ops.sqrt(diag_operator.diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_identity.LinearOperatorIdentity)
def _cholesky_identity(identity_operator):
return linear_operator_identity.LinearOperatorIdentity(
num_rows=identity_operator._num_rows, # pylint: disable=protected-access
batch_shape=identity_operator.batch_shape,
dtype=identity_operator.dtype,
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_identity.LinearOperatorScaledIdentity)
def _cholesky_scaled_identity(identity_operator):
return linear_operator_identity.LinearOperatorScaledIdentity(
num_rows=identity_operator._num_rows, # pylint: disable=protected-access
multiplier=math_ops.sqrt(identity_operator.multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_block_diag.LinearOperatorBlockDiag)
def _cholesky_block_diag(block_diag_operator):
# We take the cholesky of each block on the diagonal.
return linear_operator_block_diag.LinearOperatorBlockDiag(
operators=[
operator.cholesky() for operator in block_diag_operator.operators],
is_non_singular=True,
is_self_adjoint=False,
is_square=True)
@linear_operator_algebra.RegisterCholesky(
linear_operator_kronecker.LinearOperatorKronecker)
def _cholesky_kronecker(kronecker_operator):
# Cholesky decomposition of a Kronecker product is the Kronecker product
# of cholesky decompositions.
return linear_operator_kronecker.LinearOperatorKronecker(
operators=[
operator.cholesky() for operator in kronecker_operator.operators],
is_non_singular=True,
is_self_adjoint=False,
is_square=True)
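# A hedged usage sketch (not part of the original file) of how these
# registrations surface through the public API: `cholesky()` on a suitably
# hinted `LinearOperator` returns another `LinearOperator` rather than a
# dense Tensor. Values are assumptions for illustration.
def _example_cholesky_registration():  # pragma: no cover
  import tensorflow as tf
  operator = tf.linalg.LinearOperatorDiag(
      [4., 9.], is_self_adjoint=True, is_positive_definite=True)
  return operator.cholesky()  # LinearOperatorDiag with diag [2., 3.].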
|
tensorflow-master
|
tensorflow/python/ops/linalg/cholesky_registrations.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registrations for LinearOperator.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_algebra
from tensorflow.python.ops.linalg import linear_operator_circulant
from tensorflow.python.ops.linalg import linear_operator_composition
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.linalg import linear_operator_identity
from tensorflow.python.ops.linalg import linear_operator_lower_triangular
from tensorflow.python.ops.linalg import linear_operator_zeros
from tensorflow.python.ops.linalg import registrations_util
# By default, use a LinearOperatorComposition to delay the computation.
@linear_operator_algebra.RegisterMatmul(
linear_operator.LinearOperator, linear_operator.LinearOperator)
def _matmul_linear_operator(linop_a, linop_b):
"""Generic matmul of two `LinearOperator`s."""
is_square = registrations_util.is_square(linop_a, linop_b)
is_non_singular = None
is_self_adjoint = None
is_positive_definite = None
if is_square:
is_non_singular = registrations_util.combined_non_singular_hint(
linop_a, linop_b)
elif is_square is False: # pylint:disable=g-bool-id-comparison
is_non_singular = False
is_self_adjoint = False
is_positive_definite = False
return linear_operator_composition.LinearOperatorComposition(
operators=[linop_a, linop_b],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
)
# Identity
@linear_operator_algebra.RegisterMatmul(
linear_operator_identity.LinearOperatorIdentity,
linear_operator.LinearOperator)
def _matmul_linear_operator_identity_left(identity, linop):
del identity
return linop
@linear_operator_algebra.RegisterMatmul(
linear_operator.LinearOperator,
linear_operator_identity.LinearOperatorIdentity)
def _matmul_linear_operator_identity_right(linop, identity):
del identity
return linop
# Zeros
@linear_operator_algebra.RegisterMatmul(
linear_operator.LinearOperator,
linear_operator_zeros.LinearOperatorZeros)
def _matmul_linear_operator_zeros_right(linop, zeros):
if not zeros.is_square or not linop.is_square:
raise ValueError("Matmul with non-square `LinearOperator`s or non-square "
"`LinearOperatorZeros` not supported at this time.")
return zeros
@linear_operator_algebra.RegisterMatmul(
linear_operator_zeros.LinearOperatorZeros,
linear_operator.LinearOperator)
def _matmul_linear_operator_zeros_left(zeros, linop):
if not zeros.is_square or not linop.is_square:
raise ValueError("Matmul with non-square `LinearOperator`s or non-square "
"`LinearOperatorZeros` not supported at this time.")
return zeros
# Diag.
@linear_operator_algebra.RegisterMatmul(
linear_operator_diag.LinearOperatorDiag,
linear_operator_diag.LinearOperatorDiag)
def _matmul_linear_operator_diag(linop_a, linop_b):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_a.diag * linop_b.diag,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_a, linop_b),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_a, linop_b),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_a, linop_b)),
is_square=True)
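# An illustrative sketch (not part of the original file) of the diagonal
# matmul rule above: the product of two diagonal operators is the diagonal
# operator whose diagonal is the elementwise product. Values are assumptions.
def _example_matmul_diag_diag():  # pragma: no cover
  import tensorflow as tf
  a = tf.linalg.LinearOperatorDiag([1., 2., 3.])
  b = tf.linalg.LinearOperatorDiag([4., 5., 6.])
  return a.matmul(b)  # LinearOperatorDiag with diag [4., 10., 18.].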
@linear_operator_algebra.RegisterMatmul(
linear_operator_diag.LinearOperatorDiag,
linear_operator_identity.LinearOperatorScaledIdentity)
def _matmul_linear_operator_diag_scaled_identity_right(
linop_diag, linop_scaled_identity):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_diag.diag * linop_scaled_identity.multiplier,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_scaled_identity),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_scaled_identity),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_diag, linop_scaled_identity)),
is_square=True)
@linear_operator_algebra.RegisterMatmul(
linear_operator_identity.LinearOperatorScaledIdentity,
linear_operator_diag.LinearOperatorDiag)
def _matmul_linear_operator_diag_scaled_identity_left(
linop_scaled_identity, linop_diag):
return linear_operator_diag.LinearOperatorDiag(
diag=linop_diag.diag * linop_scaled_identity.multiplier,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_scaled_identity),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_scaled_identity),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_diag, linop_scaled_identity)),
is_square=True)
@linear_operator_algebra.RegisterMatmul(
linear_operator_diag.LinearOperatorDiag,
linear_operator_lower_triangular.LinearOperatorLowerTriangular)
def _matmul_linear_operator_diag_tril(linop_diag, linop_triangular):
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
tril=linop_diag.diag[..., None] * linop_triangular.to_dense(),
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_triangular),
# This is safe to do since the Triangular matrix is only self-adjoint
# when it is a diagonal matrix, and hence commutes.
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_triangular),
is_positive_definite=None,
is_square=True)
@linear_operator_algebra.RegisterMatmul(
linear_operator_lower_triangular.LinearOperatorLowerTriangular,
linear_operator_diag.LinearOperatorDiag)
def _matmul_linear_operator_tril_diag(linop_triangular, linop_diag):
return linear_operator_lower_triangular.LinearOperatorLowerTriangular(
tril=linop_triangular.to_dense() * linop_diag.diag,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_diag, linop_triangular),
# This is safe to do since the Triangular matrix is only self-adjoint
# when it is a diagonal matrix, and hence commutes.
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_diag, linop_triangular),
is_positive_definite=None,
is_square=True)
# Circulant.
@linear_operator_algebra.RegisterMatmul(
linear_operator_circulant.LinearOperatorCirculant,
linear_operator_circulant.LinearOperatorCirculant)
def _matmul_linear_operator_circulant_circulant(linop_a, linop_b):
return linear_operator_circulant.LinearOperatorCirculant(
spectrum=linop_a.spectrum * linop_b.spectrum,
is_non_singular=registrations_util.combined_non_singular_hint(
linop_a, linop_b),
is_self_adjoint=registrations_util.combined_commuting_self_adjoint_hint(
linop_a, linop_b),
is_positive_definite=(
registrations_util.combined_commuting_positive_definite_hint(
linop_a, linop_b)),
is_square=True)
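# An illustrative sketch (not part of the original file) of the circulant
# rule above: multiplying two circulant operators multiplies their spectra
# elementwise. Values are assumptions for illustration.
def _example_matmul_circulant_circulant():  # pragma: no cover
  import tensorflow as tf
  spectrum_a = tf.constant([6., 4., 2.], tf.complex64)
  spectrum_b = tf.constant([1., 2., 3.], tf.complex64)
  a = tf.linalg.LinearOperatorCirculant(spectrum_a)
  b = tf.linalg.LinearOperatorCirculant(spectrum_b)
  return a.matmul(b)  # Circulant operator with spectrum [6., 8., 6.].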
|
tensorflow-master
|
tensorflow/python/ops/linalg/matmul_registrations.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes the adjoint of a `LinearOperator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.util.tf_export import tf_export
__all__ = []
@tf_export("linalg.LinearOperatorAdjoint")
class LinearOperatorAdjoint(linear_operator.LinearOperator):
"""`LinearOperator` representing the adjoint of another operator.
This operator represents the adjoint of another operator.
```python
# Create a 2 x 2 linear operator.
  operator = LinearOperatorFullMatrix([[1. - 1j, 3.], [0., 1. + 1j]])
  operator_adjoint = LinearOperatorAdjoint(operator)
  operator_adjoint.to_dense()
  ==> [[1. + 1j, 0.],
       [3., 1. - 1j]]
  operator_adjoint.shape
  ==> [2, 2]
  operator_adjoint.log_abs_determinant()
  ==> log(2)
x = ... Shape [2, 4] Tensor
operator_adjoint.matmul(x)
==> Shape [2, 4] Tensor, equal to operator.matmul(x, adjoint=True)
```
#### Performance
The performance of `LinearOperatorAdjoint` depends on the underlying
  operator's performance.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operator,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorAdjoint`.
`LinearOperatorAdjoint` is initialized with an operator `A`. The `solve`
and `matmul` methods effectively flip the `adjoint` argument. E.g.
```
A = MyLinearOperator(...)
B = LinearOperatorAdjoint(A)
x = [....] # a vector
assert A.matvec(x, adjoint=True) == B.matvec(x, adjoint=False)
```
Args:
operator: `LinearOperator` object.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is `operator.name +
"_adjoint"`.
Raises:
ValueError: If `operator.is_non_singular` is False.
"""
self._operator = operator
# The congruency of is_non_singular and is_self_adjoint was checked in the
# base operator.
def _combined_hint(hint_str, provided_hint_value, message):
"""Get combined hint in the case where operator.hint should equal hint."""
op_hint = getattr(operator, hint_str)
if op_hint is False and provided_hint_value:
raise ValueError(message)
if op_hint and provided_hint_value is False:
raise ValueError(message)
return (op_hint or provided_hint_value) or None
is_square = _combined_hint(
"is_square", is_square,
"An operator is square if and only if its adjoint is square.")
is_non_singular = _combined_hint(
"is_non_singular", is_non_singular,
"An operator is non-singular if and only if its adjoint is "
"non-singular.")
is_self_adjoint = _combined_hint(
"is_self_adjoint", is_self_adjoint,
"An operator is self-adjoint if and only if its adjoint is "
"self-adjoint.")
is_positive_definite = _combined_hint(
"is_positive_definite", is_positive_definite,
"An operator is positive-definite if and only if its adjoint is "
"positive-definite.")
# Initialization.
if name is None:
name = operator.name + "_adjoint"
with ops.name_scope(name, values=operator.graph_parents):
super(LinearOperatorAdjoint, self).__init__(
dtype=operator.dtype,
graph_parents=operator.graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operator(self):
"""The operator before taking the adjoint."""
return self._operator
def _assert_non_singular(self):
return self.operator.assert_non_singular()
def _assert_positive_definite(self):
return self.operator.assert_positive_definite()
def _assert_self_adjoint(self):
return self.operator.assert_self_adjoint()
def _shape(self):
    # Swap the last two dimensions.
shape = self.operator.shape
return shape[:-2].concatenate([shape[-1], shape[-2]])
def _shape_tensor(self):
    # Swap the last two dimensions.
shape = self.operator.shape_tensor()
return array_ops.concat([
shape[:-2], [shape[-1], shape[-2]]], axis=-1)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return self.operator.matmul(
x, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def _matvec(self, x, adjoint=False, adjoint_arg=False):
return self.operator.matvec(
x, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def _determinant(self):
if self.is_self_adjoint:
return self.operator.determinant()
return math_ops.conj(self.operator.determinant())
def _log_abs_determinant(self):
return self.operator.log_abs_determinant()
def _trace(self):
if self.is_self_adjoint:
return self.operator.trace()
return math_ops.conj(self.operator.trace())
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
return self.operator.solve(
rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def _solvevec(self, rhs, adjoint=False, adjoint_arg=False):
return self.operator.solvevec(
rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def _to_dense(self):
if self.is_self_adjoint:
return self.operator.to_dense()
return linalg.adjoint(self.operator.to_dense())
def _add_to_tensor(self, x):
return self.to_dense() + x
|
tensorflow-master
|
tensorflow/python/ops/linalg/linear_operator_adjoint.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_factory_ops.constant_value."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConstantValueOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters(
#=========================================================================
# 0-dimensional tensors.
dict(pylist='x', expected_shape=()),
#=========================================================================
# 1-dimensional tensors.
dict(pylist=[1, 2, 3], expected_shape=(3,)),
#=========================================================================
# 2-dimensional tensors.
dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)),
dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)),
#=========================================================================
# 3-dimensional tensors.
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
expected_shape=(3, None, None)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
inner_shape=(2,),
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
inner_shape=(2,),
expected_shape=(3, None, 2)),
# 3-dimensional tensors with numpy arrays
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
expected_shape=(3, None, None)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
inner_shape=(2,),
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
inner_shape=(2,),
expected_shape=(3, None, 2)),
#=========================================================================
# 4-dimensional tensors.
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
expected_shape=(2, None, None, None)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
ragged_rank=1,
expected_shape=(2, None, 2, 2)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
inner_shape=(2,),
expected_shape=(2, None, None, 2)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
inner_shape=(2, 2),
expected_shape=(2, None, 2, 2)),
# 4-dimensional tensors with numpy arrays
dict(
pylist=np.array([[[np.array([1, 2]), [3, 4]], [[5, 6], [7, 8]]],
np.array([[[2, 4], [6, 8]], [[1, 5], [7, 9]]])]),
expected_shape=(2, None, None, None)),
#=========================================================================
# Empty tensors (no scalar values) w/ default ragged_rank and inner_shape
dict(pylist=[], expected_shape=(0,)),
dict(pylist=[[], [], np.array([])], expected_shape=(3, None)),
dict(
pylist=[[[], []], [], [[], [[]]]],
expected_shape=(3, None, None, None)),
dict(
pylist=np.array([np.array([[], []]),
np.array([]), [[], [[]]]]),
expected_shape=(3, None, None, None)),
#=========================================================================
# Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape
dict(pylist=[], ragged_rank=1, expected_shape=(0, None)),
dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)),
dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)),
dict(
pylist=[],
ragged_rank=1,
inner_shape=(100, 20),
expected_shape=(0, None, 100, 20)),
dict(
pylist=[],
ragged_rank=2,
inner_shape=(100, 20),
expected_shape=(0, None, None, 100, 20)),
dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)),
dict(pylist=[], inner_shape=(0,), expected_shape=(0,)),
dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)),
dict(
pylist=np.array([]),
ragged_rank=1,
inner_shape=(100, 20),
expected_shape=(0, None, 100, 20)),
#=========================================================================
# default/inferred dtypes.
#
# Note: numpy has different default/inferred types than tensorflow.
# Since we are using values, not tensors, we get the default numpy types
# here.
dict(pylist=[], expected_dtype=np.float64),
dict(pylist=[[[], [[[]], []]]], expected_dtype=np.float64),
dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=np.int64),
dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=np.float64),
dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=np.float64),
dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=np.dtype('S1')),
dict(pylist=[[True]], expected_dtype=np.bool),
dict(
pylist=[np.array([1, 2]), np.array([3.]), [4, 5, 6]],
expected_dtype=np.float64),
#=========================================================================
# explicit dtypes
dict(pylist=[], dtype=np.float32),
dict(pylist=[], dtype=np.dtype('S1')),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int64),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int32),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.float32),
dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float16),
dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float32),
dict(
pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']],
dtype=np.dtype('S1')),
)
def testRaggedValues(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
expected_shape=None,
expected_dtype=None):
"""Tests that `ragged_value(pylist).to_list() == pylist`."""
rt = ragged_factory_ops.constant_value(
pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape)
# Normalize the pylist, i.e., convert all np.arrays to list.
# E.g., [np.array((1,2))] --> [[1,2]]
pylist = self._normalize_pylist(pylist)
# If dtype was explicitly specified, check it.
if dtype is not None:
self.assertEqual(rt.dtype, dtype)
if expected_dtype is not None:
self.assertEqual(rt.dtype, expected_dtype)
# If ragged_rank was explicitly specified, check it.
if ragged_rank is not None:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.ragged_rank, ragged_rank)
else:
self.assertEqual(0, ragged_rank)
# If inner_shape was explicitly specified, check it.
if inner_shape is not None:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.flat_values.shape[1:], inner_shape)
else:
self.assertEqual(rt.shape, inner_shape)
if expected_shape is not None:
self.assertEqual(tuple(rt.shape), expected_shape)
if rt.shape:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.to_list(), pylist)
else:
self.assertEqual(rt.tolist(), pylist)
if expected_shape is not None:
self.assertEqual(rt.shape, expected_shape)
else:
self.assertEqual(rt, pylist)
if expected_shape is not None:
self.assertEqual((), expected_shape)
@parameterized.parameters(
dict(
pylist=12,
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=12: incompatible with ragged_rank=1'),
dict(
pylist=np.array(12),
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=array\\(12\\): incompatible with '
'ragged_rank=1'),
dict(
pylist=12,
inner_shape=(1,),
exception=ValueError,
message='Invalid pylist=12: incompatible with '
'dim\\(inner_shape\\)=1'),
dict(
pylist=[[[1], [2]]],
ragged_rank=-1,
exception=ValueError,
message='Invalid ragged_rank=-1: must be nonnegative'),
dict(
pylist=[[1, [2]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[[1]], [[[2]]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[1], [[]]],
exception=ValueError,
message='Invalid pylist=.*: empty list nesting is greater '
'than scalar value nesting'),
dict(
pylist=[1, 2, 3],
ragged_rank=1,
exception=ValueError,
message='pylist has scalar values depth 1, but ragged_rank=1 '
'requires scalar value depth greater than 1'),
dict(
pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
ragged_rank=2,
exception=ValueError,
message='pylist has scalar values depth 2, but ragged_rank=2 '
'requires scalar value depth greater than 2'),
dict(
pylist=[1, 2, 3],
inner_shape=(1, 1),
exception=ValueError,
message='cannot reshape array'),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
inner_shape=(2, 2),
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=.*: incompatible with ragged_rank=1 and '
'dim\\(inner_shape\\)=2'),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[], [[]]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
)
def testRaggedValuesError(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
exception=None,
message=None):
"""Tests that `constant_value()` raises an expected exception."""
self.assertRaisesRegexp(
exception,
message,
ragged_factory_ops.constant_value,
pylist,
dtype=dtype,
ragged_rank=ragged_rank,
inner_shape=inner_shape)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_constant_value_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RaggedTensor.from_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToSparseOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
def testDocStringExamples(self):
# The examples from RaggedTensor.from_tensor.__doc__.
dt = constant_op.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
self.assertRaggedEqual(
RaggedTensor.from_tensor(dt), [[5, 7, 0], [0, 3, 0], [6, 0, 0]])
self.assertRaggedEqual(
RaggedTensor.from_tensor(dt, lengths=[1, 0, 3]), [[5], [], [6, 0, 0]])
self.assertRaggedEqual(
RaggedTensor.from_tensor(dt, padding=0), [[5, 7], [0, 3], [6]])
dt_3d = constant_op.constant([[[5, 0], [7, 0], [0, 0]],
[[0, 0], [3, 0], [0, 0]],
[[6, 0], [0, 0], [0, 0]]])
self.assertRaggedEqual(
RaggedTensor.from_tensor(dt_3d, lengths=([2, 0, 3], [1, 1, 2, 0, 1])),
[[[5], [7]], [], [[6, 0], [], [0]]])
@parameterized.parameters(
# 2D test cases, no length or padding.
{
'tensor': [[]],
'expected': [[]],
},
{
'tensor': [[1]],
'expected': [[1]],
},
{
'tensor': [[1, 2]],
'expected': [[1, 2]],
},
{
'tensor': [[1], [2], [3]],
'expected': [[1], [2], [3]],
},
{
'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
'expected': [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
},
# 3D test cases, no length or padding
{
'tensor': [[[]]],
'expected': [[[]]],
},
{
'tensor': [[[]]],
'expected': [[[]]],
'ragged_rank': 1,
},
{
'tensor': [[[1]]],
'expected': [[[1]]],
},
{
'tensor': [[[1, 2]]],
'expected': [[[1, 2]]],
},
{
'tensor': [[[1, 2], [3, 4]]],
'expected': [[[1, 2], [3, 4]]],
},
{
'tensor': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]],
'expected': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]],
},
{
'tensor': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]],
'expected': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]],
},
# 2D test cases, with length
{
'tensor': [[1]],
'lengths': [1],
'expected': [[1]]
},
{
'tensor': [[1]],
'lengths': [0],
'expected': [[]]
},
{
'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
'lengths': [0, 1, 2],
'expected': [[], [4], [7, 8]]
},
{
'tensor': [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
'lengths': [0, 0, 0],
'expected': [[], [], []]
},
{
'tensor': [[1, 2], [3, 4]],
'lengths': [2, 2],
'expected': [[1, 2], [3, 4]]
},
{
'tensor': [[1, 2], [3, 4]],
'lengths': [7, 8], # lengths > ncols: truncated to ncols
'expected': [[1, 2], [3, 4]]
},
{
'tensor': [[1, 2], [3, 4]],
'lengths': [-2, -1], # lengths < 0: treated as zero
'expected': [[], []]
},
# 3D test cases, with length
{
'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
'lengths': [0, 0],
'expected': [[], []]
},
{
'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
'lengths': [1, 2],
'expected': [[[1, 2]], [[5, 6], [7, 8]]]
},
{
'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
'lengths': [2, 2],
'expected': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
},
# 2D test cases, with padding
{
'tensor': [[1]],
'padding': 0,
'expected': [[1]]
},
{
'tensor': [[0]],
'padding': 0,
'expected': [[]]
},
{
'tensor': [[0, 1]],
'padding': 0,
'expected': [[0, 1]]
},
{
'tensor': [[1, 0]],
'padding': 0,
'expected': [[1]]
},
{
'tensor': [[1, 0, 1, 0, 0, 1, 0, 0]],
'padding': 0,
'expected': [[1, 0, 1, 0, 0, 1]]
},
{
'tensor': [[3, 7, 0, 0], [2, 0, 0, 0], [5, 0, 0, 0]],
'padding': 0,
'expected': [[3, 7], [2], [5]]
},
# 3D test cases, with padding
{
'tensor': [[[1]]],
'padding': [0],
'expected': [[[1]]]
},
{
'tensor': [[[0]]],
'padding': [0],
'expected': [[]]
},
{
'tensor': [[[0, 0], [1, 2]], [[3, 4], [0, 0]]],
'padding': [0, 0],
'expected': [[[0, 0], [1, 2]], [[3, 4]]]
},
# 4D test cases, with padding
{
'tensor': [
[[[1, 2], [3, 4]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]],
[[[0, 0], [0, 0]], [[5, 6], [7, 8]], [[0, 0], [0, 0]]],
[[[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]]
],
'padding': [[0, 0], [0, 0]],
'expected': [
[[[1, 2], [3, 4]]],
[[[0, 0], [0, 0]], [[5, 6], [7, 8]]],
[]
]
},
# 3D test cases, with ragged_rank=2.
{
'tensor': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
'ragged_rank': 2,
'expected': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]]
},
{
'tensor': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
'ragged_rank': 2,
'lengths': [2, 0, 2, 1],
'expected': [[[1, 2], []], [[5, 6], [7]]]
},
{
'tensor': [[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
'ragged_rank': 2,
'padding': 0,
'expected': [[[1], [2, 3]], [[], [4]]]
},
# 4D test cases, with ragged_rank>1
{
'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]],
'ragged_rank': 2,
'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]]
},
{
'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]],
'ragged_rank': 3,
'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]]
},
{
'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]],
'ragged_rank': 2,
'padding': [0, 0],
'expected': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8]]]]
},
{
'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]],
'lengths': ([2, 2], [1, 2, 2, 1]),
'expected': [[[[1, 0]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8]]]],
'ragged_rank': 2,
'use_ragged_rank': False
},
{
'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]],
'lengths': [[2, 2], [1, 2, 2, 1]],
'expected': [[[[1, 0]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8]]]],
'ragged_rank': 2,
'use_ragged_rank': False
},
{
'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]],
'ragged_rank': 3,
'padding': 0,
'expected': [[[[1], [2, 3]], [[], [4]]],
[[[5, 6], [7]], [[0, 8], []]]]
},
{
'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]],
'lengths': ([2, 2], [2, 2, 2, 2], [1, 2, 0, 1, 2, 1, 2, 0]),
'expected': [[[[1], [2, 3]], [[], [4]]],
[[[5, 6], [7]], [[0, 8], []]]],
'ragged_rank': 3,
'use_ragged_rank': False
},
{
'tensor': [[[[1, 0], [2, 3]], [[0, 0], [4, 0]]],
[[[5, 6], [7, 0]], [[0, 8], [0, 0]]]],
'lengths': [[2, 2], [2, 2, 2, 2], [1, 2, 0, 1, 2, 1, 2, 0]],
'expected': [[[[1], [2, 3]], [[], [4]]],
[[[5, 6], [7]], [[0, 8], []]]],
'ragged_rank': 3,
'use_ragged_rank': False
},
) # pyformat: disable
def testRaggedFromTensor(self,
tensor,
expected,
lengths=None,
padding=None,
ragged_rank=1,
use_ragged_rank=True):
dt = constant_op.constant(tensor)
if use_ragged_rank:
rt = RaggedTensor.from_tensor(dt, lengths, padding, ragged_rank)
else:
rt = RaggedTensor.from_tensor(dt, lengths, padding)
self.assertEqual(type(rt), RaggedTensor)
self.assertEqual(rt.ragged_rank, ragged_rank)
self.assertTrue(
dt.shape.is_compatible_with(rt.shape),
'%s is incompatible with %s' % (dt.shape, rt.shape))
self.assertRaggedEqual(rt, expected)
def testHighDimensions(self):
# Use distinct prime numbers for all dimension shapes in this test, so
# we can see any errors that are caused by mixing up dimension sizes.
dt = array_ops.reshape(
math_ops.range(3 * 5 * 7 * 11 * 13 * 17), [3, 5, 7, 11, 13, 17])
for ragged_rank in range(1, 4):
rt = RaggedTensor.from_tensor(dt, ragged_rank=ragged_rank)
self.assertEqual(type(rt), RaggedTensor)
self.assertEqual(rt.ragged_rank, ragged_rank)
self.assertTrue(
dt.shape.is_compatible_with(rt.shape),
'%s is incompatible with %s' % (dt.shape, rt.shape))
self.assertRaggedEqual(rt, self.evaluate(dt).tolist())
@parameterized.parameters(
# With no padding or lengths
{
'dt_shape': [0, 0],
'expected': []
},
{
'dt_shape': [0, 3],
'expected': []
},
{
'dt_shape': [3, 0],
'expected': [[], [], []]
},
{
'dt_shape': [0, 2, 3],
'expected': []
},
{
'dt_shape': [2, 0, 3],
'expected': [[], []]
},
{
'dt_shape': [2, 3, 0],
'expected': [[[], [], []], [[], [], []]]
},
{
'dt_shape': [2, 3, 0, 1],
'expected': [[[], [], []], [[], [], []]]
},
{
'dt_shape': [2, 3, 1, 0],
'expected': [[[[]], [[]], [[]]], [[[]], [[]], [[]]]]
},
# With padding
{
'dt_shape': [0, 0],
'padding': 0,
'expected': []
},
{
'dt_shape': [0, 3],
'padding': 0,
'expected': []
},
{
'dt_shape': [3, 0],
'padding': 0,
'expected': [[], [], []]
},
{
'dt_shape': [0, 2, 3],
'padding': [0, 0, 0],
'expected': []
},
{
'dt_shape': [2, 0, 3],
'padding': [0, 0, 0],
'expected': [[], []]
},
{
'dt_shape': [2, 3, 0],
'padding': [],
'expected': [[], []]
},
# With lengths
{
'dt_shape': [0, 0],
'lengths': [],
'expected': []
},
{
'dt_shape': [0, 3],
'lengths': [],
'expected': []
},
{
'dt_shape': [3, 0],
'lengths': [0, 0, 0],
'expected': [[], [], []]
},
{
'dt_shape': [3, 0],
'lengths': [2, 3, 4], # lengths > ncols: truncated to ncols
'expected': [[], [], []]
},
{
'dt_shape': [0, 2, 3],
'lengths': [],
'expected': []
},
{
'dt_shape': [2, 0, 3],
'lengths': [0, 0],
'expected': [[], []]
},
{
'dt_shape': [2, 3, 0],
'lengths': [0, 0],
'expected': [[], []]
},
)
def testEmpty(self, dt_shape, expected, lengths=None, padding=None):
dt = array_ops.zeros(dt_shape)
rt = RaggedTensor.from_tensor(dt, lengths, padding)
self.assertEqual(type(rt), RaggedTensor)
self.assertEqual(rt.ragged_rank, 1)
self.assertTrue(dt.shape.is_compatible_with(rt.shape))
self.assertRaggedEqual(rt, expected)
@parameterized.parameters(
{
'tensor': [[1]],
'lengths': [0],
'padding': 0,
'error': (ValueError, 'Specify lengths or padding, but not both')
},
{
'tensor': [[1]],
'lengths': [0.5],
'error': (TypeError, 'lengths must be an integer tensor')
},
{
'tensor': [[1, 2, 3]],
'lengths': [[1], [1]],
'error': (ValueError, r'Shape \(1, 3\) must have rank at least 3')
},
{
'tensor': [[1]],
'padding': 'a',
'error': (TypeError, '.*')
},
{
'tensor': [[1]],
'padding': [1],
'error': (ValueError, r'Shapes \(1,\) and \(\) are incompatible')
},
{
'tensor': [[[1]]],
'padding': 1,
'error': (ValueError, r'Shapes \(\) and \(1,\) are incompatible')
},
{
'tensor': [[1]],
'ragged_rank': 'bad',
'error': (TypeError, r'ragged_rank expected int, got \'bad\'')
},
{
'tensor': [[1]],
'ragged_rank': 0,
'error': (ValueError, r'ragged_rank must be greater than 0; got 0')
},
{
'tensor': [[1]],
'ragged_rank': -1,
'error': (ValueError, r'ragged_rank must be greater than 0; got -1')
},
)
def testErrors(self,
tensor,
lengths=None,
padding=None,
ragged_rank=1,
error=None):
dt = constant_op.constant(tensor)
self.assertRaisesRegexp(error[0], error[1], RaggedTensor.from_tensor, dt,
lengths, padding, ragged_rank)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch gather operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
#===============================================================================
# ragged.batch_gather
#===============================================================================
def batch_gather(params, indices, name=None):
"""Gathers slices from `params` according to `indices` with batch dims.
This operation is similar to `gather`, but it assumes that the leading `N`
dimensions of `indices` and `params` are batch dimensions, and performs a
gather within each batch. In particular, when using this operation with `N`
batch dimensions `B1...BN`:
* `indices` has shape `[B1...BN, I]`
* `params` has shape `[B1...BN, P1...PM]`.
* `result` has shape `[B1...BN, I, P2...PM]`.
* `result[b1...bN, i, p2...pM] =
params[b1...bN, indices[b1...bN, i], p2...pM]`
Args:
params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`,
`M>0`).
indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`).
name: A name for the operation (optional).
Returns:
A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`.
`result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`.
#### Example:
```python
>>> params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
>>> indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]])
>>> tf.compat.v1.batch_gather(params, indices)
[['b', 'c', 'a'], [], [], ['e', 'e']]
```
"""
if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)):
return array_ops.batch_gather(params, indices, name)
with ops.name_scope(name, 'RaggedBatchGather', [params, indices]):
params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
params, name='params')
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
indices, name='indices')
params, indices = ragged_tensor.match_row_splits_dtypes(params, indices)
indices_ndims = indices.shape.ndims
if indices_ndims is None:
raise ValueError(
'batch_gather does not allow indices with unknown shape.')
if indices_ndims == 0:
raise ValueError('indices.rank must be at least 1.')
if ragged_tensor.is_ragged(indices):
# If the outermost ragged dimension is a batch dimension, recurse.
if indices_ndims > 2:
if not ragged_tensor.is_ragged(params):
raise ValueError('batch shape from indices does '
'not match params shape')
checks = [check_ops.assert_equal(params.row_splits, indices.row_splits)]
with ops.control_dependencies(checks):
return ragged_tensor.RaggedTensor.from_row_splits(
batch_gather(params.values, indices.values), indices.row_splits,
validate=False)
# Otherwise, indices is a 2D ragged tensor with 1 ragged dimension.
else:
# Ensure that `params` is ragged and has at least 2 dimensions.
if not ragged_tensor.is_ragged(params):
if params.shape.ndims is not None and params.shape.ndims < 2:
raise ValueError('batch shape from indices does '
'not match params shape')
params = ragged_tensor.RaggedTensor.from_tensor(
params, ragged_rank=1,
row_splits_dtype=indices.row_splits.dtype)
# Adjust indices from within-batch to global (in params.values), and
# then use ragged.gather to gather them.
num_indices = indices.row_lengths()
params_starts = params.row_starts()
adjustments = ragged_util.repeat(params_starts, num_indices, axis=0)
adjusted_index_values = (
math_ops.cast(indices.values, adjustments.dtype) + adjustments)
return ragged_tensor.RaggedTensor.from_row_splits(
ragged_gather_ops.gather(params.values, adjusted_index_values),
indices.row_splits, validate=False)
else: # params is a RaggedTensor and indices is a Tensor.
if indices_ndims == 1:
return ragged_gather_ops.gather(params, indices)
elif indices_ndims == 2:
# Adjust indices from batch-local to global (in params.values)
adjustments = array_ops.expand_dims(params.row_starts(), 1)
adjusted_indices = (
math_ops.cast(indices, adjustments.dtype) + adjustments)
return ragged_gather_ops.gather(params.values, adjusted_indices)
else:
raise ValueError('batch shape from indices does not match params shape')
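# A minimal sketch (not part of the original file) of the batch-local to
# global index adjustment used above: per-row start offsets of `params` are
# added to the within-row indices before a flat gather. Assumes the public
# TF 2.x ragged API.
def _example_index_adjustment():  # pragma: no cover
  import tensorflow as tf
  params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
  indices = tf.ragged.constant([[1, 2, 0], [], [], [0, 0]])
  adjustments = tf.repeat(params.row_starts(), indices.row_lengths())
  flat = tf.cast(indices.flat_values, adjustments.dtype) + adjustments
  # flat == [1, 2, 0, 4, 4], i.e. positions into params.flat_values.
  return tf.gather(params.flat_values, flat)  # ['b', 'c', 'a', 'e', 'e']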
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_batch_gather_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration parameters for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def auto_cast_partition_dtype():
"""Whether incopmatible row-partitioning dtypes should be auto-converted.
If true, then operations that combine RaggedTensors but have different
row-partitioning tensor dtypes will be automatically cast to a
compatible dtype (`tf.int64`). If false, then such operations will result
in an error.
Returns:
`bool`
"""
return False
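# A hedged sketch (not part of the original file) of how a caller might
# consult this flag; `_combine_row_splits` is a hypothetical helper written
# only for illustration, not a real TensorFlow API.
def _combine_row_splits(splits_a, splits_b):  # pragma: no cover
  import tensorflow as tf
  if splits_a.dtype != splits_b.dtype:
    if not auto_cast_partition_dtype():
      raise ValueError('Inputs have mismatched row-partitioning dtypes and '
                       'auto-casting is disabled.')
    splits_a = tf.cast(splits_a, tf.int64)
    splits_b = tf.cast(splits_b, tf.int64)
  return splits_a, splits_b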
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_config.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_tensor.convert_to_tensor_or_ragged."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConvertToTensorOrRaggedTensorTest(
ragged_test_util.RaggedTensorTestCase, parameterized.TestCase):
#=============================================================================
# Tests where the 'value' param is a RaggedTensor
#=============================================================================
@parameterized.parameters([
dict(pylist=[[1, 2], [3]]),
dict(pylist=[[1, 2], [3]], preferred_dtype=dtypes.float32),
dict(pylist=[[1, 2], [3]], preferred_dtype=dtypes.string),
# Note: Conversion of a single np.array is tested below. These tests
      # check nestings consisting of multiple or irregularly shaped np.arrays.
dict(
pylist=[np.array([1, 2]), np.array([3])],
preferred_dtype=dtypes.string),
dict(pylist=np.array([[1, 2], [3]]), preferred_dtype=dtypes.float32),
dict(pylist=np.array([[1, 2], [3]]), preferred_dtype=dtypes.string),
dict(
pylist=[np.array([[1], np.array([2])]), [np.array([3])]],
preferred_dtype=dtypes.float32),
dict(pylist=[np.array(1)], preferred_dtype=dtypes.string),
])
def testConvertRaggedTensor(self, pylist, dtype=None, preferred_dtype=None):
rt = ragged_factory_ops.constant(pylist)
converted = ragged_tensor.convert_to_tensor_or_ragged_tensor(
rt, dtype, preferred_dtype)
self.assertIs(converted, rt)
@parameterized.parameters([
dict(
pylist=[[1, 2], [3, 4]],
dtype=dtypes.float32,
message=('Tensor conversion requested dtype float32 for '
'RaggedTensor with dtype int32')),
dict(
pylist=np.array([[1, 2], [3, 4]]),
dtype=dtypes.float32,
message=('Tensor conversion requested dtype float32 for '
'RaggedTensor with dtype int32')),
dict(
pylist=[[1, 2], [3, 4]],
dtype=dtypes.string,
message=('Tensor conversion requested dtype string for '
'RaggedTensor with dtype .*')),
])
def testConvertRaggedTensorError(self,
pylist,
message,
dtype=None,
preferred_dtype=None):
rt = ragged_factory_ops.constant(pylist)
with self.assertRaisesRegexp(ValueError, message):
ragged_tensor.convert_to_tensor_or_ragged_tensor(rt, dtype,
preferred_dtype)
#=============================================================================
# Tests where the 'value' param is a RaggedTensorValue
#=============================================================================
@parameterized.parameters(
[
dict(
value=ragged_factory_ops.constant_value([[1, 2], [3]],
dtype=np.int32),
expected_dtype=dtypes.int32),
dict(
value=ragged_factory_ops.constant_value([[b'a', b'b'], [b'c']]),
expected_dtype=dtypes.string),
dict(
value=ragged_factory_ops.constant_value([[1, 2], [3]],
dtype=np.int32),
dtype=dtypes.float32,
expected_dtype=dtypes.float32),
dict(
value=ragged_factory_ops.constant_value([[1, 2], [3]],
dtype=np.int32),
preferred_dtype=dtypes.float32,
expected_dtype=dtypes.float32),
dict(
value=ragged_factory_ops.constant_value([[1, 2], [3]],
dtype=np.int32),
preferred_dtype=dtypes.string,
expected_dtype=dtypes.int32),
])
def testConvertRaggedTensorValue(self,
value,
dtype=None,
preferred_dtype=None,
expected_dtype=None):
if expected_dtype is None:
expected_dtype = value.dtype if dtype is None else dtype
converted = ragged_tensor.convert_to_tensor_or_ragged_tensor(
value, dtype, preferred_dtype)
self.assertEqual(value.ragged_rank, converted.ragged_rank)
self.assertEqual(dtypes.as_dtype(expected_dtype), converted.dtype)
self.assertEqual(value.to_list(), self.eval_to_list(converted))
@parameterized.parameters([
dict(
value=ragged_factory_ops.constant_value([['a', 'b'], ['c']],
dtype=str),
dtype=dtypes.int32,
message=r"invalid literal for int\(\) with base 10: 'a'"),
])
def testConvertRaggedTensorValueError(self,
value,
message,
dtype=None,
preferred_dtype=None):
with self.assertRaisesRegexp(ValueError, message):
ragged_tensor.convert_to_tensor_or_ragged_tensor(value, dtype,
preferred_dtype)
#=============================================================================
# Tests where the 'value' param is a Tensor
#=============================================================================
@parameterized.parameters([
dict(pylist=[[1, 2], [3, 4]]),
dict(pylist=[[1, 2], [3, 4]], preferred_dtype=dtypes.float32),
dict(pylist=[[1, 2], [3, 4]], preferred_dtype=dtypes.string),
])
def testConvertTensor(self, pylist, dtype=None, preferred_dtype=None):
tensor = constant_op.constant(pylist)
converted = ragged_tensor.convert_to_tensor_or_ragged_tensor(
tensor, dtype, preferred_dtype)
self.assertIs(tensor, converted)
@parameterized.parameters([
dict(
pylist=[[1, 2], [3, 4]],
dtype=dtypes.float32,
message=('Tensor conversion requested dtype float32 for '
'Tensor with dtype int32')),
dict(
pylist=[[1, 2], [3, 4]],
dtype=dtypes.string,
message=('Tensor conversion requested dtype string for '
'Tensor with dtype int32')),
])
def testConvertTensorError(self,
pylist,
message,
dtype=None,
preferred_dtype=None):
tensor = constant_op.constant(pylist)
with self.assertRaisesRegexp(ValueError, message):
ragged_tensor.convert_to_tensor_or_ragged_tensor(tensor, dtype,
preferred_dtype)
#=============================================================================
# Tests where the 'value' param is a np.array
#=============================================================================
@parameterized.parameters([
dict(
value=np.array([[1, 2], [3, 4]], dtype=np.int32),
expected_dtype=dtypes.int32),
dict(
value=np.array([[b'a', b'b'], [b'c', b'd']]),
expected_dtype=dtypes.string),
dict(
value=np.array([[1, 2], [3, 4]], dtype=np.int32),
dtype=dtypes.float32,
expected_dtype=dtypes.float32),
dict(
value=np.array([[1, 2], [3, 4]], dtype=np.int32),
preferred_dtype=dtypes.float32,
expected_dtype=dtypes.float32),
dict(
value=np.array([[1, 2], [3, 4]], dtype=np.int32),
preferred_dtype=dtypes.string,
expected_dtype=dtypes.int32),
])
def testConvertNumpyArray(self,
value,
dtype=None,
preferred_dtype=None,
expected_dtype=None):
if expected_dtype is None:
expected_dtype = value.dtype if dtype is None else dtype
converted = ragged_tensor.convert_to_tensor_or_ragged_tensor(
value, dtype, preferred_dtype)
self.assertEqual(dtypes.as_dtype(expected_dtype), converted.dtype)
self.assertAllEqual(value, converted)
@parameterized.parameters([
dict(
value=np.array([['a', 'b'], ['c', 'd']], dtype=str),
dtype=dtypes.int32,
message=r"invalid literal for int\(\) with base 10: 'a'"),
])
def testConvertNumpyArrayError(self,
value,
message,
dtype=None,
preferred_dtype=None):
with self.assertRaisesRegexp(ValueError, message):
ragged_tensor.convert_to_tensor_or_ragged_tensor(value, dtype,
preferred_dtype)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/convert_to_tensor_or_ragged_tensor_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the segment_id_ops.segment_ids_to_row_splits() op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSplitsToSegmentIdsOpTest(ragged_test_util.RaggedTensorTestCase):
def testDocStringExample(self):
segment_ids = [0, 0, 0, 2, 2, 3, 4, 4, 4]
expected = [0, 3, 3, 5, 6, 9]
splits = segment_id_ops.segment_ids_to_row_splits(segment_ids)
self.assertAllEqual(splits, expected)
def testEmptySegmentIds(self):
# Note: the splits for an empty ragged tensor contains a single zero.
segment_ids = segment_id_ops.segment_ids_to_row_splits([])
self.assertAllEqual(segment_ids, [0])
def testErrors(self):
self.assertRaisesRegexp(TypeError,
r'segment_ids must be an integer tensor.*',
segment_id_ops.segment_ids_to_row_splits,
constant_op.constant([0.5]))
self.assertRaisesRegexp(ValueError, r'Shape \(\) must have rank 1',
segment_id_ops.segment_ids_to_row_splits, 0)
self.assertRaisesRegexp(ValueError, r'Shape \(1, 1\) must have rank 1',
segment_id_ops.segment_ids_to_row_splits, [[0]])
def testNumSegments(self):
segment_ids = [0, 0, 0, 2, 2, 3, 4, 4, 4]
num_segments = 7
expected = [0, 3, 3, 5, 6, 9, 9, 9]
splits = segment_id_ops.segment_ids_to_row_splits(segment_ids, num_segments)
self.assertAllEqual(splits, expected)
def testUnsortedSegmentIds(self):
# Segment ids are not required to be sorted.
segment_ids = [0, 4, 3, 2, 4, 4, 2, 0, 0]
splits1 = segment_id_ops.segment_ids_to_row_splits(segment_ids)
expected1 = [0, 3, 3, 5, 6, 9]
splits2 = segment_id_ops.segment_ids_to_row_splits(segment_ids, 7)
expected2 = [0, 3, 3, 5, 6, 9, 9, 9]
self.assertAllEqual(splits1, expected1)
self.assertAllEqual(splits2, expected2)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_segment_ids_to_row_splits_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Concat and stack operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
def concat(values, axis, name=None):
"""Concatenates potentially ragged tensors along one dimension.
Given a list of tensors with the same rank `K` (`K >= axis`), returns a
rank-`K` `RaggedTensor` `result` such that `result[i0...iaxis]` is the
concatenation of `[rt[i0...iaxis] for rt in values]`.
Args:
values: A list of potentially ragged tensors. May not be empty. All
`values` must have the same rank and the same dtype; but unlike
`tf.concat`, they can have arbitrary shapes.
axis: A python integer, indicating the dimension along which to concatenate.
(Note: Unlike `tf.concat`, the `axis` parameter must be statically known.)
Negative values are supported only if the rank of at least one
`values` value is statically known.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` with rank `K`.
    `result.ragged_rank=max(axis, max(rt.ragged_rank for rt in values))`.
Raises:
ValueError: If `values` is empty, if `axis` is out of bounds or if
the input tensors have different ranks.
#### Example:
```python
>>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]])
>>> t2 = tf.ragged.constant([[6], [7, 8, 9]])
>>> ragged.concat([t1, t2], axis=0)
[[1, 2], [3, 4, 5], [6], [7, 8, 9]]
>>> ragged.concat([t1, t2], axis=1)
[[1, 2, 6], [3, 4, 5, 7, 8, 9]]
```
"""
if not isinstance(values, (list, tuple)):
values = [values]
with ops.name_scope(name, 'RaggedConcat', values):
return _ragged_stack_concat_helper(values, axis, stack_values=False)
def stack(values, axis=0, name=None):
"""Stacks potentially ragged tensors along one dimension.
Given a list of tensors with the same rank `K` (`K >= axis`), returns a
rank-`K+1` `RaggedTensor` `result` such that `result[i0...iaxis]` is the
list `[rt[i0...iaxis] for rt in values]`.
Args:
values: A list of potentially ragged tensors. May not be empty. All
`values` must have the same rank and the same dtype; but unlike
      `tf.stack`, they can have arbitrary shapes.
axis: A python integer, indicating the dimension along which to stack.
(Note: Unlike `tf.stack`, the `axis` parameter must be statically known.)
Negative values are supported only if the rank of at least one
`values` value is statically known.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` with rank `K+1`.
    `result.ragged_rank=max(axis, max(rt.ragged_rank for rt in values))`.
Raises:
ValueError: If `values` is empty, if `axis` is out of bounds or if
the input tensors have different ranks.
#### Example:
```python
>>> t1 = tf.ragged.constant([[1, 2], [3, 4, 5]])
>>> t2 = tf.ragged.constant([[6], [7, 8, 9]])
>>> ragged.stack([t1, t2], axis=0)
  [[[1, 2], [3, 4, 5]], [[6], [7, 8, 9]]]
>>> ragged.stack([t1, t2], axis=1)
[[[1, 2], [6]], [[3, 4, 5], [7, 8, 9]]]
```
"""
if not isinstance(values, (list, tuple)):
values = [values]
with ops.name_scope(name, 'RaggedConcat', values):
return _ragged_stack_concat_helper(values, axis, stack_values=True)
def _ragged_stack_concat_helper(rt_inputs, axis, stack_values):
"""Helper function to concatenate or stack ragged tensors.
Args:
rt_inputs: A list of RaggedTensors or Tensors to combine.
axis: The axis along which to concatenate or stack.
stack_values: A boolean -- if true, then stack values; otherwise,
concatenate them.
Returns:
A RaggedTensor.
Raises:
ValueError: If rt_inputs is empty, or if axis is out of range.
"""
# Validate parameters.
if not rt_inputs:
raise ValueError('rt_inputs may not be empty.')
# Convert input tensors.
rt_inputs = [
ragged_tensor.convert_to_tensor_or_ragged_tensor(
rt_input, name='rt_input') for rt_input in rt_inputs
]
row_splits_dtype, rt_inputs = ragged_tensor.match_row_splits_dtypes(
*rt_inputs, return_dtype=True)
rt_inputs = list(rt_inputs)
# Special case: if there's only one input, then return it as-is.
if len(rt_inputs) == 1:
if stack_values:
return ragged_array_ops.expand_dims(rt_inputs[0], axis=axis)
else:
return rt_inputs[0]
# Check the rank (number of dimensions) of the input tensors.
ndims = None
for rt in rt_inputs:
if ndims is None:
ndims = rt.shape.ndims
else:
rt.shape.assert_has_rank(ndims)
out_ndims = ndims if (ndims is None or not stack_values) else ndims + 1
axis = ragged_util.get_positive_axis(axis, out_ndims)
# If all the inputs are Tensors, and we're combining the final dimension,
# then we can delegate to the tf.stack/tf.concat operation, and return a
# Tensor.
if all(not ragged_tensor.is_ragged(rt) for rt in rt_inputs):
if ndims is not None and (axis == out_ndims - 1 or axis == ndims - 1):
if stack_values:
return array_ops.stack(rt_inputs, axis)
else:
return array_ops.concat(rt_inputs, axis)
# Convert any Tensor inputs to RaggedTensors. This makes it
# possible to concatenate Tensors and RaggedTensors together.
for i in range(len(rt_inputs)):
if not ragged_tensor.is_ragged(rt_inputs[i]):
rt_inputs[i] = ragged_tensor.RaggedTensor.from_tensor(
rt_inputs[i], ragged_rank=1, row_splits_dtype=row_splits_dtype)
# Convert the input tensors to all have the same ragged_rank.
ragged_rank = max(max(rt.ragged_rank for rt in rt_inputs), 1)
rt_inputs = [_increase_ragged_rank_to(rt, ragged_rank, row_splits_dtype)
for rt in rt_inputs]
if axis == 0:
return _ragged_stack_concat_axis_0(rt_inputs, stack_values)
elif axis == 1:
return _ragged_stack_concat_axis_1(rt_inputs, stack_values)
else: # axis > 1: recurse.
values = [rt.values for rt in rt_inputs]
splits = [[rt_input.row_splits] for rt_input in rt_inputs]
with ops.control_dependencies(ragged_util.assert_splits_match(splits)):
return ragged_tensor.RaggedTensor.from_row_splits(
_ragged_stack_concat_helper(values, axis - 1, stack_values),
splits[0][0], validate=False)
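# Note (descriptive): for axis > 1 the helper above strips the outermost
# row_splits from every input (after asserting that they match), combines the
# inner `.values` along axis - 1, and re-wraps the result with the shared
# splits -- so concatenating along axis=2 reduces to the axis=1 case applied
# to each input's values.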
def _ragged_stack_concat_axis_0(rt_inputs, stack_values):
"""Helper function to concatenate or stack ragged tensors along axis 0.
Args:
rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank.
stack_values: Boolean. If true, then stack values; otherwise, concatenate
them.
Returns:
A RaggedTensor.
"""
# Concatenate the inner values together.
flat_values = [rt.flat_values for rt in rt_inputs]
concatenated_flat_values = array_ops.concat(flat_values, axis=0)
# Concatenate the splits together for each ragged dimension (adjusting
# split offsets as necessary).
nested_splits = [rt.nested_row_splits for rt in rt_inputs]
ragged_rank = rt_inputs[0].ragged_rank
concatenated_nested_splits = [
_concat_ragged_splits([ns[dim]
for ns in nested_splits])
for dim in range(ragged_rank)
]
# If we are performing a stack operation, then add another splits.
if stack_values:
stack_lengths = array_ops.stack([rt.nrows() for rt in rt_inputs])
stack_splits = ragged_util.lengths_to_splits(stack_lengths)
concatenated_nested_splits.insert(0, stack_splits)
return ragged_tensor.RaggedTensor.from_nested_row_splits(
concatenated_flat_values, concatenated_nested_splits, validate=False)
def _ragged_stack_concat_axis_1(rt_inputs, stack_values):
"""Helper function to concatenate or stack ragged tensors along axis 1.
Args:
rt_inputs: A list of RaggedTensors, all with the same rank and ragged_rank.
stack_values: Boolean. If true, then stack values; otherwise, concatenate
them.
Returns:
A RaggedTensor.
"""
num_inputs = len(rt_inputs)
rt_nrows = rt_inputs[0].nrows()
nrows_msg = 'Input tensors have incompatible shapes.'
nrows_checks = [
check_ops.assert_equal(rt.nrows(), rt_nrows, message=nrows_msg)
for rt in rt_inputs[1:]
]
with ops.control_dependencies(nrows_checks):
    # Concatenate the inputs together to put them in a single ragged tensor.
concatenated_rt = _ragged_stack_concat_axis_0(rt_inputs, stack_values=False)
# Use ragged.gather to permute the rows of concatenated_rt. In particular,
# permuted_rt = [rt_inputs[0][0], ..., rt_inputs[N][0],
# rt_inputs[0][1], ..., rt_inputs[N][1],
# ...,
    #                rt_inputs[0][M], ..., rt_inputs[N][M]]
# where `N=num_inputs-1` and `M=rt_nrows-1`.
row_indices = math_ops.range(rt_nrows * num_inputs)
row_index_matrix = array_ops.reshape(row_indices, [num_inputs, -1])
transposed_row_index_matrix = array_ops.transpose(row_index_matrix)
row_permutation = array_ops.reshape(transposed_row_index_matrix, [-1])
permuted_rt = ragged_gather_ops.gather(concatenated_rt, row_permutation)
if stack_values:
# Add a new splits tensor to group together the values.
stack_splits = math_ops.range(0, rt_nrows * num_inputs + 1, num_inputs)
_copy_row_shape(rt_inputs, stack_splits)
return ragged_tensor.RaggedTensor.from_row_splits(
permuted_rt, stack_splits, validate=False)
else:
# Merge together adjacent rows by dropping the row-split indices that
# separate them.
concat_splits = permuted_rt.row_splits[::num_inputs]
_copy_row_shape(rt_inputs, concat_splits)
return ragged_tensor.RaggedTensor.from_row_splits(
permuted_rt.values, concat_splits, validate=False)
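# Worked example (illustrative, with assumed sizes): with num_inputs=2 and
# rt_nrows=3, row_indices == [0, 1, 2, 3, 4, 5], row_index_matrix is
# [[0, 1, 2], [3, 4, 5]], and its transpose flattens to the permutation
# [0, 3, 1, 4, 2, 5], which interleaves the rows of the two inputs. For
# concatenation, keeping every num_inputs-th row split
# (permuted_rt.row_splits[::2]) merges each interleaved pair into one row.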
def _copy_row_shape(rt_inputs, splits):
"""Sets splits.shape to [rt[shape[0]+1] for each rt in rt_inputs."""
for rt in rt_inputs:
if rt.shape[0] is not None:
splits.set_shape(tensor_shape.TensorShape(rt.shape[0] + 1))
def _increase_ragged_rank_to(rt_input, ragged_rank, row_splits_dtype):
"""Adds ragged dimensions to `rt_input` so it has the desired ragged rank."""
if ragged_rank > 0:
if not ragged_tensor.is_ragged(rt_input):
rt_input = ragged_tensor.RaggedTensor.from_tensor(
rt_input, row_splits_dtype=row_splits_dtype)
if rt_input.ragged_rank < ragged_rank:
rt_input = rt_input.with_values(
_increase_ragged_rank_to(rt_input.values, ragged_rank - 1,
row_splits_dtype))
return rt_input
def _concat_ragged_splits(splits_list):
"""Concatenates a list of RaggedTensor splits to form a single splits."""
pieces = [splits_list[0]]
splits_offset = splits_list[0][-1]
for splits in splits_list[1:]:
pieces.append(splits[1:] + splits_offset)
splits_offset += splits[-1]
return array_ops.concat(pieces, axis=0)
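# Worked example (illustrative, with assumed values):
# _concat_ragged_splits([[0, 2, 5], [0, 1, 3]]) keeps the first splits
# [0, 2, 5] and appends the second splits (minus its leading 0) shifted by the
# running offset 5, giving [0, 2, 5, 6, 8] -- the row splits of concatenating
# a 2-row ragged tensor with 5 values and a 2-row ragged tensor with 3 values.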
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_concat_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.to_sparse op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToSparseOpTest(ragged_test_util.RaggedTensorTestCase):
def testDocStringExample(self):
rt = ragged_factory_ops.constant([[1, 2, 3], [4], [], [5, 6]])
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(st.indices,
[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]])
self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6])
self.assertAllEqual(st.dense_shape, [4, 3])
def test2DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'], [],
['g']])
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(
st.indices, [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [2, 0], [4, 0]])
self.assertAllEqual(st.values, b'a b c d e f g'.split())
self.assertAllEqual(st.dense_shape, [5, 3])
def test3DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant(
[[[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]], [[11, 12]], [], [[13, 14]]
],
ragged_rank=1)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(st.indices,
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
[1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 2, 0], [1, 2, 1],
[2, 0, 0], [2, 0, 1], [4, 0, 0], [4, 0, 1]])
self.assertAllEqual(st.values,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
self.assertAllEqual(st.dense_shape, [5, 3, 2])
def test4DRaggedTensorWithOneRaggedDimension(self):
rt = ragged_factory_ops.constant(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [], [[[9, 10], [11, 12]]]],
ragged_rank=1)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(st.values, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
self.assertAllEqual(
st.indices,
[
[0, 0, 0, 0], # index for value=1
[0, 0, 0, 1], # index for value=2
[0, 0, 1, 0], # index for value=3
[0, 0, 1, 1], # index for value=4
[0, 1, 0, 0], # index for value=5
[0, 1, 0, 1], # index for value=6
[0, 1, 1, 0], # index for value=7
[0, 1, 1, 1], # index for value=8
[2, 0, 0, 0], # index for value=9
[2, 0, 0, 1], # index for value=10
[2, 0, 1, 0], # index for value=11
[2, 0, 1, 1], # index for value=12
])
self.assertAllEqual(st.dense_shape, [3, 2, 2, 2])
def test4DRaggedTensorWithTwoRaggedDimensions(self):
rt = ragged_factory_ops.constant(
[[[[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]]],
[[[11, 12]], [], [[13, 14]]], []],
ragged_rank=2)
st = self.evaluate(rt.to_sparse())
self.assertAllEqual(
st.indices,
[
[0, 0, 0, 0], # index for value=1
[0, 0, 0, 1], # index for value=2
[0, 0, 1, 0], # index for value=3
[0, 0, 1, 1], # index for value=4
[0, 1, 0, 0], # index for value=5
[0, 1, 0, 1], # index for value=6
[0, 1, 1, 0], # index for value=7
[0, 1, 1, 1], # index for value=8
[0, 1, 2, 0], # index for value=9
[0, 1, 2, 1], # index for value=10
[1, 0, 0, 0], # index for value=11
[1, 0, 0, 1], # index for value=12
[1, 2, 0, 0], # index for value=13
[1, 2, 0, 1], # index for value=14
])
self.assertAllEqual(st.values,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
self.assertAllEqual(st.dense_shape, [3, 3, 3, 2])
def testShape(self):
rt = ragged_factory_ops.constant([[1, 2], [3, 4, 5], [6], [], [7]])
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [7, 2])
self.assertEqual(st.values.shape.as_list(), [7])
self.assertEqual(st.dense_shape.shape.as_list(), [2])
rt = ragged_factory_ops.constant([[[1, 2]], [], [[3, 4]], []],
ragged_rank=1)
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [4, 3])
self.assertEqual(st.values.shape.as_list(), [4])
self.assertEqual(st.dense_shape.shape.as_list(), [3])
rt = ragged_factory_ops.constant([[[1], [2, 3, 4, 5, 6, 7]], [[]]])
st = rt.to_sparse()
self.assertEqual(st.indices.shape.as_list(), [7, 3])
self.assertEqual(st.values.shape.as_list(), [7])
self.assertEqual(st.dense_shape.shape.as_list(), [3])
def testKernelErrors(self):
# An empty vector, defined using a placeholder to ensure that we can't
# determine that it's invalid at graph-construction time.
empty_vector = array_ops.placeholder_with_default(
array_ops.zeros([0], dtypes.int64), shape=None)
bad_rt1 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[2, 3], values=[1, 2, 3], validate=False)
bad_split0 = r'First value of ragged splits must be 0.*'
with self.assertRaisesRegexp(errors.InvalidArgumentError, bad_split0):
self.evaluate(bad_rt1.to_sparse())
bad_rt2 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0, 5], values=empty_vector, validate=False)
bad_rt3 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0, 1],
values=ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0, 5], values=empty_vector, validate=False),
validate=False)
split_mismatch1_error = r'Final value of ragged splits must match.*'
for rt in [bad_rt2, bad_rt3]:
with self.assertRaisesRegexp(errors.InvalidArgumentError,
split_mismatch1_error):
self.evaluate(rt.to_sparse())
bad_rt4 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0, 5],
values=ragged_tensor.RaggedTensor.from_row_splits(
row_splits=[0], values=empty_vector, validate=False),
validate=False)
split_mismatch2_error = r'Final value of ragged splits must match.*'
with self.assertRaisesRegexp(errors.InvalidArgumentError,
split_mismatch2_error):
self.evaluate(bad_rt4.to_sparse())
bad_rt5 = ragged_tensor.RaggedTensor.from_row_splits(
row_splits=empty_vector, values=[], validate=False)
empty_splits_error = (r'ragged splits may not be empty.*')
with self.assertRaisesRegexp(errors.InvalidArgumentError,
empty_splits_error):
self.evaluate(bad_rt5.to_sparse())
def testGradient(self):
if context.executing_eagerly():
return
# rt1.shape == rt2.shape == [2, (D2), (D3), 2].
rt1 = ragged_factory_ops.constant(
[[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0]]]], ragged_rank=2)
rt2 = ragged_factory_ops.constant(
[[[[9.0, 8.0], [7.0, 6.0]], [[5.0, 4.0]]]], ragged_rank=2)
rt = ragged_functional_ops.map_flat_values(math_ops.add, rt1, rt2 * 2.0)
st = rt.to_sparse()
g1, g2 = gradients_impl.gradients(st.values,
[rt1.flat_values, rt2.flat_values])
self.assertRaggedEqual(g1, [[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]])
self.assertRaggedEqual(g2, [[2.0, 2.0], [2.0, 2.0], [2.0, 2.0]])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.boolean_mask."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedBooleanMaskOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
# Define short constants for true & false, so the data & mask can be lined
# up in the examples below. This makes it easier to read the examples, to
# see which values should be kept vs. masked.
T = True
F = False
@parameterized.parameters([
#=========================================================================
# Docstring examples
#=========================================================================
dict(
descr='Docstring example 1',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
mask=[[T, F, T], [F, F, F], [T, F, F]],
expected=ragged_factory_ops.constant_value([[1, 3], [], [7]])),
dict(
descr='Docstring example 2',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]]),
mask=ragged_factory_ops.constant_value([[F, F, T], [F], [T, T]]),
expected=ragged_factory_ops.constant_value([[3], [], [5, 6]])),
dict(
descr='Docstring example 3',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]]),
mask=[True, False, True],
expected=ragged_factory_ops.constant_value([[1, 2, 3], [5, 6]])),
#=========================================================================
# Uniform data and uniform mask.
#=========================================================================
dict(
descr='data.shape=[7]; mask.shape=[7]',
data=[1, 2, 3, 4, 5, 6, 7],
mask=[T, F, T, T, F, F, F],
expected=[1, 3, 4]),
dict(
descr='data.shape=[5, 3]; mask.shape=[5]',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]],
mask=[True, False, True, True, False],
expected=[[1, 2, 3], [7, 8, 9], [10, 11, 12]]),
dict(
descr='data.shape=[5, 3]; mask.shape=[5, 3]',
data=[[1, 2, 3], [4, 5, 6], [7, 8, 9], [0, 1, 2], [3, 4, 5]],
mask=[[F, F, F], [T, F, T], [T, T, T], [F, F, F], [T, T, F]],
expected=ragged_factory_ops.constant_value(
[[], [4, 6], [7, 8, 9], [], [3, 4]])),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[F, F, T],
expected=[[[2, 4], [6, 8]]]),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[F, F, T],
expected=[[[2, 4], [6, 8]]]),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3, 2]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[[T, F], [T, T], [F, F]],
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []],
ragged_rank=1)),
dict(
descr='data.shape=[3, 2, 2]; mask.shape=[3, 2, 2]',
data=[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]],
mask=[[[T, T], [F, T]], [[F, F], [F, F]], [[T, F], [T, T]]],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [4]], [[], []], [[2], [6, 8]]])),
dict(
descr='data.shape=mask.shape=[2, 2, 2, 2]',
data=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 3], [5, 7]]]],
mask=[[[[T, T], [F, F]], [[T, F], [F, F]]],
[[[F, F], [F, F]], [[T, T], [T, F]]]],
expected=ragged_factory_ops.constant_value(
[[[[1, 2], []], [[5], []]], [[[], []], [[1, 3], [5]]]])),
#=========================================================================
# Ragged data and ragged mask.
#=========================================================================
dict(
descr='data.shape=[5, (D2)]; mask.shape=[5, (D2)]',
data=ragged_factory_ops.constant_value(
[[1, 2], [3, 4, 5, 6], [7, 8, 9], [], [1, 2, 3]]),
mask=ragged_factory_ops.constant_value(
[[F, F], [F, T, F, T], [F, F, F], [], [T, F, T]]),
expected=ragged_factory_ops.constant_value(
[[], [4, 6], [], [], [1, 3]])),
dict(
descr='data.shape=[3, (D2), (D3)]; mask.shape=[3, (D2)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4], [6, 8]]]),
mask=ragged_factory_ops.constant_value([[T, F], [T, T], [F, F]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []])),
dict(
descr='data.shape=[3, (D2), D3]; mask.shape=[3, (D2)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8], [2, 4]], [[6, 8]]],
ragged_rank=1),
mask=ragged_factory_ops.constant_value([[T, F], [T, T, F], [F]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[5, 6], [7, 8]], []],
ragged_rank=1)),
dict(
descr='data.shape=[3, (D2), (D3)]; mask.shape=[3, (D2), (D3)]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[2, 4]]]),
mask=ragged_factory_ops.constant_value(
[[[T, T], [F, T]], [[F, F], [F, F]], [[T, F]]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2], [4]], [[], []], [[2]]])),
dict(
descr=('data.shape=[3, (D2), (D3), (D4)]; '
'mask.shape=[3, (D2), (D3), (D4)]'),
data=ragged_factory_ops.constant_value(
[[[[1, 2], [3, 4]], [[5, 6]]], [[[2, 4], [6, 8]]]]),
mask=ragged_factory_ops.constant_value(
[[[[T, T], [F, F]], [[T, F]]], [[[F, F], [T, T]]]]),
expected=ragged_factory_ops.constant_value(
[[[[1, 2], []], [[5]]], [[[], [6, 8]]]])),
#=========================================================================
# Ragged mask and uniform data
#=========================================================================
dict(
descr='data.shape=[2, 3]; mask.shape=[2, (3)]',
data=[[1, 2, 3], [4, 5, 6]],
mask=ragged_factory_ops.constant_value([[T, F, F], [F, T, T]]),
expected=ragged_factory_ops.constant_value([[1], [5, 6]])),
dict(
descr='data.shape=[2, 3, 2]; mask.shape=[2, (3)]',
data=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 0], [2, 4]]],
mask=ragged_factory_ops.constant_value([[T, F, F], [F, T, T]]),
expected=ragged_factory_ops.constant_value(
[[[1, 2]], [[9, 0], [2, 4]]],
ragged_rank=1)),
dict(
descr='data.shape=[2, 3, 2]; mask.shape=[2, (3), 2]',
data=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 0], [2, 4]]],
mask=ragged_factory_ops.constant_value(
[[[T, F], [F, F], [T, T]], [[T, F], [F, T], [F, F]]],
ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[1], [], [5, 6]], [[7], [0], []]])),
#=========================================================================
# Ragged data and uniform mask.
#=========================================================================
dict(
descr='data.shape=[4, (D2)]; mask.shape=[4]',
data=ragged_factory_ops.constant_value([[1, 2, 3], [4], [], [5, 6]]),
mask=[T, F, T, F],
expected=ragged_factory_ops.constant_value([[1, 2, 3], []])),
dict(
descr='data.shape=[4, (D2), (D3)]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[4], []], [[5, 6]], []]),
mask=[T, F, T, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[5, 6]], []])),
dict(
descr='data.shape=[4, (D2), 2]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [], [[5, 6]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1),
mask=[T, F, F, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1)),
dict(
descr='data.shape=[4, (D2), 2]; mask.shape=[4]',
data=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [], [[5, 6]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1),
mask=[T, F, F, T],
expected=ragged_factory_ops.constant_value(
[[[1, 2], [3, 4]], [[7, 8], [9, 0], [1, 2]]],
ragged_rank=1)),
dict(
descr='data.shape=[1, (2)]; mask.shape=[1, 2]',
data=ragged_factory_ops.constant_value([[1, 2]]),
mask=[[T, F]],
expected=ragged_factory_ops.constant_value([[1]])),
dict(
descr='data.shape=[2, (2), (D3)]; mask.shape=[2, 2]',
data=ragged_factory_ops.constant_value(
[[[1], [2, 3]], [[], [4, 5, 6]]]),
mask=[[T, F], [T, T]],
expected=ragged_factory_ops.constant_value([[[1]], [[], [4, 5, 6]]])),
dict(
descr='data.shape=[2, (2), 3]; mask.shape=[2, 2]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1),
mask=[[T, F], [T, T]],
expected=ragged_factory_ops.constant_value(
[[[1, 2, 3]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1)),
dict(
descr='data.shape=[2, (2), 3]; mask.shape=[2, 2, 3]',
data=ragged_factory_ops.constant_value(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]],
ragged_rank=1),
mask=[[[T, F, F], [T, F, T]], [[T, F, T], [F, F, F]]],
expected=ragged_factory_ops.constant_value(
[[[1], [4, 6]], [[7, 9], []]])),
]) # pyformat: disable
def testBooleanMask(self, descr, data, mask, expected):
actual = ragged_array_ops.boolean_mask(data, mask)
self.assertRaggedEqual(actual, expected)
def testErrors(self):
if not context.executing_eagerly():
self.assertRaisesRegexp(ValueError,
r'mask\.shape\.ndims must be known statically',
ragged_array_ops.boolean_mask, [[1, 2]],
array_ops.placeholder(dtypes.bool))
self.assertRaises(TypeError, ragged_array_ops.boolean_mask, [[1, 2]],
[[0, 1]])
self.assertRaisesRegexp(
ValueError, 'Tensor conversion requested dtype bool for '
'RaggedTensor with dtype int32', ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]),
ragged_factory_ops.constant([[0, 0]]))
self.assertRaisesRegexp(
ValueError, r'Shapes \(1, 2\) and \(1, 3\) are incompatible',
ragged_array_ops.boolean_mask, [[1, 2]], [[True, False, True]])
self.assertRaisesRegexp(errors.InvalidArgumentError,
r'Inputs must have identical ragged splits',
ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]),
ragged_factory_ops.constant([[True, False, True]]))
self.assertRaisesRegexp(ValueError, 'mask cannot be scalar',
ragged_array_ops.boolean_mask, [[1, 2]], True)
self.assertRaisesRegexp(ValueError, 'mask cannot be scalar',
ragged_array_ops.boolean_mask,
ragged_factory_ops.constant([[1, 2]]), True)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_boolean_mask_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operator Squeeze for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
def squeeze(input, axis=None, name=None): # pylint: disable=redefined-builtin
"""Ragged compatible squeeze.
If `input` is a `tf.Tensor`, then this calls `tf.squeeze`.
If `input` is a `tf.RaggedTensor`, then this operation takes `O(N)` time,
where `N` is the number of elements in the squeezed dimensions.
Args:
input: A potentially ragged tensor. The input to squeeze.
axis: An optional list of ints. Defaults to `None`. If the `input` is
ragged, it only squeezes the dimensions listed. It fails if `input` is
ragged and axis is []. If `input` is not ragged it calls tf.squeeze. Note
that it is an error to squeeze a dimension that is not 1. It must be in
the range of [-rank(input), rank(input)).
name: A name for the operation (optional).
Returns:
A potentially ragged tensor. Contains the same data as input,
but has one or more dimensions of size 1 removed.
"""
with ops.name_scope(name, 'RaggedSqueeze', [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)
if isinstance(input, ops.Tensor):
return array_ops.squeeze(input, axis, name)
if axis is None:
raise ValueError('Ragged.squeeze must have an axis argument.')
if isinstance(axis, int):
axis = [axis]
elif ((not isinstance(axis, (list, tuple))) or
(not all(isinstance(d, int) for d in axis))):
raise TypeError('Axis must be a list or tuple of integers.')
dense_dims = []
ragged_dims = []
# Normalize all the dims in axis to be positive
axis = [ragged_util.get_positive_axis(d, input.shape.ndims) for d in axis]
for dim in axis:
if dim > input.ragged_rank:
dense_dims.append(dim - input.ragged_rank)
else:
ragged_dims.append(dim)
# Make sure the specified ragged dimensions are squeezable.
assertion_list = []
scalar_tensor_one = constant_op.constant(1, dtype=input.row_splits.dtype)
for i, r in enumerate(input.nested_row_lengths()):
if i + 1 in ragged_dims:
assertion_list.append(
control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(r, scalar_tensor_one)),
['the given axis (axis = %d) is not squeezable!' % (i + 1)]))
if 0 in ragged_dims:
scalar_tensor_two = constant_op.constant(2, dtype=dtypes.int32)
assertion_list.append(
control_flow_ops.Assert(
math_ops.equal(
array_ops.size(input.row_splits), scalar_tensor_two),
['the given axis (axis = 0) is not squeezable!']))
    # At this point the specified ragged dimensions are known to be squeezable.
    squeezed_rt = control_flow_ops.with_dependencies(assertion_list,
                                                     input.flat_values)
if dense_dims:
# Gives error if the dense dimension is not squeezable.
squeezed_rt = array_ops.squeeze(squeezed_rt, dense_dims)
    remaining_row_splits = []
for i, row_split in enumerate(input.nested_row_splits):
      # Each row_splits tensor corresponds to dimension i + 1.
if (i + 1) not in ragged_dims:
remaining_row_splits.append(row_split)
# Take care of the first row if it is to be squeezed.
if remaining_row_splits and 0 in ragged_dims:
remaining_row_splits.pop(0)
squeezed_rt = RaggedTensor.from_nested_row_splits(squeezed_rt,
remaining_row_splits)
# Corner case: when removing all the ragged dimensions and the output is
# a scalar tensor e.g. ragged.squeeze(ragged.constant([[[1]]])).
if set(range(0, input.ragged_rank + 1)).issubset(set(ragged_dims)):
squeezed_rt = array_ops.squeeze(squeezed_rt, [0], name)
return squeezed_rt
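# Usage sketch (illustrative, with assumed inputs):
#   rt = tf.ragged.constant([[[1], [2]], [[3]]])  # innermost rows all length 1
#   squeezed = squeeze(rt, axis=[2])              # -> [[1, 2], [3]]
# Squeezing a ragged dimension only succeeds when every row in that dimension
# has length 1; otherwise the runtime assertions constructed above fail.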
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_squeeze_op.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_map_ops.map_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops as mo
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedMapOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
      # The following test cases map over a RaggedTensor and apply a
      # transformation whose result has shape:
# [d1, (d2)] -> [d1]
dict(
fn=mo.reduce_mean,
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[2, 4, 6],
),
dict(
fn=string_ops.reduce_join,
elems=[['foo', 'bar', 'baz'], ['a'], ['b', 'c']],
expected_output=[b'foobarbaz', b'a', b'bc'],
dtype=dtypes.string,
),
# [d1, (d2)] -> [d1, 2]
dict(
fn=lambda x: array_ops.stack([mo.reduce_mean(x), mo.reduce_sum(x)]),
# fn=self.stack_mean_and_sum,
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[2, 6], [4.5, 9], [6.5, 13]],
dtype=dtypes.float32,
expected_ragged_rank=0,
),
# [d1, (d2)] -> [d1, (d2)]
dict(
fn=lambda x: x + np.int64(1),
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[2, 3, 4], [5, 6], [7, 8]],
dtype=dtypes.int64,
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), d3] -> [d1, (d2), d3]
dict(
fn=lambda x: x + np.int64(1),
elems=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
elems_ragged_rank=1,
expected_ragged_rank=1,
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
expected_output=[[[2, 3], [4, 5]], [], [[6, 7], [8, 9], [10, 1]]],
),
# [d1, (d2)] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0]),
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[[1, 2, 3]], [[4, 5]], [[6, 7]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3)] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_functional_ops.map_flat_values(mo.add, x, 1),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[[2, 3, 4]], [[5, 6], [7, 8]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3)] -> [d1, (d2)]
dict(
fn=lambda x: ragged_math_ops.reduce_sum(x, axis=1),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[6], [9, 13]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), (d3)] -> [d1, (d3)]
dict(
fn=lambda x: ragged_math_ops.reduce_sum(x, axis=0),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[1, 2, 3], [10, 12]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), (d3)] -> [d1]
dict(
fn=ragged_math_ops.reduce_sum,
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[6, 22],
result_dtype=dtypes.int64,
),
# [d1] -> [d1, (d2)]
dict(
fn=mo.range,
elems=[4, 0, 2],
expected_output=[[0, 1, 2, 3], [], [0, 1]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_math_ops.range(mo.range(x)),
elems=[5, 0, 3],
expected_output=[[[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3]], [],
[[], [0], [0, 1]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3), (d4a), (d5)] -> [d1, (d2), (d3), (d4b), (d5)]
dict(
fn=lambda x: x + np.int64(1),
elems=[[[[[1, 2, 3]], [[4], [5]]]], [[[[6, 7]]], [[[8], []]]]],
expected_output=[[[[[2, 3, 4]], [[5], [6]]]], [[[[7, 8]]], [[[9],
[]]]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=4),
),
])
def testRaggedMap(
self,
fn,
elems,
expected_output,
expected_ragged_rank=None,
result_ragged_rank=None,
elems_ragged_rank=None,
dtype=dtypes.int64,
result_dtype=None,
infer_shape=False,
):
elems = ragged_factory_ops.constant(elems, dtype, elems_ragged_rank)
output = ragged_map_ops.map_fn(
fn=fn, elems=elems, dtype=result_dtype, infer_shape=infer_shape)
expected_rt = ragged_factory_ops.constant(
expected_output, ragged_rank=expected_ragged_rank)
self.assertRaggedEqual(expected_rt, output)
def testRaggedMapOnStructure(self):
batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]])
# [[10, 20, 30], [40], [50, 60, 70]]
robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10)
features = {'batman': batman, 'robin': robin}
def _reduce_sum_from_all(f):
return mo.reduce_sum(f['batman']) + mo.reduce_sum(f['robin'])
output = ragged_map_ops.map_fn(
fn=_reduce_sum_from_all,
elems=features,
dtype=dtypes.int32,
)
self.assertRaggedEqual(output, [66, 44, 198])
# Test mapping over a dict of RTs can produce a dict of RTs.
def testRaggedMapOnStructure_RaggedOutputs(self):
batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]])
# [[10, 20, 30], [40], [50, 60, 70]]
robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10)
features = {'batman': batman, 'robin': robin}
def _increment(f):
return {
'batman': f['batman'] + 1,
'robin': f['robin'] + 1,
}
output = ragged_map_ops.map_fn(
fn=_increment,
elems=features,
infer_shape=False,
dtype={
'batman':
ragged_tensor.RaggedTensorType(
dtype=dtypes.int32, ragged_rank=1),
'robin':
ragged_tensor.RaggedTensorType(
dtype=dtypes.int32, ragged_rank=1)
},
)
self.assertRaggedEqual(output['batman'], [[2, 3, 4], [5], [6, 7, 8]])
self.assertRaggedEqual(output['robin'], [[11, 21, 31], [41], [51, 61, 71]])
def testZip(self):
x = ragged_factory_ops.constant(
[[10, 20], [30, 40], [50, 60], [70], [80, 90, 100]], dtypes.int64)
y = array_ops.expand_dims(mo.range(x.nrows(out_type=dtypes.int64)), axis=1)
def _zip(foo):
y_val, x_val = foo
bar = backend.tile(y_val, array_ops.shape(x_val))
return array_ops.stack([bar, x_val], axis=1)
output = ragged_map_ops.map_fn(
_zip, (y, x),
dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.int64, ragged_rank=1),
infer_shape=False)
self.assertRaggedEqual(
output, [[[0, 10], [0, 20]], [[1, 30], [1, 40]], [[2, 50], [2, 60]],
[[3, 70]], [[4, 80], [4, 90], [4, 100]]])
def testBatchGather(self):
tokens = ragged_factory_ops.constant([['hello', '.', 'there'], ['merhaba'],
['bonjour', '.', 'ca va', '?']])
indices = ragged_factory_ops.constant([[0, 2], [0], [0, 2]])
def gather(x):
tokens_val, indices_val = x
return array_ops.gather(tokens_val, indices_val)
data = tokens, indices
out = ragged_map_ops.map_fn(
gather,
data,
dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.string, ragged_rank=1),
infer_shape=False)
self.assertRaggedEqual(
out, [[b'hello', b'there'], [b'merhaba'], [b'bonjour', b'ca va']])
def testMismatchRaggedRank(self):
elems = ragged_factory_ops.constant([[[1, 2, 3]], [[4, 5], [6, 7]]])
fn = lambda x: ragged_math_ops.reduce_sum(x, axis=0)
with self.assertRaisesWithLiteralMatch(
ValueError, r'The declared ragged rank (23) mismatches the result (1)'):
_ = ragged_map_ops.map_fn(
fn,
elems,
dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=23))
def testMismatchRaggedRank2(self):
elems = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [6, 7]])
fn = lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0])
with self.assertRaisesWithLiteralMatch(
ValueError, r'The declared ragged rank (10) mismatches the result (2)'):
_ = ragged_map_ops.map_fn(
fn,
elems,
dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=10))
def testMapOnSparseTensor(self):
s = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
values=[0, 5, 0, 4],
dense_shape=[2, 2],
)
t2 = ragged_tensor.RaggedTensor.from_sparse(s)
id_t2 = ragged_map_ops.map_fn(
lambda x: x, t2,
)
self.assertRaggedEqual(id_t2, [[0, 5], [0, 4]])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_map_fn_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for ragged tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util.tf_export import tf_export
@tf_export("ragged.map_flat_values")
def map_flat_values(op, *args, **kwargs):
"""Applies `op` to the values of one or more RaggedTensors.
Replaces any `RaggedTensor` in `args` or `kwargs` with its `flat_values`
tensor, and then calls `op`. Returns a `RaggedTensor` that is constructed
from the input `RaggedTensor`s' `nested_row_splits` and the value returned by
the `op`.
If the input arguments contain multiple `RaggedTensor`s, then they must have
identical `nested_row_splits`.
Examples:
```python
>>> rt = ragged.constant([[1, 2, 3], [], [4, 5], [6]])
>>> ragged.map_flat_values(tf.ones_like, rt).eval().tolist()
[[1, 1, 1], [], [1, 1], [1]]
>>> ragged.map_flat_values(tf.multiply, rt, rt).eval().tolist()
[[1, 4, 9], [], [16, 25], [36]]
>>> ragged.map_flat_values(tf.add, rt, 5).eval().tolist()
[[6, 7, 8], [], [9, 10], [11]]
```
Args:
op: The operation that should be applied to the RaggedTensor `flat_values`.
`op` is typically an element-wise operation (such as math_ops.add), but
any operation that preserves the size of the outermost dimension can be
used. I.e., `shape[0]` of the value returned by `op` must match
`shape[0]` of the `RaggedTensor`s' `flat_values` tensors.
*args: Arguments for `op`.
**kwargs: Keyword arguments for `op`.
Returns:
A `RaggedTensor` whose `ragged_rank` matches the `ragged_rank` of all
input `RaggedTensor`s.
Raises:
ValueError: If args contains no `RaggedTensors`, or if the `nested_splits`
of the input `RaggedTensor`s are not identical.
"""
# Replace RaggedTensors with their values; and collect the splits tensors
# from each RaggedTensor.
nested_splits_lists = []
inner_args = _replace_ragged_with_flat_values(args, nested_splits_lists)
inner_kwargs = _replace_ragged_with_flat_values(kwargs, nested_splits_lists)
if not nested_splits_lists:
return op(*args, **kwargs)
split_dtypes = set(splits[0].dtype for splits in nested_splits_lists)
if len(split_dtypes) > 1:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; "
"use RaggedTensor.with_row_splits_dtype() to convert "
"them to compatible dtypes.")
nested_splits_lists = [
[math_ops.cast(s, dtypes.int64) for s in nested_splits] # pylint: disable=g-complex-comprehension
for nested_splits in nested_splits_lists]
with ops.control_dependencies(
ragged_util.assert_splits_match(nested_splits_lists)):
# Delegate to op, and then compose the result from the transformed values
# and the splits.
return ragged_tensor.RaggedTensor.from_nested_row_splits(
op(*inner_args, **inner_kwargs), nested_splits_lists[0], validate=False)
def _replace_ragged_with_flat_values(value, nested_splits_lists):
"""Replace RaggedTensors with their flat_values, and record their splits.
Returns a copy of `value`, with any nested `RaggedTensor`s replaced by their
`flat_values` tensor. Looks inside lists, tuples, and dicts.
  Appends each `RaggedTensor`'s `nested_row_splits` to `nested_splits_lists`.
Args:
value: The value that should be transformed by replacing `RaggedTensors`.
nested_splits_lists: An output parameter used to record the `nested_splits`
for any `RaggedTensors` that were replaced.
Returns:
    A copy of `value` with nested `RaggedTensor`s replaced by their
    `flat_values`.
"""
# Base case
if ragged_tensor.is_ragged(value):
value = ragged_tensor.convert_to_tensor_or_ragged_tensor(value)
nested_splits_lists.append(value.nested_row_splits)
return value.flat_values
# Recursion cases
def recurse(v):
return _replace_ragged_with_flat_values(v, nested_splits_lists)
if isinstance(value, list):
return [recurse(v) for v in value]
elif isinstance(value, tuple):
return tuple(recurse(v) for v in value)
elif isinstance(value, dict):
return dict((k, recurse(v)) for (k, v) in value.items())
else:
return value
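# --- Illustrative sketch (not part of the original module) ---
# A minimal usage example of the op defined above, via its public export
# `tf.ragged.map_flat_values` (per the @tf_export decorator); assumes eager
# execution, and the expected outputs are taken from the docstring examples.
import tensorflow as tf

rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])
print(tf.ragged.map_flat_values(tf.ones_like, rt))     # [[1, 1, 1], [], [1, 1], [1]]
print(tf.ragged.map_flat_values(tf.multiply, rt, rt))  # [[1, 4, 9], [], [16, 25], [36]]
print(tf.ragged.map_flat_values(tf.add, rt, 5))        # [[6, 7, 8], [], [9, 10], [11]]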
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_functional_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the segment_id_ops.row_splits_to_segment_ids() op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSplitsToSegmentIdsOpTest(ragged_test_util.RaggedTensorTestCase):
def testDocStringExample(self):
splits = [0, 3, 3, 5, 6, 9]
expected = [0, 0, 0, 2, 2, 3, 4, 4, 4]
segment_ids = segment_id_ops.row_splits_to_segment_ids(splits)
self.assertAllEqual(segment_ids, expected)
def testEmptySplits(self):
    # Note: the splits for an empty ragged tensor consist of a single zero.
segment_ids = segment_id_ops.row_splits_to_segment_ids([0])
self.assertAllEqual(segment_ids, [])
def testErrors(self):
self.assertRaisesRegexp(ValueError, r'Invalid row_splits: \[\]',
segment_id_ops.row_splits_to_segment_ids, [])
self.assertRaisesRegexp(
ValueError, r'splits must have dtype int32 or int64',
segment_id_ops.row_splits_to_segment_ids,
constant_op.constant([0.5]))
self.assertRaisesRegexp(ValueError, r'Shape \(\) must have rank 1',
segment_id_ops.row_splits_to_segment_ids, 0)
self.assertRaisesRegexp(ValueError, r'Shape \(1, 1\) must have rank 1',
segment_id_ops.row_splits_to_segment_ids, [[0]])
if __name__ == '__main__':
googletest.main()
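# --- Illustrative sketch (not part of the original test file) ---
# The splits -> segment_ids mapping exercised in testDocStringExample above,
# reproduced with plain NumPy: row i contributes (splits[i+1] - splits[i])
# copies of the segment id i.
import numpy as np

splits = np.array([0, 3, 3, 5, 6, 9])
row_lengths = splits[1:] - splits[:-1]  # [3, 0, 2, 1, 3]
segment_ids = np.repeat(np.arange(len(row_lengths)), row_lengths)
print(segment_ids.tolist())  # [0, 0, 0, 2, 2, 3, 4, 4, 4]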
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_row_splits_to_segment_ids_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for overloaded RaggedTensor operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedElementwiseOpsTest(ragged_test_util.RaggedTensorTestCase):
def testOrderingOperators(self):
x = ragged_factory_ops.constant([[1, 5], [3]])
y = ragged_factory_ops.constant([[4, 5], [1]])
self.assertRaggedEqual((x > y), [[False, False], [True]])
self.assertRaggedEqual((x >= y), [[False, True], [True]])
self.assertRaggedEqual((x < y), [[True, False], [False]])
self.assertRaggedEqual((x <= y), [[True, True], [False]])
def testArithmeticOperators(self):
x = ragged_factory_ops.constant([[1.0, -2.0], [8.0]])
y = ragged_factory_ops.constant([[4.0, 4.0], [2.0]])
self.assertRaggedEqual(abs(x), [[1.0, 2.0], [8.0]])
self.assertRaggedEqual((-x), [[-1.0, 2.0], [-8.0]])
self.assertRaggedEqual((x + y), [[5.0, 2.0], [10.0]])
self.assertRaggedEqual((3.0 + y), [[7.0, 7.0], [5.0]])
self.assertRaggedEqual((x + 3.0), [[4.0, 1.0], [11.0]])
self.assertRaggedEqual((x - y), [[-3.0, -6.0], [6.0]])
self.assertRaggedEqual((3.0 - y), [[-1.0, -1.0], [1.0]])
self.assertRaggedEqual((x + 3.0), [[4.0, 1.0], [11.0]])
self.assertRaggedEqual((x * y), [[4.0, -8.0], [16.0]])
self.assertRaggedEqual((3.0 * y), [[12.0, 12.0], [6.0]])
self.assertRaggedEqual((x * 3.0), [[3.0, -6.0], [24.0]])
self.assertRaggedEqual((x / y), [[0.25, -0.5], [4.0]])
self.assertRaggedEqual((y / x), [[4.0, -2.0], [0.25]])
self.assertRaggedEqual((2.0 / y), [[0.5, 0.5], [1.0]])
self.assertRaggedEqual((x / 2.0), [[0.5, -1.0], [4.0]])
self.assertRaggedEqual((x // y), [[0.0, -1.0], [4.0]])
self.assertRaggedEqual((y // x), [[4.0, -2.0], [0.0]])
self.assertRaggedEqual((2.0 // y), [[0.0, 0.0], [1.0]])
self.assertRaggedEqual((x // 2.0), [[0.0, -1.0], [4.0]])
self.assertRaggedEqual((x % y), [[1.0, 2.0], [0.0]])
self.assertRaggedEqual((y % x), [[0.0, -0.0], [2.0]])
self.assertRaggedEqual((2.0 % y), [[2.0, 2.0], [0.0]])
self.assertRaggedEqual((x % 2.0), [[1.0, 0.0], [0.0]])
def testLogicalOperators(self):
a = ragged_factory_ops.constant([[True, True], [False]])
b = ragged_factory_ops.constant([[True, False], [False]])
self.assertRaggedEqual((~a), [[False, False], [True]])
self.assertRaggedEqual((a & b), [[True, False], [False]])
self.assertRaggedEqual((a & True), [[True, True], [False]])
self.assertRaggedEqual((True & b), [[True, False], [False]])
self.assertRaggedEqual((a | b), [[True, True], [False]])
self.assertRaggedEqual((a | False), [[True, True], [False]])
self.assertRaggedEqual((False | b), [[True, False], [False]])
self.assertRaggedEqual((a ^ b), [[False, True], [False]])
self.assertRaggedEqual((a ^ True), [[False, False], [True]])
self.assertRaggedEqual((True ^ b), [[False, True], [True]])
def testDummyOperators(self):
a = ragged_factory_ops.constant([[True, True], [False]])
with self.assertRaisesRegexp(TypeError,
'RaggedTensor may not be used as a boolean.'):
bool(a)
with self.assertRaisesRegexp(TypeError,
'RaggedTensor may not be used as a boolean.'):
if a:
pass
if __name__ == '__main__':
googletest.main()
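# --- Illustrative sketch (not part of the original test file) ---
# Overloaded operators on ragged tensors, mirroring testArithmeticOperators
# above; assumes eager execution and uses the public tf.ragged.constant.
import tensorflow as tf

x = tf.ragged.constant([[1.0, -2.0], [8.0]])
y = tf.ragged.constant([[4.0, 4.0], [2.0]])
print((x + y).to_list())    # [[5.0, 2.0], [10.0]]
print((x * 3.0).to_list())  # [[3.0, -6.0], [24.0]]
print((x > y).to_list())    # [[False, False], [True]]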
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_operators_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.size."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_squeeze_op
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSqueezeTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
{
'input_list': []
},
{
'input_list': [[]],
'squeeze_ranks': [0]
},
{
'input_list': [[[[], []], [[], []]]],
'squeeze_ranks': [0]
},
])
def test_passing_empty(self, input_list, squeeze_ranks=None):
rt = ragged_squeeze_op.squeeze(
ragged_factory_ops.constant(input_list), squeeze_ranks)
dt = array_ops.squeeze(constant_op.constant(input_list), squeeze_ranks)
self.assertRaggedEqual(ragged_conversion_ops.to_tensor(rt), dt)
@parameterized.parameters([
{
'input_list': [[1]],
'squeeze_ranks': [0]
},
{
'input_list': [[1]],
'squeeze_ranks': [0, 1]
},
{
'input_list': [[1, 2]],
'squeeze_ranks': [0]
},
{
'input_list': [[1], [2]],
'squeeze_ranks': [1]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [1]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [3]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0, 3]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0, 1]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [1, 3]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0, 1, 3]
},
{
'input_list': [[[1], [2]], [[3], [4]]],
'squeeze_ranks': [2]
},
{
'input_list': [[1], [2]],
'squeeze_ranks': [-1]
},
])
def test_passing_simple(self, input_list, squeeze_ranks=None):
rt = ragged_squeeze_op.squeeze(
ragged_factory_ops.constant(input_list), squeeze_ranks)
dt = array_ops.squeeze(constant_op.constant(input_list), squeeze_ranks)
self.assertRaggedEqual(ragged_conversion_ops.to_tensor(rt), dt)
@parameterized.parameters([
# ragged_conversion_ops.from_tensor does not work for this
# {'input_list': [1]},
{
'input_list': [[1]],
'squeeze_ranks': [0]
},
{
'input_list': [[1, 2]],
'squeeze_ranks': [0]
},
{
'input_list': [[1], [2]],
'squeeze_ranks': [1]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [1]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [3]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0, 3]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0, 1]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [1, 3]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0, 1, 3]
},
{
'input_list': [[[1], [2]], [[3], [4]]],
'squeeze_ranks': [2]
},
])
def test_passing_simple_from_dense(self, input_list, squeeze_ranks=None):
dt = constant_op.constant(input_list)
rt = ragged_conversion_ops.from_tensor(dt)
rt_s = ragged_squeeze_op.squeeze(rt, squeeze_ranks)
dt_s = array_ops.squeeze(dt, squeeze_ranks)
self.assertRaggedEqual(ragged_conversion_ops.to_tensor(rt_s), dt_s)
@parameterized.parameters([
{
'input_list': [[[[[[1]], [[1, 2]]]], [[[[]], [[]]]]]],
'output_list': [[[1], [1, 2]], [[], []]],
'squeeze_ranks': [0, 2, 4]
},
{
'input_list': [[[[[[1]], [[1, 2]]]], [[[[]], [[]]]]]],
'output_list': [[[[[1]], [[1, 2]]]], [[[[]], [[]]]]],
'squeeze_ranks': [0]
},
])
def test_passing_ragged(self, input_list, output_list, squeeze_ranks=None):
rt = ragged_factory_ops.constant(input_list)
rt_s = ragged_squeeze_op.squeeze(rt, squeeze_ranks)
ref = ragged_factory_ops.constant(output_list)
self.assertRaggedEqual(rt_s, ref)
def test_passing_text(self):
rt = ragged_factory_ops.constant([[[[[[[['H']], [['e']], [['l']], [['l']],
[['o']]],
[[['W']], [['o']], [['r']], [['l']],
[['d']], [['!']]]]],
[[[[['T']], [['h']], [['i']], [['s']]],
[[['i']], [['s']]],
[[['M']], [['e']], [['h']], [['r']],
[['d']], [['a']], [['d']]],
[[['.']]]]]]]])
output_list = [[['H', 'e', 'l', 'l', 'o'], ['W', 'o', 'r', 'l', 'd', '!']],
[['T', 'h', 'i', 's'], ['i', 's'],
['M', 'e', 'h', 'r', 'd', 'a', 'd'], ['.']]]
ref = ragged_factory_ops.constant(output_list)
rt_s = ragged_squeeze_op.squeeze(rt, [0, 1, 3, 6, 7])
self.assertRaggedEqual(rt_s, ref)
@parameterized.parameters([
{
'input_list': [[]],
'squeeze_ranks': [1]
},
{
'input_list': [[1, 2]],
'squeeze_ranks': [1]
},
{
'input_list': [[1], [2]],
'squeeze_ranks': [0]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0, 2]
},
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [2]
},
{
'input_list': [[[1], [2]], [[3], [4]]],
'squeeze_ranks': [0]
},
{
'input_list': [[[1], [2]], [[3], [4]]],
'squeeze_ranks': [1]
},
{
'input_list': [[], []],
'squeeze_ranks': [1]
},
{
'input_list': [[[], []], [[], []]],
'squeeze_ranks': [1]
},
])
def test_failing_InvalidArgumentError(self, input_list, squeeze_ranks):
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
ragged_squeeze_op.squeeze(
ragged_factory_ops.constant(input_list), squeeze_ranks))
@parameterized.parameters([
{
'input_list': [[]]
},
{
'input_list': [[1]]
},
{
'input_list': [[1, 2]]
},
{
'input_list': [[[1], [2]], [[3], [4]]]
},
{
'input_list': [[1]]
},
{
'input_list': [[[1], [2]], [[3], [4]]]
},
{
'input_list': [[[[12], [11]]]]
},
])
def test_failing_no_squeeze_dim_specified(self, input_list):
with self.assertRaises(ValueError):
ragged_squeeze_op.squeeze(ragged_factory_ops.constant(input_list))
@parameterized.parameters([
{
'input_list': [[[[12], [11]]]],
'squeeze_ranks': [0, 1, 3]
},
])
def test_failing_axis_is_not_a_list(self, input_list, squeeze_ranks):
with self.assertRaises(TypeError):
tensor_ranks = constant_op.constant(squeeze_ranks)
ragged_squeeze_op.squeeze(
ragged_factory_ops.constant(input_list), tensor_ranks)
if __name__ == '__main__':
googletest.main()
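# --- Illustrative sketch (not part of the original test file) ---
# The dense comparison the tests above rely on: squeezing size-1 axes of the
# dense counterpart with tf.squeeze. ragged_squeeze_op.squeeze is expected to
# agree with this on the uniform axes; it is not a public tf.* symbol here.
import tensorflow as tf

dt = tf.constant([[[[12], [11]]]])        # shape [1, 1, 2, 1]
print(tf.squeeze(dt, axis=[0, 3]).shape)  # (1, 2) -- axes 0 and 3 have size 1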
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_squeeze_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.expand_dims."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedExpandDimsOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
  # An example 4-d ragged tensor with shape [3, (D2), (D3), 2], and the
  # expected results of calling expand_dims on each axis.  Cf. the table of
  # expected result shapes in the ragged_array_ops.expand_dims docstring.
EXAMPLE4D = [[[[1, 1], [2, 2]], [[3, 3]]],
[],
[[], [[4, 4], [5, 5], [6, 6]]]] # pyformat: disable
EXAMPLE4D_EXPAND_AXIS = {
0: [EXAMPLE4D],
1: [[d0] for d0 in EXAMPLE4D],
2: [[[d1] for d1 in d0] for d0 in EXAMPLE4D],
3: [[[[d2] for d2 in d1] for d1 in d0] for d0 in EXAMPLE4D],
4: [[[[[d3] for d3 in d2] for d2 in d1] for d1 in d0] for d0 in EXAMPLE4D]
}
@parameterized.parameters([
#=========================================================================
# Docstring examples: 2D Ragged Inputs
dict(rt_input=[[1, 2], [3]],
axis=0,
expected=[[[1, 2], [3]]],
expected_shape=[1, None, None]),
dict(rt_input=[[1, 2], [3]],
axis=1,
expected=[[[1, 2]], [[3]]],
expected_shape=[2, None, None]),
dict(rt_input=[[1, 2], [3]],
axis=2,
expected=[[[1], [2]], [[3]]],
expected_shape=[2, None, 1]),
#=========================================================================
# 2D Tensor Inputs
dict(rt_input=[[1, 2], [3, 4], [5, 6]],
ragged_rank=0,
axis=0,
expected=[[[1, 2], [3, 4], [5, 6]]],
expected_shape=[1, 3, 2]),
dict(rt_input=[[1, 2], [3, 4], [5, 6]],
ragged_rank=0,
axis=1,
expected=[[[1, 2]], [[3, 4]], [[5, 6]]],
expected_shape=[3, 1, 2]),
dict(rt_input=[[1, 2], [3, 4], [5, 6]],
ragged_rank=0,
axis=2,
expected=[[[1], [2]], [[3], [4]], [[5], [6]]],
expected_shape=[3, 2, 1]),
#=========================================================================
# 4D Ragged Inputs: [3, (D2), (D3), 2]
# c.f. the table of expected result shapes in the expand_dims docstring.
dict(rt_input=EXAMPLE4D,
ragged_rank=2,
axis=0,
expected=EXAMPLE4D_EXPAND_AXIS[0],
expected_shape=[1, None, None, None, 2]),
dict(rt_input=EXAMPLE4D,
ragged_rank=2,
axis=1,
expected=EXAMPLE4D_EXPAND_AXIS[1],
expected_shape=[3, None, None, None, 2]),
dict(rt_input=EXAMPLE4D,
ragged_rank=2,
axis=2,
expected=EXAMPLE4D_EXPAND_AXIS[2],
expected_shape=[3, None, None, None, 2]),
dict(rt_input=EXAMPLE4D,
ragged_rank=2,
axis=3,
expected=EXAMPLE4D_EXPAND_AXIS[3],
expected_shape=[3, None, None, 1, 2]),
dict(rt_input=EXAMPLE4D,
ragged_rank=2,
axis=4,
expected=EXAMPLE4D_EXPAND_AXIS[4],
expected_shape=[3, None, None, 2, 1]),
]) # pyformat: disable
def testRaggedExpandDims(self,
rt_input,
axis,
expected,
ragged_rank=None,
expected_shape=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
expanded = ragged_array_ops.expand_dims(rt, axis=axis)
self.assertEqual(expanded.shape.ndims, rt.shape.ndims + 1)
if expected_shape is not None:
self.assertEqual(expanded.shape.as_list(), expected_shape)
self.assertRaggedEqual(expanded, expected)
if __name__ == '__main__':
googletest.main()
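# --- Illustrative sketch (not part of the original test file) ---
# The 2-D docstring examples from the parameters above, assuming that
# tf.expand_dims dispatches to the ragged implementation for RaggedTensor
# inputs (eager execution assumed).
import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [3]])
print(tf.expand_dims(rt, axis=0).to_list())  # [[[1, 2], [3]]]
print(tf.expand_dims(rt, axis=1).to_list())  # [[[1, 2]], [[3]]]
print(tf.expand_dims(rt, axis=2).to_list())  # [[[1], [2]], [[3]]]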
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_expand_dims_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_factory_ops.constant."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import ragged
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConstOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters(
#=========================================================================
# 0-dimensional tensors.
dict(pylist=b'x', expected_shape=()),
#=========================================================================
# 1-dimensional tensors.
dict(pylist=[1, 2, 3], expected_shape=(3,)),
#=========================================================================
# 2-dimensional tensors.
dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)),
dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)),
#=========================================================================
# 3-dimensional tensors.
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
expected_shape=(3, None, None)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
inner_shape=(2,),
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
inner_shape=(2,),
expected_shape=(3, None, 2)),
# 3-dimensional tensors with numpy arrays
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
expected_shape=(3, None, None)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
inner_shape=(2,),
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
inner_shape=(2,),
expected_shape=(3, None, 2)),
#=========================================================================
# 4-dimensional tensors.
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
expected_shape=(2, None, None, None)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
ragged_rank=1,
expected_shape=(2, None, 2, 2)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
inner_shape=(2,),
expected_shape=(2, None, None, 2)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
inner_shape=(2, 2),
expected_shape=(2, None, 2, 2)),
# 4-dimensional tensors with numpy arrays
dict(
pylist=np.array([[[np.array([1, 2]), [3, 4]], [[5, 6], [7, 8]]],
np.array([[[2, 4], [6, 8]], [[1, 5], [7, 9]]])]),
expected_shape=(2, None, None, None)),
#=========================================================================
# Empty tensors (no scalar values) w/ default ragged_rank and inner_shape
dict(pylist=[], expected_shape=(0,)),
dict(pylist=[[], [], np.array([])], expected_shape=(3, None)),
dict(
pylist=[[[], []], [], [[], [[]]]],
expected_shape=(3, None, None, None)),
dict(
pylist=np.array([np.array([[], []]),
np.array([]), [[], [[]]]]),
expected_shape=(3, None, None, None)),
#=========================================================================
# Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape
dict(pylist=[], ragged_rank=1, expected_shape=(0, None)),
dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)),
dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)),
dict(
pylist=[],
ragged_rank=1,
inner_shape=(100, 20),
expected_shape=(0, None, 100, 20)),
dict(
pylist=[],
ragged_rank=2,
inner_shape=(100, 20),
expected_shape=(0, None, None, 100, 20)),
dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)),
dict(pylist=[], inner_shape=(0,), expected_shape=(0,)),
dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)),
dict(
pylist=np.array([]),
ragged_rank=1,
inner_shape=(100, 20),
expected_shape=(0, None, 100, 20)),
#=========================================================================
# default/inferred dtypes
dict(pylist=[], expected_dtype=dtypes.float32),
dict(pylist=[[[], [[[]], []]]], expected_dtype=dtypes.float32),
dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=dtypes.int32),
dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=dtypes.float32),
dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=dtypes.float32),
dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=dtypes.string),
dict(pylist=[[True]], expected_dtype=dtypes.bool),
dict(
pylist=[np.array([1, 2]), np.array([3.]), [4, 5, 6]],
expected_dtype=dtypes.float32),
#=========================================================================
# explicit dtypes
dict(pylist=[], dtype=dtypes.float32),
dict(pylist=[], dtype=dtypes.string),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.int64),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.int32),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=dtypes.float32),
dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=dtypes.float16),
dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=dtypes.float32),
dict(
pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']],
dtype=dtypes.string),
)
def testRaggedConst(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
expected_shape=None,
expected_dtype=None):
"""Tests that `ragged_const(pylist).eval().tolist() == pylist`.
Args:
pylist: The `pylist` argument for `ragged_const()`.
dtype: The `dtype` argument for `ragged_const()`. If not None, then also
test that the resulting ragged tensor has this `dtype`.
ragged_rank: The `ragged_rank` argument for `ragged_const()`. If not
None, then also test that the resulting ragged tensor has this
`ragged_rank`.
inner_shape: The `inner_shape` argument for `ragged_const()`. If not
None, then also test that the resulting ragged tensor has this
`inner_shape`.
expected_shape: The expected shape for the resulting ragged tensor.
expected_dtype: The expected dtype for the resulting ragged tensor (used
to test default/inferred types when dtype=None).
"""
rt = ragged_factory_ops.constant(
pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape)
# Normalize the pylist, i.e., convert all np.arrays to list.
# E.g., [np.array((1,2))] --> [[1,2]]
pylist = self._normalize_pylist(pylist)
# If dtype was explicitly specified, check it.
if dtype is not None:
self.assertEqual(rt.dtype, dtype)
if expected_dtype is not None:
self.assertEqual(rt.dtype, expected_dtype)
# If ragged_rank was explicitly specified, check it.
if ragged_rank is not None:
if isinstance(rt, ragged_tensor.RaggedTensor):
self.assertEqual(rt.ragged_rank, ragged_rank)
else:
self.assertEqual(0, ragged_rank)
# If inner_shape was explicitly specified, check it.
if inner_shape is not None:
if isinstance(rt, ragged_tensor.RaggedTensor):
self.assertEqual(rt.flat_values.shape.as_list()[1:], list(inner_shape))
else:
self.assertEqual(rt.shape.as_list(), list(inner_shape))
if expected_shape is not None:
self.assertEqual(tuple(rt.shape.as_list()), expected_shape)
self.assertRaggedEqual(rt, pylist)
@parameterized.parameters(
dict(
pylist=12,
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=12: incompatible with ragged_rank=1'),
dict(
pylist=12,
inner_shape=(1,),
exception=ValueError,
message='Invalid pylist=12: incompatible with '
'dim\\(inner_shape\\)=1'),
dict(
pylist=[[[1], [2]]],
ragged_rank=-1,
exception=ValueError,
message='Invalid ragged_rank=-1: must be nonnegative'),
dict(
pylist=[[1, [2]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[[1]], [[[2]]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[1], [[]]],
exception=ValueError,
message='Invalid pylist=.*: empty list nesting is greater '
'than scalar value nesting'),
dict(
pylist=[1, 2, 3],
ragged_rank=1,
exception=ValueError,
message='pylist has scalar values depth 1, but ragged_rank=1 '
'requires scalar value depth greater than 1'),
dict(
pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
ragged_rank=2,
exception=ValueError,
message='pylist has scalar values depth 2, but ragged_rank=2 '
'requires scalar value depth greater than 2'),
dict(pylist=[1, 2, 3], inner_shape=(1, 1), exception=TypeError),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
inner_shape=(2, 2),
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=.*: incompatible with ragged_rank=1 and '
'dim\\(inner_shape\\)=2'),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[], [[]]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
)
def testRaggedConstError(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
exception=None,
message=None):
"""Tests that `ragged_const()` raises an expected exception."""
self.assertRaisesRegexp(
exception,
message,
ragged_factory_ops.constant,
pylist,
dtype=dtype,
ragged_rank=ragged_rank,
inner_shape=inner_shape)
@parameterized.parameters([
dict(pylist=9, scalar_depth=0, max_depth=0),
dict(pylist=[9], scalar_depth=1, max_depth=1),
dict(pylist=[1, 2, 3], scalar_depth=1, max_depth=1),
dict(pylist=[[1], [2]], scalar_depth=2, max_depth=2),
dict(pylist=[[[1], [2]], [[3]]], scalar_depth=3, max_depth=3),
dict(pylist=[], scalar_depth=None, max_depth=1),
dict(pylist=[[]], scalar_depth=None, max_depth=2),
dict(pylist=[[], [], []], scalar_depth=None, max_depth=2),
dict(pylist=[[[], []], [[], [[[]]]], []], scalar_depth=None, max_depth=5),
dict(
pylist=[1, [2]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[1], 2],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[[[1]], []], [[2]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
])
def testScalarAndMaxDepthHelper(self,
pylist,
scalar_depth=None,
max_depth=None,
exception=None,
message=None):
"""Tests for the _find_scalar_and_max_depth helper function."""
if exception is not None:
self.assertRaisesRegexp(exception, message,
ragged_factory_ops._find_scalar_and_max_depth,
pylist)
else:
self.assertEqual(
ragged_factory_ops._find_scalar_and_max_depth(pylist),
(scalar_depth, max_depth))
@parameterized.parameters([
dict(pylist=[[1], [2, 3]], ragged_rank=1, inner_shape=()),
dict(
pylist=[[[1], [2]], [[3], [4], [5]]], ragged_rank=1,
inner_shape=(1,)),
dict(pylist=[[[1], [2]], [[3], [4], [5]]], ragged_rank=2, inner_shape=()),
dict(
pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
ragged_rank=1,
inner_shape=(2, 3)),
dict(
pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
ragged_rank=2,
inner_shape=(3,)),
dict(
pylist=[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [2, 4, 6]]]],
ragged_rank=3,
inner_shape=()),
dict(
pylist=[[[1], [2, 3]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[1], [[2]]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[[1]], [2]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
])
def testDefaultInnerShapeForPylistHelper(self,
pylist,
ragged_rank,
inner_shape=None,
exception=None,
message=None):
"""Tests for the _default_inner_shape_for_pylist helper function."""
if exception is not None:
self.assertRaisesRegexp(
exception, message,
ragged.ragged_factory_ops._default_inner_shape_for_pylist, pylist,
ragged_rank)
else:
self.assertEqual(
ragged.ragged_factory_ops._default_inner_shape_for_pylist(
pylist, ragged_rank), inner_shape)
if __name__ == '__main__':
googletest.main()
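# --- Illustrative sketch (not part of the original test file) ---
# How ragged_rank and inner_shape affect the static shape of
# tf.ragged.constant (the public export of ragged_factory_ops.constant);
# shapes follow the parameterized cases above.
import tensorflow as tf

pylist = [[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]]
print(tf.ragged.constant(pylist).shape)                    # (3, None, None)
print(tf.ragged.constant(pylist, ragged_rank=1).shape)     # (3, None, 2)
print(tf.ragged.constant(pylist, inner_shape=(2,)).shape)  # (3, None, 2)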
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_const_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_gather_ops.gather_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedGatherNdOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
DOCSTRING_PARAMS = [[['000', '001'], ['010']],
[['100'], ['110', '111', '112'], ['120']],
[[], ['210']]] # pyformat: disable
@parameterized.parameters([
#=========================================================================
# Docstring Examples
#=========================================================================
dict(
descr='Docstring example 1',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[2], [0]],
expected=ragged_factory_ops.constant_value(
[[[], [b'210']], [[b'000', b'001'], [b'010']]])),
dict(
descr='Docstring example 2',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[2, 1], [0, 0]],
expected=ragged_factory_ops.constant_value(
[[b'210'], [b'000', b'001']])),
dict(
descr='Docstring example 3',
params=ragged_factory_ops.constant_value(DOCSTRING_PARAMS),
indices=[[0, 0, 1], [1, 1, 2]],
expected=[b'001', b'112']),
#=========================================================================
# Indices with 0 values (selects the entire params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [0], result: [B1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[b'a', b'b', b'c'], [b'd']])),
dict(
descr='params: [B1, (B2)], indices: [A1, 0], result: [A1, B1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([3, 0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']]])),
dict(
descr=('params: [B1, (B2)], indices: [A1, A2, 0], '
'result: [A1, A2, B1, (B2)]'),
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=np.zeros([1, 3, 0], dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']],
[[b'a', b'b', b'c'], [b'd']]]])),
dict(
descr='params: [B1], indices: [A1, (A2), 0], result: [A1, (A2), B1]',
params=['a'],
indices=ragged_factory_ops.constant_value(
[[[], []], [[]]],
ragged_rank=1,
dtype=np.int32),
expected=ragged_factory_ops.constant_value(
[[[b'a'], [b'a']], [[b'a']]],
ragged_rank=1)),
#=========================================================================
# Indices with 1 value (selects row from params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [A1, 1], result: [A1, (B2)]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=[[1], [0]],
expected=ragged_factory_ops.constant_value(
[[b'd'], [b'a', b'b', b'c']])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 1], '
'result: [A1, (B2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1], [1]],
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f']], [[b'e', b'f']]])),
dict(
descr=('params: [B1, B2, B3], indices: [A1, (A2), 1], '
'result: [A1, (A2), B2, B3]'),
params=[[['a']], [['b']]],
indices=ragged_factory_ops.constant_value([[[0]]], ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[[b'a']]]], ragged_rank=1)),
#=========================================================================
# Indices with 2 values (selects row & col from params)
#=========================================================================
dict(
descr='params: [B1, (B2)], indices: [A1, 2], result: [A1]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d']]),
indices=[[1, 0], [0, 0], [0, 2]],
expected=ragged_factory_ops.constant_value([b'd', b'a', b'c'])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 2], '
'result: [A1, (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1, 0], [0, 1], [0, 0]],
expected=ragged_factory_ops.constant_value(
[[b'e', b'f'], [b'd'], [b'a', b'b', b'c']])),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, A2, 2], '
'result: [A1, (A2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[[1, 0], [0, 1], [0, 0]]],
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f'], [b'd'], [b'a', b'b', b'c']]])),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, A2, 2], '
'result: [A1, A2, B3]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']],
[['e', 'f']]],
ragged_rank=1),
indices=[[[1, 0], [0, 1], [0, 0]]],
expected=[[[b'e', b'f'], [b'c', b'd'], [b'a', b'b']]]),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, A2, A3, 2], '
'result: [A1, A2, A3, B3]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']],
[['e', 'f']]],
ragged_rank=1),
indices=[[[[1, 0], [0, 1], [0, 0]]]],
expected=[[[[b'e', b'f'], [b'c', b'd'], [b'a', b'b']]]]),
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, (A2), 2], '
'result: [A1, (A2), (B3)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=ragged_factory_ops.constant_value(
[[[1, 0], [0, 1]], [[0, 0]]],
ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[b'e', b'f'], [b'd']], [[b'a', b'b', b'c']]])),
#=========================================================================
# Indices with 3 values
#=========================================================================
dict(
descr=('params: [B1, (B2), (B3)], indices: [A1, 3], '
'result: [A1]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]]),
indices=[[1, 0, 1], [0, 0, 0], [0, 1, 0]],
expected=[b'f', b'a', b'd']),
dict(
descr=('params: [B1, (B2), B3], indices: [A1, 3], '
'result: [A1]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b'], ['c', 'd']], [['e', 'f']]],
ragged_rank=1),
indices=[[1, 0, 1], [0, 0, 0], [0, 1, 1]],
expected=[b'f', b'a', b'd']),
dict(
descr=('params: [B1, (B2), (B3), B4], indices: [A1, 3], '
'result: [A1, B4]'),
params=ragged_factory_ops.constant_value(
[[[['a', 'b'], ['c', 'd']], [['e', 'f']]]],
ragged_rank=2),
indices=[[0, 0, 1], [0, 0, 0], [0, 1, 0]],
expected=[[b'c', b'd'], [b'a', b'b'], [b'e', b'f']]),
]) # pyformat: disable
def testRaggedGatherNd(self, descr, params, indices, expected):
result = ragged_gather_ops.gather_nd(params, indices)
self.assertRaggedEqual(result, expected)
def testRaggedGatherNdUnknownRankError(self):
if context.executing_eagerly():
return
params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd']])
indices1 = array_ops.placeholder(dtypes.int32, shape=None)
indices2 = array_ops.placeholder(dtypes.int32, shape=[None])
with self.assertRaisesRegexp(ValueError,
'indices.rank be statically known.'):
ragged_gather_ops.gather_nd(params, indices1)
with self.assertRaisesRegexp(
ValueError, r'indices.shape\[-1\] must be statically known.'):
ragged_gather_ops.gather_nd(params, indices2)
@parameterized.parameters([
dict(
params=['a'],
indices=0,
error=(ValueError, errors.InvalidArgumentError)),
dict(
params=ragged_factory_ops.constant_value([['a']]),
indices=0,
message='indices.rank must be at least 1.'),
dict(
params=['a', 'b', 'c'],
indices=ragged_factory_ops.constant_value([[0]]),
message='The innermost dimension of indices may not be ragged'),
])
def testRaggedGatherNdStaticError(self,
params,
indices,
message=None,
error=ValueError):
with self.assertRaisesRegexp(error, message):
ragged_gather_ops.gather_nd(params, indices)
if __name__ == '__main__':
googletest.main()
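# --- Illustrative sketch (not part of the original test file) ---
# Docstring examples 1-3 from the parameters above, assuming tf.gather_nd
# dispatches to the ragged implementation for RaggedTensor params (eager
# execution assumed).
import tensorflow as tf

params = tf.ragged.constant([[['000', '001'], ['010']],
                             [['100'], ['110', '111', '112'], ['120']],
                             [[], ['210']]])
print(tf.gather_nd(params, [[2], [0]]))              # rows 2 and 0 of params
print(tf.gather_nd(params, [[2, 1], [0, 0]]))        # [['210'], ['000', '001']]
print(tf.gather_nd(params, [[0, 0, 1], [1, 1, 2]]))  # ['001', '112']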
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_functional_ops.map_flat_values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedMapInnerValuesOpTest(ragged_test_util.RaggedTensorTestCase):
def assertRaggedMapInnerValuesReturns(self,
op,
expected,
args=(),
kwargs=None):
kwargs = kwargs or {}
result = ragged_functional_ops.map_flat_values(op, *args, **kwargs)
self.assertRaggedEqual(result, expected)
def testDocStringExamples(self):
"""Test the examples in apply_op_to_ragged_values.__doc__."""
rt = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5], [6]])
v1 = ragged_functional_ops.map_flat_values(array_ops.ones_like, rt)
v2 = ragged_functional_ops.map_flat_values(math_ops.multiply, rt, rt)
v3 = ragged_functional_ops.map_flat_values(math_ops.add, rt, 5)
self.assertRaggedEqual(v1, [[1, 1, 1], [], [1, 1], [1]])
self.assertRaggedEqual(v2, [[1, 4, 9], [], [16, 25], [36]])
self.assertRaggedEqual(v3, [[6, 7, 8], [], [9, 10], [11]])
def testOpWithSingleRaggedTensorArg(self):
tensor = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=array_ops.zeros_like,
args=(tensor,),
expected=[[0, 0, 0], [], [0, 0]])
def testOpWithTwoRaggedTensorArgs(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply, args=(x, y), expected=[[3, 2, 12], [], [4, 25]])
def testOpWithRaggedTensorAndScalarArgs(self):
y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply, args=(5, y), expected=[[5, 10, 15], [], [20, 25]])
def testOpWithThreeRaggedTensorArgs(self):
condition = ragged_factory_ops.constant(
[[True, True, False], [], [True, False]]) # pyformat: disable
x = ragged_factory_ops.constant([['a', 'b', 'c'], [], ['d', 'e']])
y = ragged_factory_ops.constant([['A', 'B', 'C'], [], ['D', 'E']])
self.assertRaggedMapInnerValuesReturns(
op=array_ops.where,
args=(condition, x, y),
expected=[[b'a', b'b', b'C'], [], [b'd', b'E']])
def testOpWithRaggedTensorListArg(self):
x = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
y = ragged_factory_ops.constant([[10, 20, 30], [], [40, 50]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.add_n,
args=([x, y, x],),
expected=[[12, 24, 36], [], [48, 60]])
def testOpWithKeywordArgs(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
kwargs=dict(x=x, y=y),
expected=[[3, 2, 12], [], [4, 25]])
def testOpWithMixedPositionalAndKeywordArgs(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x,),
kwargs=dict(y=y),
expected=[[3, 2, 12], [], [4, 25]])
def testNonElementWiseOp(self):
x = ragged_factory_ops.constant(
[[[3, 1, 4], [1, 5, 9], [2, 6, 5]], [], [[3, 5, 8], [9, 7, 9]]],
ragged_rank=1)
self.assertRaggedMapInnerValuesReturns(
op=math_ops.reduce_sum,
kwargs={
'input_tensor': x,
'axis': 1,
},
expected=[[8, 15, 13], [], [16, 25]])
def testOpWithRaggedRankGreaterThanOne(self):
# ragged_rank=0
x0 = [3, 1, 4, 1, 5, 9, 2, 6, 5]
y0 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
self.assertRaggedEqual(
math_ops.multiply(x0, y0), [3, 2, 12, 4, 25, 54, 14, 48, 45])
# ragged_rank=1
x1 = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5], [9, 2], [6, 5]])
y1 = ragged_factory_ops.constant([[1, 2, 3], [], [4, 5], [6, 7], [8, 9]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x1, y1),
expected=[[3, 2, 12], [], [4, 25], [54, 14], [48, 45]])
# ragged_rank=2
x2 = ragged_factory_ops.constant([[[3, 1, 4]], [], [[], [1, 5]],
[[9, 2], [6, 5]]])
y2 = ragged_factory_ops.constant([[[1, 2, 3]], [], [[], [4, 5]],
[[6, 7], [8, 9]]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x2, y2),
expected=[[[3, 2, 12]], # row 0
[], # row 1
[[], [4, 25]], # row 2
[[54, 14], [48, 45]] # row 3
]) # pyformat: disable
# ragged_rank=3
x3 = ragged_factory_ops.constant([[[[3, 1, 4]], []], [], [[[], [1, 5]]],
[[[9, 2], [6, 5]]]])
y3 = ragged_factory_ops.constant([[[[1, 2, 3]], []], [], [[[], [4, 5]]],
[[[6, 7], [8, 9]]]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x3, y3),
expected=[
[[[3, 2, 12]], []], # row 0
[], # row 1
[[[], [4, 25]]], # row 2
[[[54, 14], [48, 45]]] # row 3
]) # pyformat: disable
def testOpWithRaggedRankThree(self):
x = ragged_factory_ops.constant([[[3, 1, 4]], [], [[], [1, 5]]])
y = ragged_factory_ops.constant([[[1, 2, 3]], [], [[], [4, 5]]])
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply,
args=(x, y),
expected=[[[3, 2, 12]], [], [[], [4, 25]]])
def testOpWithInnerValuesOnly(self):
x = constant_op.constant([[1, 2], [3, 4], [5, 6]])
y = constant_op.constant(2)
self.assertRaggedMapInnerValuesReturns(
op=math_ops.multiply, args=(x, y), expected=[[2, 4], [6, 8], [10, 12]])
def testRaggedTensorSplitsRaggedRankMismatchError(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[[3, 1, 4], []], [], [[1, 5]]])
self.assertRaisesRegexp(
ValueError, r'Inputs must have identical ragged splits.*',
ragged_functional_ops.map_flat_values, math_ops.add, x, y)
def testRaggedTensorSplitsValueMismatchError(self):
x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
y = ragged_factory_ops.constant([[1], [2, 3], [4, 5]])
self.assertRaisesRegexp(errors.InvalidArgumentError,
r'Inputs must have identical ragged splits.*',
ragged_functional_ops.map_flat_values, math_ops.add,
x, y)
def testRaggedTensorSplitsMismatchErrorAtRuntime(self):
splits1 = array_ops.placeholder_with_default(
constant_op.constant([0, 3, 3, 5], dtypes.int64), None)
splits2 = array_ops.placeholder_with_default(
constant_op.constant([0, 1, 3, 5], dtypes.int64), None)
x = ragged_tensor.RaggedTensor.from_row_splits([3, 1, 4, 1, 5], splits1)
y = ragged_tensor.RaggedTensor.from_row_splits([1, 2, 3, 4, 5], splits2)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'.*Inputs must have identical ragged splits'):
self.evaluate(ragged_functional_ops.map_flat_values(math_ops.add, x, y))
if __name__ == '__main__':
googletest.main()
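# --- Illustrative sketch (not part of the original test file) ---
# The non-elementwise case from testNonElementWiseOp above: map_flat_values
# only requires that the op preserve the outermost dimension of flat_values,
# so a row-wise reduce_sum over the dense inner dimension is allowed here.
import tensorflow as tf

x = tf.ragged.constant(
    [[[3, 1, 4], [1, 5, 9], [2, 6, 5]], [], [[3, 5, 8], [9, 7, 9]]],
    ragged_rank=1)  # flat_values has shape [5, 3]
result = tf.ragged.map_flat_values(tf.reduce_sum, x, axis=1)
print(result.to_list())  # [[8, 15, 13], [], [16, 25]]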
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_map_flat_values_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_math_ops.reduce_<AGGREGATE> ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
_MAX_INT32 = dtypes.int32.max
_MIN_INT32 = dtypes.int32.min
_NAN = np.nan
def mean(*values):
return 1.0 * sum(values) / len(values)
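# --- Illustrative sketch (not part of the original test file) ---
# The ragged reduction semantics exercised below, assuming tf.reduce_sum and
# tf.reduce_max dispatch to the ragged implementations (eager execution
# assumed); expected values match the docstring examples in the test cases.
import tensorflow as tf

rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
print(tf.reduce_sum(rt, axis=0))  # [15, 12, 4]  = [3+1+9+2, 1+5+6, 4]
print(tf.reduce_sum(rt, axis=1))  # [8, 6, 9, 8] = [3+1+4, 1+5, 9, 2+6]
print(tf.reduce_max(rt, axis=1))  # [4, 5, 9, 6]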
@test_util.run_all_in_graph_and_eager_modes
class RaggedReduceOpsTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters(
#=========================================================================
# Docstring examples. RaggedTensor for testing is:
# [[3, 1, 4],
# [1, 5, ],
# [9, ],
# [2, 6 ]]
#=========================================================================
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=-2,
expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=-1,
expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[54, 30, 4] # = [3*1*9*2, 1*5*6, 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[12, 5, 9, 12] # = [3*1*4, 1*5, 9, 2*6]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[1, 1, 4] # = [min(3, 1, 9, 2), min(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[1, 1, 9, 2] # = [min(3, 1, 4), min(1, 5), 9, min(2, 6)]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[9, 6, 4] # = [max(3, 1, 9, 2), max(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[4, 5, 9, 6] # = [max(3, 1, 4), max(1, 5), 9, max(2, 6)]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[3.75, 4, 4] # = [mean(3, 1, 9, 2), mean(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_any,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=0,
expected=[True, True, False, True]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_any,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=1,
expected=[True, True, True]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_all,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=0,
expected=[False, True, False, True]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_all,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=1,
expected=[True, False, False]),
#=========================================================================
# Examples with the following RaggedTensor (ragged_rank=1):
# [[0, 1, 2, 3],
# [4 ],
# [ ],
# [5, 6 ],
# [7 ],
# [8, 9 ]]
#=========================================================================
# axis=None
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=0 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=min(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=max(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=mean(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
# axis=0
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[0 + 4 + 5 + 7 + 8, 1 + 6 + 9, 2, 3]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[0 * 4 * 5 * 7 * 8, 1 * 6 * 9, 2, 3]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[min(0, 4, 5, 7, 8), min(1, 6, 9), 2, 3]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[max(0, 4, 5, 7, 8), max(1, 6, 9), 2, 3]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[mean(0, 4, 5, 7, 8),
mean(1, 6, 9), 2, 3]),
# axis=1
# Note: we don't test mean here because it gives a NaN, and this will
# cause assertEqual to fail (since NaN != NaN). See testMeanNan().
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[0 * 1 * 2 * 3, 4, 1, 5 * 6, 7, 8 * 9]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[min(0, 1, 2, 3), 4, _MAX_INT32,
min(5, 6), 7,
min(8, 9)]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[max(0, 1, 2, 3), 4, _MIN_INT32,
max(5, 6), 7,
max(8, 9)]),
#=========================================================================
# Examples with ragged_rank=2:
# [[[1, 2], [ ], [3, 4, 5]],
# [[6, 7], [ ], [8 ]],
# [ ],
# [[9 ] ]]
#=========================================================================
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[],
expected=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=None,
expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=0,
expected=[[1 + 6 + 9, 2 + 7], [], [3 + 8, 4, 5]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=1,
expected=[[1 + 3, 2 + 4, 5], [6 + 8, 7], [], [9]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=2,
expected=[[1 + 2, 0, 3 + 4 + 5], [6 + 7, 0, 8], [], [9]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 1],
expected=[1 + 3 + 6 + 8 + 9, 2 + 4 + 7, 5]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 2],
expected=[1 + 6 + 9 + 2 + 7, 0, 3 + 8 + 4 + 5]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[1, 2],
expected=[1 + 2 + 3 + 4 + 5, 6 + 7 + 8, 0, 9]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 1, 2],
expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])),
#=========================================================================
# Examples for ragged_reduce_mean ragged_rank=2:
# [[[1, 2], [3, 4, 5]],
# [[6, 7], [8 ]],
# [[9 ] ]]
#=========================================================================
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=0,
expected=[[mean(1, 6, 9), mean(2, 7)], [mean(3, 8), 4, 5]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=1,
expected=[[mean(1, 3), mean(2, 4), 5], [mean(6, 8), 7], [9]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=2,
expected=[[mean(1, 2), mean(3, 4, 5)], [mean(6, 7), 8], [9]]),
# Test case for GitHub issue 27497, multiple negative axes.
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[-2, -1],
expected=[1 + 2 + 3 + 4 + 5, 6 + 7 + 8, 0, 9]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[-3, -2, -1],
expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])),
)
def testReduce(self, ragged_reduce_op, rt_input, axis, expected):
rt_input = ragged_factory_ops.constant(rt_input)
reduced = ragged_reduce_op(rt_input, axis)
self.assertRaggedEqual(reduced, expected)
def assertEqualWithNan(self, actual, expected):
"""Like assertEqual, but NaN==NaN."""
self.assertTrue(
((actual == expected) | (np.isnan(actual) & np.isnan(expected))).all())
def testMeanNan(self):
rt_as_list = [[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]]
expected = (
np.array([0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]) / np.array(
[4, 1, 0, 2, 1, 2]))
rt_input = ragged_factory_ops.constant(rt_as_list)
reduced = ragged_math_ops.reduce_mean(rt_input, axis=1)
self.assertEqualWithNan(self.evaluate(reduced), expected)
def testMeanWithTensorInputs(self):
tensor = [[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]]
expected = [2.0, 20.0]
reduced = ragged_math_ops.reduce_mean(tensor, axis=1)
self.assertRaggedEqual(reduced, expected)
def testErrors(self):
rt_input = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
axis = array_ops.placeholder_with_default(constant_op.constant([0]), None)
if not context.executing_eagerly():
self.assertRaisesRegexp(
ValueError, r'axis must be known at graph construction time.',
ragged_math_ops.reduce_sum, rt_input, axis)
self.assertRaisesRegexp(TypeError, r'axis must be an int; got str.*',
ragged_math_ops.reduce_sum, rt_input, ['x'])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_reduce_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.where."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.ops.ragged import ragged_where_op
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedWhereOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
#=========================================================================
# Docstring Examples
#=========================================================================
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value(
[[True, False, True], [False, True]]),
expected=[[0, 0], [0, 2], [1, 1]]),
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value(
[[True, False, True], [False, True]]),
x=ragged_factory_ops.constant_value(
[['A', 'B', 'C'], ['D', 'E']]),
y=ragged_factory_ops.constant_value(
[['a', 'b', 'c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'b', b'C'], [b'd', b'E']])),
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value([True, False]),
x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]),
y=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'B', b'C'], [b'd', b'e']])),
#=========================================================================
# Coordinate-retrieval mode
#=========================================================================
dict( # shape=[D1]
condition=[True, False, True, False, True],
expected=[[0], [2], [4]]),
dict( # shape=[D1, D2]
condition=[[True, False], [False, True]],
expected=[[0, 0], [1, 1]]),
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value(
[[True, False, True], [False, True]]),
expected=[[0, 0], [0, 2], [1, 1]]),
dict( # shape=[D1, (D2), (D3)]
condition=ragged_factory_ops.constant_value([
[[True, False, True], [False, True]],
[[True], [], [False], [False, True, False]]
]),
expected=[[0, 0, 0], [0, 0, 2], [0, 1, 1],
[1, 0, 0], [1, 3, 1]]),
dict( # shape=[D1, (D2), D3]
condition=ragged_factory_ops.constant_value([
[[True, False], [False, True]],
[[True, False], [False, False], [True, False], [False, True]]
], ragged_rank=1),
expected=[[0, 0, 0], [0, 1, 1],
[1, 0, 0], [1, 2, 0], [1, 3, 1]]),
dict( # shape=[D1, (D2), (D3), (D4)]
condition=ragged_factory_ops.constant_value([
[[[], [True]]],
[[[True, False, True], [False, True]],
[[True], [], [False], [False, True, False]]]
]),
expected=[[0, 0, 1, 0],
[1, 0, 0, 0], [1, 0, 0, 2], [1, 0, 1, 1],
[1, 1, 0, 0], [1, 1, 3, 1]]),
#=========================================================================
# Elementwise value-selection mode
#=========================================================================
dict( # shape=[]
condition=True, x='A', y='a', expected=b'A'),
dict( # shape=[]
condition=False, x='A', y='a', expected=b'a'),
dict( # shape=[D1]
condition=[True, False, True],
x=['A', 'B', 'C'],
y=['a', 'b', 'c'],
expected=[b'A', b'b', b'C']),
dict( # shape=[D1, D2]
condition=[[True, False], [False, True]],
x=[['A', 'B'], ['D', 'E']],
y=[['a', 'b'], ['d', 'e']],
expected=[[b'A', b'b'], [b'd', b'E']]),
dict( # shape=[D1, (D2)]
condition=ragged_factory_ops.constant_value(
[[True, False, True], [False, True]]),
x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]),
y=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'b', b'C'], [b'd', b'E']])),
dict( # shape=[D1, (D2), D3]
condition=ragged_factory_ops.constant_value([
[[True, False], [False, True]],
[[True, False], [False, False], [True, False], [False, True]]
], ragged_rank=1),
x=ragged_factory_ops.constant_value([
[['A', 'B'], ['C', 'D']],
[['E', 'F'], ['G', 'H'], ['I', 'J'], ['K', 'L']]
], ragged_rank=1),
y=ragged_factory_ops.constant_value([
[['a', 'b'], ['c', 'd']],
[['e', 'f'], ['g', 'h'], ['i', 'j'], ['k', 'l']]
], ragged_rank=1),
expected=ragged_factory_ops.constant_value([
[[b'A', b'b'], [b'c', b'D']],
[[b'E', b'f'], [b'g', b'h'], [b'I', b'j'], [b'k', b'L']]
], ragged_rank=1)),
dict( # shape=[D1, (D2), (D3), (D4)]
condition=ragged_factory_ops.constant_value([
[[[], [True]]],
[[[True, False, True], [False, True]],
[[True], [], [False], [False, True, False]]]
]),
x=ragged_factory_ops.constant_value([
[[[], ['A']]],
[[['B', 'C', 'D'], ['E', 'F']],
[['G'], [], ['H'], ['I', 'J', 'K']]]
]),
y=ragged_factory_ops.constant_value([
[[[], ['a']]],
[[['b', 'c', 'd'], ['e', 'f']],
[['g'], [], ['h'], ['i', 'j', 'k']]]
]),
expected=ragged_factory_ops.constant_value([
[[[], [b'A']]],
[[[b'B', b'c', b'D'], [b'e', b'F']],
[[b'G'], [], [b'h'], [b'i', b'J', b'k']]]
])),
#=========================================================================
# Elementwise row-selection mode
#=========================================================================
dict( # x.shape=[D1, D2], y.shape=[D1, D2]
condition=[True, False, True],
x=[['A', 'B'], ['C', 'D'], ['E', 'F']],
y=[['a', 'b'], ['c', 'd'], ['e', 'f']],
expected=[[b'A', b'B'], [b'c', b'd'], [b'E', b'F']]),
dict( # x.shape=[D1, D2], y.shape=[D1, (D2)]
condition=[True, False, True],
x=[['A', 'B'], ['C', 'D'], ['E', 'F']],
y=ragged_factory_ops.constant_value(
[['a', 'b'], ['c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'B'], [b'c'], [b'E', b'F']])),
dict( # x.shape=[D1, (D2)], y.shape=[D1, (D2)]
condition=[True, False, True],
x=ragged_factory_ops.constant_value(
[['A', 'B', 'C'], ['D', 'E'], ['F', 'G']]),
y=ragged_factory_ops.constant_value(
[['a', 'b'], ['c'], ['d', 'e']]),
expected=ragged_factory_ops.constant_value(
[[b'A', b'B', b'C'], [b'c'], [b'F', b'G']])),
dict( # shape=[D1, (D2), (D3), (D4)]
condition=ragged_factory_ops.constant_value([True, False]),
x=ragged_factory_ops.constant_value([
[[[], ['A']]],
[[['B', 'C', 'D'], ['E', 'F']],
[['G'], [], ['H'], ['I', 'J', 'K']]]
]),
y=ragged_factory_ops.constant_value([[[['a']]], [[['b']]]]),
expected=ragged_factory_ops.constant_value(
[[[[], [b'A']]], [[[b'b']]]])),
]) # pyformat: disable
def testRaggedWhere(self, condition, expected, x=None, y=None):
result = ragged_where_op.where(condition, x, y)
self.assertRaggedEqual(result, expected)
@parameterized.parameters([
dict(
condition=[True, False],
x=[1, 2],
error=ValueError,
message='x and y must be either both None or both non-None'),
dict(
condition=ragged_factory_ops.constant_value([[True, False, True],
[False, True]]),
x=ragged_factory_ops.constant_value([['A', 'B', 'C'], ['D', 'E']]),
y=[['a', 'b'], ['d', 'e']],
error=ValueError,
message='Input shapes do not match.'),
])
def testRaggedWhereErrors(self, condition, error, message, x=None, y=None):
with self.assertRaisesRegexp(error, message):
ragged_where_op.where(condition, x, y)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_where_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_batch_gather_ops.batch_gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_batch_gather_ops
from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedBatchGatherOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
#=========================================================================
# Docstring Example
#=========================================================================
dict(
descr='Docstring example',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d'], [],
['e']]),
indices=ragged_factory_ops.constant_value([[1, 2, 0], [], [], [0,
0]]),
expected=ragged_factory_ops.constant_value([[b'b', b'c', b'a'], [],
[], [b'e', b'e']])),
#=========================================================================
# 0 Batch Dimensions
#=========================================================================
dict(
descr='params: [P1], indices: [I], result: [I]',
params=['a', 'b', 'c', 'd'],
indices=[3, 2],
expected=[b'd', b'c']),
dict(
descr='params: [P1, (P2)], indices: [I], result: [I, (P2)]',
params=ragged_factory_ops.constant_value([['a', 'b'], [], ['c'],
['d', 'e']]),
indices=[3, 2],
expected=ragged_factory_ops.constant_value([[b'd', b'e'], [b'c']])),
#=========================================================================
# 1 Batch Dimension
#=========================================================================
dict(
descr='params: [B1, P1], indices: [B1, I], result: [B1, I]',
params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']],
indices=[[2, 0], [0, 1], [1, 0]],
expected=[[b'c', b'a'], [b'd', b'e'], [b'h', b'g']]),
dict(
descr='params: [B1, (P1)], indices: [B1, I], result: [B1, I]',
params=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d', 'e'],
['g']]),
indices=[[2, 0], [0, 1], [0, 0]],
expected=[[b'c', b'a'], [b'd', b'e'], [b'g', b'g']]),
dict(
descr='params: [B1, P1], indices: [B1, (I)], result: [B1, (I)]',
params=[['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']],
indices=ragged_factory_ops.constant_value([[2, 0, 2], [0], [1]]),
expected=ragged_factory_ops.constant_value([[b'c', b'a', b'c'],
[b'd'], [b'h']])),
dict(
descr=('params: [B1, (P1), (P2), P3], indices: [B1, I], '
'result: [B1, I, (P2), P3]'),
params=ragged_factory_ops.constant_value(
[[[['a']], [['b'], ['c']]], [[['d'], ['e']], [['f']]], [[['g']]]],
ragged_rank=2),
indices=[[1, 0], [0, 1], [0, 0]],
expected=ragged_factory_ops.constant_value(
[[[[b'b'], [b'c']], [[b'a']]], [[[b'd'], [b'e']], [[b'f']]],
[[[b'g']], [[b'g']]]],
ragged_rank=2)),
#=========================================================================
# 2 Batch Dimensions
#=========================================================================
dict(
descr=('params: [B1, B2, P1], indices: [B1, B2, I], '
'result: [B1, B2, I]'),
params=[[['a', 'b', 'c']], [['d', 'e', 'f']], [['g', 'h', 'i']]],
indices=[[[2, 0]], [[0, 1]], [[1, 0]]],
expected=[[[b'c', b'a']], [[b'd', b'e']], [[b'h', b'g']]]),
dict(
descr=('params: [B1, (B2), P1], indices: [B1, (B2), I], '
'result: [B1, (B2), I]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]],
ragged_rank=1),
indices=ragged_factory_ops.constant_value(
[[[2, 0], [0, 1]], [[1, 0]]], ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[b'c', b'a'], [b'd', b'e']], [[b'h', b'g']]], ragged_rank=1)),
dict(
descr=('params: [B1, (B2), (P1)], indices: [B1, (B2), I], '
'result: [B1, (B2), I]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d']], [['e', 'f']]], ragged_rank=2),
indices=ragged_factory_ops.constant_value(
[[[2, 0], [0, 0]], [[1, 0]]], ragged_rank=1),
expected=ragged_factory_ops.constant_value(
[[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]], ragged_rank=1)),
dict(
descr=('params: [B1, (B2), P1], indices: [B1, (B2), (I)], '
'result: [B1, (B2), (I)]'),
params=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i']]],
ragged_rank=1),
indices=ragged_factory_ops.constant_value(
[[[2, 1, 0], [0]], [[1, 1]]], ragged_rank=2),
expected=ragged_factory_ops.constant_value(
[[[b'c', b'b', b'a'], [b'd']], [[b'h', b'h']]], ragged_rank=2)),
#=========================================================================
# 3 Batch Dimensions
#=========================================================================
dict(
descr=(
'params: [B1, (B2), (B3), (P1)], indices: [B1, (B2), (B3), I], '
'result: [B1, (B2), (B3), I]'),
params=ragged_factory_ops.constant_value(
[[[['a', 'b', 'c'], ['d']], [['e', 'f']]]], ragged_rank=3),
indices=ragged_factory_ops.constant_value(
[[[[2, 0], [0, 0]], [[1, 0]]]], ragged_rank=2),
expected=ragged_factory_ops.constant_value(
[[[[b'c', b'a'], [b'd', b'd']], [[b'f', b'e']]]], ragged_rank=2)),
])
def testRaggedBatchGather(self, descr, params, indices, expected):
result = ragged_batch_gather_ops.batch_gather(params, indices)
self.assertRaggedEqual(result, expected)
@parameterized.parameters([
# Docstring example:
dict(
descr='Docstring example',
params=[['a', 'b', 'c'], ['d'], [], ['e']],
indices=[[1, 2, -1], [], [], [0, 10]],
expected=[['b', 'c', 'FOO'], [], [], ['e', 'FOO']],
default_value='FOO',
),
# Dimensions:
# indices: [4]
# params: [2, (d1), (d2)]
dict(
          descr='params: [2, (d1), (d2)], indices: [4]',
indices=[1, 100, 0, -1],
params=[[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-',
'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'],
['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall']],
[["It's", 'always', 'darkest', 'before', 'the', 'dawn']]],
expected=[[["It's", 'always', 'darkest', 'before', 'the', 'dawn']],
[['$NONE^']],
[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion',
'-', 'dollar', 'takeover', 'offer', 'from', 'Microsoft',
'.'],
['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall']],
[['$NONE^']]],
),
# Dimensions:
# params: [1, (d1)]
# indices: [3]
dict(
descr='params: rank 2, indices: rank 1',
params=[
['Bruce', 'Wayne'],
],
indices=[-1, 0, 1000],
expected=[['$NONE^'], ['Bruce', 'Wayne'], ['$NONE^']]
),
# Dimensions:
# params: [1, (d1)]
# indices: [1, (d2)]
dict(
descr='Test underbound indices of shape [1, (d2)]',
params=[
['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo',
'!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar',
'takeover', 'offer', 'from', 'Microsoft', '.'],
],
indices=[[8, -1]],
expected=[['!', '$NONE^']],
),
dict(
descr='Test underbound indices of shape [2, (d2)]',
params=[
['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo',
'!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar',
'takeover', 'offer', 'from', 'Microsoft', '.'],
['Who', 'let', 'the', 'dogs', 'out', '?'],
],
indices=[[8, -1], [1, 100]],
expected=[['!', '$NONE^'], ['let', '$NONE^']],
),
# Dimensions:
# params: [2, (d1)]
# indices: [2, (d2)]
dict(
descr='Test underbound indices of rank 2',
params=[
['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo',
'!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar',
'takeover', 'offer', 'from', 'Microsoft', '.'],
['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together',
'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the',
'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.',
'His', 'black', 'beard', 'dripped', 'down', 'over', 'the',
'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand',
'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He',
'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes',
'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then',
'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',',
'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against',
'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out',
'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of',
'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they',
'had', 'previously', 'chanted', 'in', 'Hebrew', '.']],
indices=[[8, -1], [3, 23, 35, 45, 75, 83, -121]],
expected=[['!', '$NONE^'], ['.', '.', '.', '.', '!', '.', '$NONE^']],
),
dict(
descr='Test overbound indices of rank 2',
params=[
['The', 'deal', 'came', 'about', '18', 'months', 'after', 'Yahoo',
'!', 'rejected', 'a', '47.5', '-', 'billion', '-', 'dollar',
'takeover', 'offer', 'from', 'Microsoft', '.'],
['He', 'left', 'us', '.', 'Little', 'boys', 'crowded', 'together',
'on', 'long', 'wooden', 'benches', ',', 'and', 'in', 'the',
'center', 'of', 'the', 'room', 'sat', 'the', 'teacher', '.',
'His', 'black', 'beard', 'dripped', 'down', 'over', 'the',
'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand',
'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He',
'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes',
'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',', 'then',
'shouted', 'in', 'Yiddish', ',', '``', 'One', ',', 'two', ',',
'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against',
'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out',
'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of',
'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they',
'had', 'previously', 'chanted', 'in', 'Hebrew', '.']],
indices=[[8, 8823], [3, 23, 35, 45, 75, 83, 1234]],
expected=[['!', '$NONE^'], ['.', '.', '.', '.', '!', '.', '$NONE^']],
),
# Dimensions:
# params: [2, (d1), 2]
# indices: [2, (d2)]
dict(
descr='params: rank 3, indices: rank 2',
params=[
[['The', 'deal'], ['takeover', 'offer'], ['from', 'Microsoft']],
[['Who', 'let'], ['the', 'dogs'], ['out', '?']],
],
ragged_rank=1,
indices=[[1, -1, 2, 30], [1, 100]],
indices_ragged_rank=1,
expected=[[['takeover', 'offer'],
['$NONE^', '$NONE^'],
['from', 'Microsoft'],
['$NONE^', '$NONE^']],
[['the', 'dogs'],
['$NONE^', '$NONE^']]],
expected_ragged_rank=1,
default_value=['$NONE^', '$NONE^'],
),
# Dimensions:
# params: [2, (d1), (d2)]
# indices: [2, (d3)]
dict(
descr='params: [2, (d1), (d2)], indices: [2, (d3)]',
params=[
[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-',
'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'],
['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall'],
],
[['It\'s', 'always', 'darkest', 'before', 'the', 'dawn']]
],
indices=[[1, 100], [0, -1]],
expected=[[['Trumpty', 'Dumpty', 'sat', 'on', 'a', 'wall'],
['$NONE^']],
[["It's", 'always', 'darkest', 'before', 'the', 'dawn'],
['$NONE^']]]
),
# Dimensions:
# params: [2, (d1), (d2)]
# indices: [2, (d1), (d3)]
dict(
descr='Test overbound indices of rank 3',
params=[
[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-',
'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.'],
['Foo', 'bar', 'mar']],
[['He', 'left', 'us', '.', 'Little', 'boys', 'crowded',
'together', 'on', 'long', 'wooden', 'benches', ',', 'and', 'in',
'the', 'center', 'of', 'the', 'room', 'sat', 'the', 'teacher',
'.', 'His', 'black', 'beard', 'dripped', 'down', 'over', 'the',
'front', 'of', 'his', 'coat', '.', 'One', 'white', 'hand',
'poised', 'a', 'stick', 'above', 'his', 'desk', '.', 'He',
'turned', 'his', 'surly', ',', 'half', '-', 'closed', 'eyes',
'toward', 'us', ',', 'stared', 'for', 'a', 'second', ',',
'then', 'shouted', 'in', 'Yiddish', ',', '``', 'One', ',',
'two', ',',
'three', "''", '!', '!', 'Rapping', 'the', 'stick', 'against',
'the', 'desk', '.', 'The', 'little', 'boys', 'shrilled', 'out',
'a', 'Yiddish', 'translation', 'or', 'interpretation', 'of',
'the', 'Five', 'Books', 'of', 'Moses', ',', 'which', 'they',
'had', 'previously', 'chanted', 'in', 'Hebrew', '.'],
['I', 'too', 'was', 'hustled', 'scammed', 'bamboozled', 'hood',
'winked', 'lead', 'astray']]
],
indices=[[[8, 8823], [0, 100]], [[3, 23, 35, 45, 75, 83, 1234], [5]]],
expected=[[['!', '$NONE^'], ['Foo', '$NONE^']],
[['.', '.', '.', '.', '!', '.', '$NONE^'],
['bamboozled']]],
),
# params.shape = [2, (d1), 8]
# indices.shape = [2, (d1), 3]
dict(
descr='params = [2, (2, 1), 8], indices = [2, (2, 1), 3]',
params=[[['h'] * 8, ['w'] * 8], [['b'] * 8]],
ragged_rank=1,
indices=[[[0, 100, 1], [0, 1, 0]], [[1, 0, 0]]],
indices_ragged_rank=1,
expected=[[['h', '$NONE^', 'h'], ['w', 'w', 'w']], [['b', 'b', 'b']]],
expected_ragged_rank=1,
),
])
def testRaggedBatchGatherWithDefault(
self, descr, params, indices, expected, indices_ragged_rank=None,
expected_ragged_rank=None, ragged_rank=None, default_value='$NONE^'):
params = ragged_factory_ops.constant(params, ragged_rank=ragged_rank)
indices = ragged_factory_ops.constant(
indices, ragged_rank=indices_ragged_rank or ragged_rank)
expected = ragged_factory_ops.constant(
expected, ragged_rank=expected_ragged_rank or ragged_rank)
result = ragged_batch_gather_with_default_op.batch_gather_with_default(
params, indices, default_value)
self.assertRaggedEqual(result, expected)
@parameterized.parameters([
# Dimensions:
# params: dims [2, 5], indices: [2, 2]
dict(
descr='params: dims [2, 5], indices: [2, 2]',
params=[
['The', 'deal', 'came', 'about', '18'],
['He', 'left', 'us', '.', 'Little']],
indices=[[0, -1], [3, 121]],
expected=[['The', '$NONE^'], ['.', '$NONE^']],
default_value='$NONE^',
),
# Dimensions:
# params: dims [2, 2, 5], indices: [2, 2]
dict(
descr='params: dims [2, 2, 5], indices: [2, 2]',
params=[
[['The', 'deal', 'came', 'about', '18'],
['The', 'deal', 'came', 'about', '19'],
],
[['He', 'left', 'us', '.', 'Little'],
['The', 'deal', 'came', 'about', '20'],
]
],
indices=[[0, -1], [0, 121]],
expected=[[['The', 'deal', 'came', 'about', '18'],
['$NONE^', '$NONE^', '$NONE^', '$NONE^', '$NONE^']],
[['He', 'left', 'us', '.', 'Little'],
['$NONE^', '$NONE^', '$NONE^', '$NONE^', '$NONE^']]],
default_value='$NONE^',
),
# Test default_value with shape [5]
dict(
descr='params: dims [2, 2, 5], indices: [2, 2]',
params=[
[['The', 'deal', 'came', 'about', '18'],
['The', 'deal', 'came', 'about', '19'],
],
[['He', 'left', 'us', '.', 'Little'],
['The', 'deal', 'came', 'about', '20'],
]
],
indices=[[0, -1], [0, 121]],
expected=[[['The', 'deal', 'came', 'about', '18'],
[':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:']],
[['He', 'left', 'us', '.', 'Little'],
[':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:']]],
default_value=[':FOO:', ':FOO:', ':FOO:', ':FOO:', ':FOO:'],
),
])
def testRaggedBatchGatherWithDefaultOnTensors(
self, descr, params, indices, expected, default_value):
params = constant_op.constant(params)
indices = constant_op.constant(indices)
expected = constant_op.constant(expected)
result = ragged_batch_gather_with_default_op.batch_gather_with_default(
params, indices, default_value)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
params=[['The', 'deal', 'came', 'about', '18', 'months', 'after',
'Yahoo', '!', 'rejected', 'a', '47.5', '-', 'billion', '-',
'dollar', 'takeover', 'offer', 'from', 'Microsoft', '.']],
indices=[[[8, -1]]],
# Exception here because different errors are thrown in eager vs
# graph mode.
error=Exception,
default_value='$NONE^',
),
])
def testRankMismatch(
self, params, indices, default_value, error):
params = ragged_factory_ops.constant(params)
indices = ragged_factory_ops.constant(indices)
with self.assertRaises(error):
_ = ragged_batch_gather_with_default_op.batch_gather_with_default(
params, indices, default_value)
@parameterized.parameters([
# Dimensions:
# params: [2, (d1), 2]
# indices: [2, (d2)]
# default_value: []
dict(
          descr='params: rank 3, indices: rank 2, default: shape = [], but'
' should be [2]',
params=[
[['The', 'deal'], ['takeover', 'offer'], ['from', 'Microsoft']],
[['Who', 'let'], ['the', 'dogs'], ['out', '?']],
],
ragged_rank=1,
indices=[[1, -1, 2, 30], [1, 100]],
indices_ragged_rank=1,
default_value='$NONE^',
error=Exception,
)
])
def testInvalidDefaultValueRank(
self, descr, params, indices, default_value, error, ragged_rank=None,
indices_ragged_rank=None):
params = ragged_factory_ops.constant(params, ragged_rank=ragged_rank)
indices = ragged_factory_ops.constant(
indices, ragged_rank=indices_ragged_rank)
with self.assertRaises(error):
_ = ragged_batch_gather_with_default_op.batch_gather_with_default(
params, indices, default_value)
def testRaggedBatchGatherUnknownRankError(self):
if context.executing_eagerly():
return
params = [['a', 'b'], ['c', 'd']]
indices = array_ops.placeholder(dtypes.int32, shape=None)
ragged_indices = ragged_tensor.RaggedTensor.from_row_splits(
indices, [0, 2, 4])
with self.assertRaisesRegexp(
ValueError, 'batch_gather does not allow indices with unknown shape.'):
ragged_batch_gather_ops.batch_gather(params, indices)
with self.assertRaisesRegexp(
ValueError, 'batch_gather does not allow indices with unknown shape.'):
ragged_batch_gather_ops.batch_gather(params, ragged_indices)
@parameterized.parameters(
[
dict(
params=ragged_factory_ops.constant_value([['a'], ['b'], ['c']]),
indices=ragged_factory_ops.constant_value([[0], [0]]),
message='Dimensions 3 and 2 are not compatible'),
dict(
params=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
indices=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]],
[[0]]]),
message='batch shape from indices does not match params shape'),
dict( # rank mismatch
params=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]],
[[0]]]),
indices=ragged_factory_ops.constant_value([[[0, 0]], [[0, 0, 0]],
[[0]]]),
error=(ValueError, errors.InvalidArgumentError)),
dict(
params=ragged_factory_ops.constant_value([[[0, 0], [0, 0, 0]],
[[0]], [[0]]]),
indices=ragged_factory_ops.constant_value([[[0, 0]], [[0, 0, 0]],
[[0]]]),
error=errors.InvalidArgumentError,
message='.*Condition x == y did not hold.*'),
dict(
params=ragged_factory_ops.constant_value(['a', 'b', 'c']),
indices=ragged_factory_ops.constant_value([[0], [0]]),
message='batch shape from indices does not match params shape'),
dict(
params=ragged_factory_ops.constant_value([['a']]),
indices=0,
message='indices.rank must be at least 1.'),
dict(
params=ragged_factory_ops.constant_value([['a']]),
indices=[[[0]]],
message='batch shape from indices does not match params shape'),
])
def testRaggedBatchGatherStaticError(self,
params,
indices,
message=None,
error=ValueError):
with self.assertRaisesRegexp(error, message):
ragged_batch_gather_ops.batch_gather(params, indices)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_range op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedRangeOpTest(ragged_test_util.RaggedTensorTestCase):
def testDocStringExamples(self):
"""Examples from ragged_range.__doc__."""
rt1 = ragged_math_ops.range([3, 5, 2])
self.assertRaggedEqual(rt1, [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]])
rt2 = ragged_math_ops.range([0, 5, 8], [3, 3, 12])
self.assertRaggedEqual(rt2, [[0, 1, 2], [], [8, 9, 10, 11]])
rt3 = ragged_math_ops.range([0, 5, 8], [3, 3, 12], 2)
self.assertRaggedEqual(rt3, [[0, 2], [], [8, 10]])
def testBasicRanges(self):
# Specify limits only.
self.assertRaggedEqual(
ragged_math_ops.range([0, 3, 5]),
[list(range(0)), list(range(3)),
list(range(5))])
# Specify starts and limits.
self.assertRaggedEqual(
ragged_math_ops.range([0, 3, 5], [2, 3, 10]),
[list(range(0, 2)),
list(range(3, 3)),
list(range(5, 10))])
# Specify starts, limits, and deltas.
self.assertRaggedEqual(
ragged_math_ops.range([0, 3, 5], [4, 4, 15], [2, 3, 4]),
[list(range(0, 4, 2)),
list(range(3, 4, 3)),
list(range(5, 15, 4))])
def testFloatRanges(self):
expected = [[0.0, 0.4, 0.8, 1.2, 1.6, 2.0, 2.4, 2.8, 3.2, 3.6], [3.0],
[5.0, 7.2, 9.4, 11.6, 13.8]]
actual = ragged_math_ops.range([0.0, 3.0, 5.0], [3.9, 4.0, 15.0],
[0.4, 1.5, 2.2])
self.assertEqual(
expected,
[[round(v, 5) for v in row] for row in self.eval_to_list(actual)])
def testNegativeDeltas(self):
self.assertRaggedEqual(
ragged_math_ops.range([0, 3, 5], limits=0, deltas=-1),
[list(range(0, 0, -1)),
list(range(3, 0, -1)),
list(range(5, 0, -1))])
self.assertRaggedEqual(
ragged_math_ops.range([0, -3, 5], limits=0, deltas=[-1, 1, -2]),
[list(range(0, 0, -1)),
list(range(-3, 0, 1)),
list(range(5, 0, -2))])
def testBroadcast(self):
# Specify starts and limits, broadcast deltas.
self.assertRaggedEqual(
ragged_math_ops.range([0, 3, 5], [4, 4, 15], 3),
[list(range(0, 4, 3)),
list(range(3, 4, 3)),
list(range(5, 15, 3))])
# Broadcast all arguments.
self.assertRaggedEqual(
ragged_math_ops.range(0, 5, 1), [list(range(0, 5, 1))])
def testEmptyRanges(self):
rt1 = ragged_math_ops.range([0, 5, 3], [0, 3, 5])
rt2 = ragged_math_ops.range([0, 5, 5], [0, 3, 5], -1)
self.assertRaggedEqual(rt1, [[], [], [3, 4]])
self.assertRaggedEqual(rt2, [[], [5, 4], []])
def testShapeFnErrors(self):
self.assertRaises((ValueError, errors.InvalidArgumentError),
ragged_math_ops.range, [[0]], 5)
self.assertRaises((ValueError, errors.InvalidArgumentError),
ragged_math_ops.range, 0, [[5]])
self.assertRaises((ValueError, errors.InvalidArgumentError),
ragged_math_ops.range, 0, 5, [[0]])
self.assertRaises((ValueError, errors.InvalidArgumentError),
ragged_math_ops.range, [0], [1, 2])
def testKernelErrors(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'Requires delta != 0'):
self.evaluate(ragged_math_ops.range(0, 0, 0))
def testShape(self):
self.assertRaggedEqual(
ragged_math_ops.range(0, 0, 1).shape.as_list(), [1, None])
self.assertRaggedEqual(
ragged_math_ops.range([1, 2, 3]).shape.as_list(), [3, None])
self.assertRaggedEqual(
ragged_math_ops.range([1, 2, 3], [4, 5, 6]).shape.as_list(), [3, None])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_range_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.ragged in eager execution mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
class RaggedTensorTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
dict(pylist=[[b'a', b'b'], [b'c']]),
dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]]),
dict(pylist=[[[1, 2], [3, 4]], [[5, 6], [], [7, 8]]], ragged_rank=1),
])
def testRaggedTensorToList(self, pylist, ragged_rank=None):
rt = ragged_factory_ops.constant(pylist, ragged_rank)
self.assertRaggedEqual(rt, pylist)
@parameterized.parameters([
dict(pylist=[[b'a', b'b'], [b'c']]),
dict(pylist=[[[1, 2], [3]], [[4, 5, 6], [], [7]]]),
])
def testRaggedTensorStr(self, pylist):
rt = ragged_factory_ops.constant(pylist)
self.assertEqual(str(rt), '<tf.RaggedTensor %s>' % pylist)
if __name__ == '__main__':
ops.enable_eager_execution()
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_eager_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for converting between row_splits and segment_ids."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util.tf_export import tf_export
# For background on "segments" and "segment ids", see:
# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation
@tf_export("ragged.row_splits_to_segment_ids")
def row_splits_to_segment_ids(splits, name=None, out_type=None):
"""Generates the segmentation corresponding to a RaggedTensor `row_splits`.
Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if
`splits[j] <= i < splits[j+1]`. Example:
```python
>>> ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9]).eval()
[ 0 0 0 2 2 3 4 4 4 ]
```
Args:
splits: A sorted 1-D integer Tensor. `splits[0]` must be zero.
name: A name prefix for the returned tensor (optional).
out_type: The dtype for the return value. Defaults to `splits.dtype`,
or `tf.int64` if `splits` does not have a dtype.
Returns:
A sorted 1-D integer Tensor, with `shape=[splits[-1]]`
Raises:
ValueError: If `splits` is invalid.
"""
with ops.name_scope(name, "RaggedSplitsToSegmentIds", [splits]) as name:
splits = ops.convert_to_tensor(
splits, name="splits",
preferred_dtype=dtypes.int64)
if splits.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("splits must have dtype int32 or int64")
splits.shape.assert_has_rank(1)
if tensor_shape.dimension_value(splits.shape[0]) == 0:
raise ValueError("Invalid row_splits: []")
if out_type is None:
out_type = splits.dtype
else:
out_type = dtypes.as_dtype(out_type)
row_lengths = splits[1:] - splits[:-1]
nrows = array_ops.shape(splits, out_type=out_type)[-1] - 1
indices = math_ops.range(nrows)
return ragged_util.repeat(indices, repeats=row_lengths, axis=0)
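# A minimal pure-Python sketch of the computation above (illustrative only;
# the helper name is hypothetical and it assumes a plain list `splits` with
# splits[0] == 0):
#
#   def _segment_ids_from_splits_py(splits):
#     row_lengths = [splits[i + 1] - splits[i] for i in range(len(splits) - 1)]
#     # Emit the row index once per element in that row.
#     return [row for row, n in enumerate(row_lengths) for _ in range(n)]
#
#   _segment_ids_from_splits_py([0, 3, 3, 5, 6, 9])
#   # -> [0, 0, 0, 2, 2, 3, 4, 4, 4]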
# For background on "segments" and "segment ids", see:
# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation
@tf_export("ragged.segment_ids_to_row_splits")
def segment_ids_to_row_splits(segment_ids, num_segments=None,
out_type=None, name=None):
"""Generates the RaggedTensor `row_splits` corresponding to a segmentation.
Returns an integer vector `splits`, where `splits[0] = 0` and
  `splits[i] = splits[i-1] + count(segment_ids==i-1)`. Example:
```python
>>> ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4]).eval()
[ 0 3 3 5 6 9 ]
```
Args:
segment_ids: A 1-D integer Tensor.
num_segments: A scalar integer indicating the number of segments. Defaults
to `max(segment_ids) + 1` (or zero if `segment_ids` is empty).
out_type: The dtype for the return value. Defaults to `segment_ids.dtype`,
or `tf.int64` if `segment_ids` does not have a dtype.
name: A name prefix for the returned tensor (optional).
Returns:
A sorted 1-D integer Tensor, with `shape=[num_segments + 1]`.
"""
if out_type is None:
if isinstance(segment_ids, ops.Tensor):
out_type = segment_ids.dtype
elif isinstance(num_segments, ops.Tensor):
out_type = num_segments.dtype
else:
out_type = dtypes.int64
else:
out_type = dtypes.as_dtype(out_type)
with ops.name_scope(name, "SegmentIdsToRaggedSplits", [segment_ids]) as name:
# Note: we cast int64 tensors to int32, since bincount currently only
# supports int32 inputs.
segment_ids = ragged_util.convert_to_int_tensor(segment_ids, "segment_ids",
dtype=dtypes.int32)
segment_ids.shape.assert_has_rank(1)
if num_segments is not None:
num_segments = ragged_util.convert_to_int_tensor(num_segments,
"num_segments",
dtype=dtypes.int32)
num_segments.shape.assert_has_rank(0)
row_lengths = math_ops.bincount(
segment_ids,
minlength=num_segments,
maxlength=num_segments,
dtype=out_type)
splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
# Update shape information, if possible.
if num_segments is not None:
const_num_segments = tensor_util.constant_value(num_segments)
if const_num_segments is not None:
splits.set_shape(tensor_shape.TensorShape([const_num_segments + 1]))
return splits
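# A minimal pure-Python sketch of the computation above (illustrative only;
# the helper name is hypothetical and out-of-range segment ids are not handled):
#
#   def _row_splits_from_segment_ids_py(segment_ids, num_segments=None):
#     if num_segments is None:
#       num_segments = (max(segment_ids) + 1) if segment_ids else 0
#     row_lengths = [segment_ids.count(i) for i in range(num_segments)]
#     splits = [0]
#     for length in row_lengths:
#       splits.append(splits[-1] + length)  # cumulative sum, like cumsum above
#     return splits
#
#   _row_splits_from_segment_ids_py([0, 0, 0, 2, 2, 3, 4, 4, 4])
#   # -> [0, 3, 3, 5, 6, 9]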
|
tensorflow-master
|
tensorflow/python/ops/ragged/segment_id_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operator overloads for `RaggedTensor`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_getitem
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import tf_decorator
def _right(operator):
"""Right-handed version of an operator: swap args x and y."""
return tf_decorator.make_decorator(operator, lambda y, x: operator(x, y))
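# For example, once __radd__ = _right(math_ops.add) is assigned below, the
# expression `1 + rt` (for a RaggedTensor `rt`) resolves to rt.__radd__(1); the
# swapped lambda then evaluates math_ops.add(1, rt), so the operand order seen
# by the op matches the order written in the expression.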
# Indexing
ragged_tensor.RaggedTensor.__getitem__ = ragged_getitem.ragged_tensor_getitem
# Ordering operators
ragged_tensor.RaggedTensor.__ge__ = math_ops.greater_equal
ragged_tensor.RaggedTensor.__gt__ = math_ops.greater
ragged_tensor.RaggedTensor.__le__ = math_ops.less_equal
ragged_tensor.RaggedTensor.__lt__ = math_ops.less
# Logical operators
ragged_tensor.RaggedTensor.__and__ = math_ops.logical_and
ragged_tensor.RaggedTensor.__rand__ = _right(math_ops.logical_and)
ragged_tensor.RaggedTensor.__invert__ = math_ops.logical_not
ragged_tensor.RaggedTensor.__ror__ = _right(math_ops.logical_or)
ragged_tensor.RaggedTensor.__or__ = math_ops.logical_or
ragged_tensor.RaggedTensor.__xor__ = math_ops.logical_xor
ragged_tensor.RaggedTensor.__rxor__ = _right(math_ops.logical_xor)
# Arithmetic operators
ragged_tensor.RaggedTensor.__abs__ = math_ops.abs
ragged_tensor.RaggedTensor.__add__ = math_ops.add
ragged_tensor.RaggedTensor.__radd__ = _right(math_ops.add)
ragged_tensor.RaggedTensor.__div__ = math_ops.div
ragged_tensor.RaggedTensor.__rdiv__ = _right(math_ops.div)
ragged_tensor.RaggedTensor.__floordiv__ = math_ops.floordiv
ragged_tensor.RaggedTensor.__rfloordiv__ = _right(math_ops.floordiv)
ragged_tensor.RaggedTensor.__mod__ = math_ops.floormod
ragged_tensor.RaggedTensor.__rmod__ = _right(math_ops.floormod)
ragged_tensor.RaggedTensor.__mul__ = math_ops.multiply
ragged_tensor.RaggedTensor.__rmul__ = _right(math_ops.multiply)
ragged_tensor.RaggedTensor.__neg__ = math_ops.negative
ragged_tensor.RaggedTensor.__pow__ = math_ops.pow
ragged_tensor.RaggedTensor.__rpow__ = _right(math_ops.pow)
ragged_tensor.RaggedTensor.__sub__ = math_ops.subtract
ragged_tensor.RaggedTensor.__rsub__ = _right(math_ops.subtract)
ragged_tensor.RaggedTensor.__truediv__ = math_ops.truediv
ragged_tensor.RaggedTensor.__rtruediv__ = _right(math_ops.truediv)
# Dummy methods
def _dummy_bool(_):
"""Dummy method to prevent a RaggedTensor from being used as a Python bool."""
raise TypeError("RaggedTensor may not be used as a boolean.")
ragged_tensor.RaggedTensor.__bool__ = _dummy_bool
ragged_tensor.RaggedTensor.__nonzero__ = _dummy_bool
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_operators.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ragged Tensors.
This package defines ops for manipulating ragged tensors (`tf.RaggedTensor`),
which are tensors with non-uniform shapes. In particular, each `RaggedTensor`
has one or more *ragged dimensions*, which are dimensions whose slices may have
different lengths. For example, the inner (column) dimension of
`rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the row slices
(`rt[0, :]`, ..., `rt[4, :]`) have different lengths. For a more detailed
description of ragged tensors, see the `tf.RaggedTensor` class documentation
and the [Ragged Tensor Guide](/guide/ragged_tensors).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_batch_gather_ops
from tensorflow.python.ops.ragged import ragged_batch_gather_with_default_op
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_dispatch
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_getitem
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_operators
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_shape
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_where_op
from tensorflow.python.ops.ragged import segment_id_ops
# Add a list of the ops that support Ragged Tensors.
__doc__ += ragged_dispatch.ragged_op_list() # pylint: disable=redefined-builtin
|
tensorflow-master
|
tensorflow/python/ops/ragged/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.row_lengths."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedRowLengthsOp(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
# Docstring Example
dict(
rt_input=[[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []],
expected=[2, 0, 2, 1, 0]),
dict(
rt_input=[[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []],
axis=2,
expected=[[3, 1], [], [2, 1], [1], []]),
# 2D Tensor (1 ragged dimension)
dict(
rt_input=[['a'], ['b', 'c', 'd'], ['e'], [], ['f']],
expected=[1, 3, 1, 0, 1]),
dict(
rt_input=[['a'], ['b', 'c', 'd'], ['e'], [], ['f']],
axis=0,
expected=5),
dict(
rt_input=[['a', 'b', 'c', 'd', 'e', 'f', 'g']],
expected=[7]),
dict(
rt_input=[[], ['a', 'b', 'c', 'd', 'e', 'f', 'g'], []],
expected=[0, 7, 0]),
dict(
rt_input=[],
ragged_rank=1,
expected=[]),
dict(
rt_input=[],
ragged_rank=1,
axis=0,
expected=0),
# 3D Tensor (1 ragged dimension)
dict(
rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]],
ragged_rank=1,
axis=0,
expected=2),
dict(
rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]],
ragged_rank=1,
axis=1,
expected=[3, 2]),
dict(
rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]],
ragged_rank=1,
axis=2,
expected=[[2, 2, 2], [2, 2]],
expected_ragged_rank=1),
# 3D Tensor (2 ragged dimensions)
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=0,
expected=2),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=-3,
expected=2),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=1,
expected=[3, 2]),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=-2,
expected=[3, 2]),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=2,
expected=[[2, 3, 0], [4, 1]],
expected_ragged_rank=1),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=-1,
expected=[[2, 3, 0], [4, 1]],
expected_ragged_rank=1),
]) # pyformat: disable
def testRowLengths(self,
rt_input,
expected,
axis=1,
ragged_rank=None,
expected_ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
lengths = rt.row_lengths(axis)
self.assertRaggedEqual(lengths, expected)
if expected_ragged_rank is not None:
if isinstance(lengths, ragged_tensor.RaggedTensor):
self.assertEqual(lengths.ragged_rank, expected_ragged_rank)
else:
self.assertEqual(0, expected_ragged_rank)
@parameterized.parameters([
dict( # axis=2 out of bounds: expected -2<=axis<2.
rt_input=[[10, 20], [30]],
axis=2,
exception=(ValueError, errors.InvalidArgumentError)),
dict( # axis=-3 out of bounds: expected -2<=axis<2.
rt_input=[[2, 3, 0], [4, 1, 2]],
axis=-3,
exception=(ValueError, errors.InvalidArgumentError)),
])
def testErrors(self, rt_input, exception, message=None, axis=1):
rt = ragged_factory_ops.constant(rt_input)
with self.assertRaisesRegexp(exception, message):
rt.row_lengths(axis)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_row_lengths_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.ragged.ragged_tensor_shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_shape
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.ops.ragged.ragged_tensor_shape import RaggedTensorDynamicShape
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorShapeTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
def assertShapeEq(self, x, y):
assert isinstance(x, RaggedTensorDynamicShape)
assert isinstance(y, RaggedTensorDynamicShape)
x_partitioned_dim_sizes = [
self.eval_to_list(splits) #
for splits in x.partitioned_dim_sizes
]
y_partitioned_dim_sizes = [
self.eval_to_list(splits) #
for splits in y.partitioned_dim_sizes
]
self.assertEqual(x_partitioned_dim_sizes, y_partitioned_dim_sizes)
self.assertAllEqual(x.inner_dim_sizes, y.inner_dim_sizes)
@parameterized.parameters([
dict(value='x', expected_dim_sizes=[]),
dict(value=['a', 'b', 'c'], expected_dim_sizes=[3]),
dict(value=[['a', 'b', 'c'], ['d', 'e', 'f']], expected_dim_sizes=[2, 3]),
dict(
value=[[['a', 'b', 'c'], ['d', 'e', 'f']]],
expected_dim_sizes=[1, 2, 3]),
dict(
value=ragged_factory_ops.constant_value([['a', 'b', 'c'], ['d',
'e']]),
expected_dim_sizes=[2, [3, 2]]),
dict(
value=ragged_factory_ops.constant_value([[['a', 'b', 'c'], ['d',
'e']]]),
expected_dim_sizes=[1, [2], [3, 2]]),
dict(
value=ragged_factory_ops.constant_value(
[[['a', 'b', 'c'], ['d', 'e', 'f']]], ragged_rank=1),
expected_dim_sizes=[1, [2], 3]),
dict(
value=ragged_factory_ops.constant_value(
[[[[1], [2]], [[3], [4]]], [[[5], [6]]]], ragged_rank=1),
expected_dim_sizes=[2, [2, 1], 2, 1]),
dict(
value=ragged_factory_ops.constant_value([[10, 20], [30]]),
expected_dim_sizes=[2, [2, 1]]),
# Docstring examples:
dict(value=[[1, 2, 3], [4, 5, 6]], expected_dim_sizes=[2, 3]),
dict(
value=ragged_factory_ops.constant_value([[1, 2], [], [3, 4, 5]]),
expected_dim_sizes=[3, [2, 0, 3]]),
dict(
value=ragged_factory_ops.constant_value([[[1, 2], [3, 4]], [[5, 6]]],
ragged_rank=1),
expected_dim_sizes=[2, [2, 1], 2]),
dict(
value=ragged_factory_ops.constant_value([[[1, 2], [3]], [[4, 5]]]),
expected_dim_sizes=[2, [2, 1], [2, 1, 2]]),
])
def testFromTensor(self, value, expected_dim_sizes):
shape = RaggedTensorDynamicShape.from_tensor(value)
expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dim_sizes)
self.assertShapeEq(shape, expected)
@parameterized.parameters([
dict(dim_sizes=[], rank=0, expected_dim_sizes=[]),
dict(dim_sizes=[], rank=3, expected_dim_sizes=[1, 1, 1]),
dict(dim_sizes=[3], rank=1, expected_dim_sizes=[3]),
dict(dim_sizes=[3], rank=3, expected_dim_sizes=[1, 1, 3]),
dict(dim_sizes=[2, 3], rank=3, expected_dim_sizes=[1, 2, 3]),
dict(dim_sizes=[3, [3, 2, 4]], rank=2, expected_dim_sizes=[3, [3, 2, 4]]),
dict(
dim_sizes=[3, [3, 2, 4]],
rank=4,
expected_dim_sizes=[1, 1, 3, [3, 2, 4]]),
dict(
dim_sizes=[3, [3, 2, 4], 2, 3],
rank=5,
expected_dim_sizes=[1, 3, [3, 2, 4], 2, 3]),
])
def testBroadcastToRank(self, dim_sizes, rank, expected_dim_sizes):
shape = RaggedTensorDynamicShape.from_dim_sizes(dim_sizes)
expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dim_sizes)
broadcasted_shape = shape.broadcast_to_rank(rank)
self.assertShapeEq(broadcasted_shape, expected)
self.assertEqual(broadcasted_shape.rank, rank)
@parameterized.parameters([
#=========================================================================
# dimension[axis] is uniform inner; and row_lengths is a scalar
#=========================================================================
# shape: [BROADCAST(UNIFORM), UNIFORM, UNIFORM]
dict(axis=0,
row_length=3,
original_dim_sizes=[1, 4, 5],
broadcast_dim_sizes=[3, 4, 5]),
# shape: [UNIFORM, UNIFORM, BROADCAST(UNIFORM)]
dict(axis=2,
row_length=5,
original_dim_sizes=[3, 4, 1],
broadcast_dim_sizes=[3, 4, 5]),
# shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM)]
dict(axis=2,
row_length=5,
original_dim_sizes=[3, [3, 2, 8], 1],
broadcast_dim_sizes=[3, [3, 2, 8], 5]),
# shape: [UNIFORM, RAGGED, RAGGED, UNIFORM, UNIFORM, BROADCAST(UNIFORM)]
dict(axis=5,
row_length=5,
original_dim_sizes=[2, [2, 1], [3, 2, 8], 3, 4, 1],
broadcast_dim_sizes=[2, [2, 1], [3, 2, 8], 3, 4, 5]),
#=========================================================================
# dimension[axis] is uniform inner; and row_lengths is a vector
#=========================================================================
# shape: [UNIFORM, BROADCAST(UNIFORM)]
dict(axis=1,
row_length=[2, 0, 1],
original_dim_sizes=[3, 1],
broadcast_dim_sizes=[3, [2, 0, 1]]),
# shape: [UNIFORM, BROADCAST(UNIFORM), UNIFORM]
dict(axis=1,
row_length=[2, 0, 1],
original_dim_sizes=[3, 1, 5],
broadcast_dim_sizes=[3, [2, 0, 1], 5]),
# shape: [UNIFORM, UNIFORM, BROADCAST(UNIFORM)]
dict(axis=2,
row_length=[2, 0, 1, 3, 8, 2, 3, 4, 1, 8, 7, 0],
original_dim_sizes=[4, 3, 1],
broadcast_dim_sizes=[4, 3, [2, 0, 1, 3, 8, 2, 3, 4, 1, 8, 7, 0]]),
# shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM)]
dict(axis=2,
row_length=[2, 5, 3],
original_dim_sizes=[2, [2, 1], 1],
broadcast_dim_sizes=[2, [2, 1], [2, 5, 3]]),
# shape: [UNIFORM, RAGGED, UNIFORM, UNIFORM, BROADCAST(UNIFORM), UNIFORM]
dict(axis=4,
row_length=list(range(18)),
original_dim_sizes=[2, [2, 1], 3, 2, 1, 8],
broadcast_dim_sizes=[2, [2, 1], 3, 2, list(range(18)), 8]),
#=========================================================================
# dimension[axis] is uniform partitioned; and row_lengths is a scalar
#=========================================================================
# shape: [BROADCAST(UNIFORM), RAGGED]
dict(axis=0,
row_length=3,
original_dim_sizes=[1, [5]],
broadcast_dim_sizes=[3, [5, 5, 5]]),
# shape: [BROADCAST(UNIFORM), UNIFORM, RAGGED]
dict(axis=0,
row_length=2,
original_dim_sizes=[1, 3, [3, 0, 2]],
broadcast_dim_sizes=[2, 3, [3, 0, 2, 3, 0, 2]]),
# shape: [BROADCAST(UNIFORM), RAGGED, RAGGED, UNIFORM, UNIFORM]
dict(axis=0,
row_length=3,
original_dim_sizes=[1, [3], [3, 5, 2], 9, 4, 5],
broadcast_dim_sizes=[3, [3, 3, 3], [3, 5, 2, 3, 5, 2, 3, 5, 2],
9, 4, 5]),
# shape: [BROADCAST(UNIFORM), UNIFORM, RAGGED, UNIFORM]
dict(axis=0,
row_length=2,
original_dim_sizes=[1, 2, [2, 1], [3, 5, 2], 2],
broadcast_dim_sizes=[2, 2, [2, 1, 2, 1], [3, 5, 2, 3, 5, 2], 2]),
# shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, UNIFORM]
dict(axis=1,
row_length=2,
original_dim_sizes=[3, 1, [4, 0, 2], 5],
broadcast_dim_sizes=[3, 2, [4, 0, 2, 4, 0, 2], 5]),
# shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED]
dict(axis=1,
row_length=1,
original_dim_sizes=[2, 3, (1, 2, 3, 4, 5, 6)],
broadcast_dim_sizes=[2, 3, (1, 2, 3, 4, 5, 6)]),
#=========================================================================
# dimension[axis] is uniform partitioned; and row_lengths is a vector
#=========================================================================
# shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, UNIFORM]
dict(axis=1,
row_length=[4, 1, 2],
original_dim_sizes=[
3, # axis=0
1, # axis=1 (broadcast)
[3, 1, 2], # axis=2
5], # axis=3
broadcast_dim_sizes=[
3, # axis=0
[4, 1, 2], # axis=1 (broadcast)
[3, 3, 3, 3, 1, 2, 2], # axis=2
5]), # axis=3
# shape: [UNIFORM, BROADCAST(UNIFORM), RAGGED, RAGGED]
dict(axis=1,
row_length=[2, 0, 3],
original_dim_sizes=[
3, # axis=0
1, # axis=1 (broadcast)
[3, 1, 2], # axis=2
[3, 1, 4, 1, 5, 9]], # axis=3
broadcast_dim_sizes=[
3, # axis=0
[2, 0, 3], # axis=1 (broadcast)
[3, 3, 2, 2, 2], # axis=2
[3, 1, 4, 3, 1, 4, 5, 9, 5, 9, 5, 9]]), # axis=3
# shape: [UNIFORM, RAGGED, BROADCAST(UNIFORM), RAGGED, RAGGED, UNIFORM]
dict(axis=2,
row_length=[4, 1, 2],
original_dim_sizes=[
3, # axis=0
[2, 0, 1], # axis=1
1, # axis=2 (broadcast)
[3, 2, 1], # axis=3
[1, 0, 1, 0, 2, 3], # axis=4
5], # axis=5
broadcast_dim_sizes=[
3, # axis=0
               [2, 0, 1],                               # axis=1
[4, 1, 2], # axis=2 (broadcast)
[3, 3, 3, 3, 2, 1, 1], # axis=3
[1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, # axis=4
2, 3, 3],
5]), # axis=5
dict(axis=0,
row_length=2,
original_dim_sizes=[1, 1, 2, (2, 1)],
broadcast_dim_sizes=[2, 1, 2, (2, 1, 2, 1)]),
dict(axis=1,
row_length=(2, 1),
original_dim_sizes=[2, 1, 2, (2, 1, 2, 1)],
broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
dict(axis=2,
row_length=2,
original_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)],
broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
dict(axis=3,
row_length=(2, 1, 2, 1, 2, 1),
original_dim_sizes=[2, (2, 1), 2, 1],
broadcast_dim_sizes=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
]) # pyformat: disable
def testBroadcastDimension(self, axis, row_length, original_dim_sizes,
broadcast_dim_sizes):
"""Tests for the broadcast_dimension method.
Verifies that:
* `original.broadcast_dimension(axis, row_length) == broadcast`
* `broadcast.broadcast_dimension(axis, row_length) == broadcast`
* `broadcast.broadcast_dimension(axis, 1) == broadcast`
Args:
axis: The axis to broadcast
row_length: The slice lengths to broadcast to.
original_dim_sizes: The dimension sizes before broadcasting.
original_dim_sizes[axis] should be equal to `1` or `row_length`.
      broadcast_dim_sizes: The dimension sizes after broadcasting.
"""
original_shape = RaggedTensorDynamicShape.from_dim_sizes(original_dim_sizes)
bcast_shape = RaggedTensorDynamicShape.from_dim_sizes(broadcast_dim_sizes)
self.assertEqual(original_shape.rank, bcast_shape.rank)
# shape[axis].value == 1 and row_length > 1:
bcast1 = original_shape.broadcast_dimension(axis, row_length)
# shape[axis].value > 1 and row_length == shape[axis].value:
bcast2 = bcast_shape.broadcast_dimension(axis, row_length)
# shape[axis].value > 1 and row_length == 1:
bcast3 = bcast_shape.broadcast_dimension(axis, 1)
self.assertShapeEq(bcast1, bcast_shape)
self.assertShapeEq(bcast2, bcast_shape)
self.assertShapeEq(bcast3, bcast_shape)
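  # Illustrative sketch (editor addition, not part of the original test
  # suite): one of the parameterized cases above spelled out as a direct
  # call, assuming the same RaggedTensorDynamicShape API used throughout
  # this file.
  def testBroadcastDimensionSketch(self):
    # Broadcasting the size-1 uniform axis 0 by row_length=2 repeats the
    # uniform and ragged dimensions below it.
    original = RaggedTensorDynamicShape.from_dim_sizes([1, 3, [3, 0, 2]])
    expected = RaggedTensorDynamicShape.from_dim_sizes(
        [2, 3, [3, 0, 2, 3, 0, 2]])
    self.assertShapeEq(original.broadcast_dimension(0, 2), expected)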
@parameterized.parameters(
[
# Broadcast scalar
dict(x_dims=[], y_dims=[], expected_dims=[]),
dict(x_dims=[], y_dims=[2], expected_dims=[2]),
dict(x_dims=[], y_dims=[2, 3], expected_dims=[2, 3]),
dict(
x_dims=[],
y_dims=[2, (2, 3), (5, 7, 2, 0, 9)],
expected_dims=[2, (2, 3), (5, 7, 2, 0, 9)]),
# Broadcast vector
dict(x_dims=[3], y_dims=[4, 2, 3], expected_dims=[4, 2, 3]),
dict(x_dims=[1], y_dims=[4, 2, 3], expected_dims=[4, 2, 3]),
dict(x_dims=[3], y_dims=[4, 2, 1], expected_dims=[4, 2, 3]),
dict(
x_dims=[3],
y_dims=[3, (2, 3, 1), 1],
expected_dims=[3, (2, 3, 1), 3]),
dict(x_dims=[1], y_dims=[3, (2, 1, 3)], expected_dims=[3, (2, 1, 3)]),
dict(
x_dims=[1],
y_dims=[3, (2, 1, 3), 8],
expected_dims=[3, (2, 1, 3), 8]),
dict(
x_dims=[1],
y_dims=[2, (2, 3), (5, 7, 2, 0, 9)],
expected_dims=[2, (2, 3), (5, 7, 2, 0, 9)]),
# Mixed broadcasting
dict(
x_dims=[
1, # axis=0
3, # axis=1
(3, 0, 2), # axis=2
1, # axis=3
2, # axis=4
],
y_dims=[
2, # axis=0
1, # axis=1
1, # axis=2
(7, 2), # axis=3
1, # axis=4
],
expected_dims=[
2, # axis=0
3, # axis=1
(3, 0, 2, 3, 0, 2), # axis=2
(7, 7, 7, 7, 7, 2, 2, 2, 2, 2), # axis=3
2, # axis=4
]),
dict(
x_dims=[2, (2, 1), 2, 1],
y_dims=[1, 1, 2, (2, 1)],
expected_dims=[2, (2, 1), 2, (2, 1, 2, 1, 2, 1)]),
])
def testBroadcastDynamicShape(self, x_dims, y_dims, expected_dims):
x_shape = RaggedTensorDynamicShape.from_dim_sizes(x_dims)
y_shape = RaggedTensorDynamicShape.from_dim_sizes(y_dims)
expected = RaggedTensorDynamicShape.from_dim_sizes(expected_dims)
result1 = ragged_tensor_shape.broadcast_dynamic_shape(x_shape, y_shape)
result2 = ragged_tensor_shape.broadcast_dynamic_shape(y_shape, x_shape)
self.assertShapeEq(expected, result1)
self.assertShapeEq(expected, result2)
def testRepr(self):
shape = RaggedTensorDynamicShape.from_dim_sizes([2, (2, 1), 2, 1])
self.assertRegexpMatches(
repr(shape),
r'RaggedTensorDynamicShape\('
r'partitioned_dim_sizes=\(<[^>]+>, <[^>]+>\), '
r'inner_dim_sizes=<[^>]+>\)')
@parameterized.parameters(
[
dict(
x=[[10], [20], [30]], # shape=[3, 1]
dim_sizes=[3, 2],
expected=[[10, 10], [20, 20], [30, 30]]),
dict(
x=[[10], [20], [30]], # shape=[3, 1]
dim_sizes=[3, [3, 0, 2]],
expected=ragged_factory_ops.constant_value(
[[10, 10, 10], [], [30, 30]], dtype=np.int32)),
dict(
x=[[[1, 2, 3]], [[4, 5, 6]]], # shape = [2, 1, 3]
dim_sizes=[2, [2, 3], 3],
expected=ragged_factory_ops.constant_value(
[[[1, 2, 3], [1, 2, 3]], [[4, 5, 6], [4, 5, 6], [4, 5, 6]]],
dtype=np.int32,
ragged_rank=1)),
dict(
x=[[[1]], [[2]]], # shape = [2, 1, 1]
dim_sizes=[2, [2, 3], [0, 2, 1, 2, 0]],
expected=ragged_factory_ops.constant_value(
[[[], [1, 1]], [[2], [2, 2], []]],
dtype=np.int32,
ragged_rank=2)),
dict(
x=10,
dim_sizes=[3, [3, 0, 2]],
expected=ragged_factory_ops.constant_value([[10, 10, 10], [],
[10, 10]])),
])
def testRaggedBroadcastTo(self, x, dim_sizes, expected):
shape = RaggedTensorDynamicShape.from_dim_sizes(dim_sizes)
result = ragged_tensor_shape.broadcast_to(x, shape)
self.assertEqual(
getattr(result, 'ragged_rank', 0), getattr(expected, 'ragged_rank', 0))
self.assertRaggedEqual(result, expected)
@parameterized.parameters(
[
dict(
doc='x.shape=[3, (D1)]; y.shape=[3, 1]; bcast.shape=[3, (D1)]',
x=ragged_factory_ops.constant_value([[1, 2, 3], [], [4, 5]],
dtype=np.int32),
y=[[10], [20], [30]],
expected=ragged_factory_ops.constant_value([[11, 12, 13], [],
[34, 35]])),
dict(
doc='x.shape=[3, (D1)]; y.shape=[]; bcast.shape=[3, (D1)]',
x=ragged_factory_ops.constant_value([[1, 2, 3], [], [4, 5]],
dtype=np.int32),
y=10,
expected=ragged_factory_ops.constant_value([[11, 12, 13], [],
[14, 15]])),
dict(
doc='x.shape=[1, (D1)]; y.shape=[3, 1]; bcast.shape=[3, (D1)]',
x=ragged_factory_ops.constant_value([[1, 2, 3]], dtype=np.int32),
y=[[10], [20], [30]],
expected=ragged_factory_ops.constant_value(
[[11, 12, 13], [21, 22, 23], [31, 32, 33]], dtype=np.int32)),
dict(
doc=('x.shape=[2, (D1), 1]; y.shape=[1, (D2)]; '
'bcast.shape=[2, (D1), (D2)]'),
x=ragged_factory_ops.constant_value([[[1], [2], [3]], [[4]]],
ragged_rank=1),
y=ragged_factory_ops.constant_value([[10, 20, 30]]),
expected=ragged_factory_ops.constant_value([[[11, 21, 31],
[12, 22, 32],
[13, 23, 33]],
[[14, 24, 34]]])),
dict(
doc=('x.shape=[2, (D1), 1]; y.shape=[1, 1, 4]; '
'bcast.shape=[2, (D1), 4]'),
x=ragged_factory_ops.constant_value([[[10], [20]], [[30]]],
ragged_rank=1),
y=[[[1, 2, 3, 4]]],
expected=ragged_factory_ops.constant_value(
[[[11, 12, 13, 14], [21, 22, 23, 24]], [[31, 32, 33, 34]]],
ragged_rank=1)),
dict(
doc=('x.shape=[2, (D1), 2, 1]; y.shape=[2, (D2)]; '
                 'bcast.shape=[2, (D1), (2), (D2)]'),
x=ragged_factory_ops.constant_value(
[[[[1], [2]], [[3], [4]]], [[[5], [6]]]], ragged_rank=1),
y=ragged_factory_ops.constant_value([[10, 20], [30]]),
expected=ragged_factory_ops.constant_value([[[[11, 21], [32]],
[[13, 23], [34]]],
[[[15, 25], [36]]]])),
])
def testRaggedAddWithBroadcasting(self, x, y, expected, doc):
expected_rrank = getattr(expected, 'ragged_rank', 0)
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, dtype=dtypes.int32)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, dtype=dtypes.int32)
result = x + y
result_rrank = getattr(result, 'ragged_rank', 0)
self.assertEqual(expected_rrank, result_rrank)
if hasattr(expected, 'tolist'):
expected = expected.tolist()
self.assertRaggedEqual(result, expected)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_tensor_shape_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for constructing RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.util.tf_export import tf_export
#===============================================================================
# Op to construct a constant RaggedTensor from a nested Python list.
#===============================================================================
@tf_export("ragged.constant")
def constant(pylist, dtype=None, ragged_rank=None, inner_shape=None,
name=None, row_splits_dtype=dtypes.int64):
"""Constructs a constant RaggedTensor from a nested Python list.
Example:
```python
>>> ragged.constant([[1, 2], [3], [4, 5, 6]]).eval()
RaggedTensorValue(values=[1, 2, 3, 4, 5, 6], splits=[0, 2, 3, 6])
```
All scalar values in `pylist` must have the same nesting depth `K`, and the
returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar
values, then `K` is one greater than the maximum depth of empty lists in
`pylist`. All scalar values in `pylist` must be compatible with `dtype`.
Args:
pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
is not a `list`, `tuple` or `np.ndarray` must be a scalar value
compatible with `dtype`.
dtype: The type of elements for the returned `RaggedTensor`. If not
specified, then a default is chosen based on the scalar values in
`pylist`.
ragged_rank: An integer specifying the ragged rank of the returned
`RaggedTensor`. Must be nonnegative and less than `K`. Defaults to
`max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K
- 1 - len(inner_shape))` if `inner_shape` is specified.
inner_shape: A tuple of integers specifying the shape for individual inner
values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank`
is not specified. If `ragged_rank` is specified, then a default is chosen
based on the contents of `pylist`.
name: A name prefix for the returned tensor (optional).
row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits.
One of `tf.int32` or `tf.int64`.
Returns:
A potentially ragged tensor with rank `K` and the specified `ragged_rank`,
containing the values from `pylist`.
Raises:
ValueError: If the scalar values in `pylist` have inconsistent nesting
depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
"""
def ragged_factory(values, row_splits):
row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype)
return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits,
validate=False)
with ops.name_scope(name, "RaggedConstant"):
return _constant_value(ragged_factory, constant_op.constant, pylist, dtype,
ragged_rank, inner_shape)
@tf_export(v1=["ragged.constant_value"])
def constant_value(pylist, dtype=None, ragged_rank=None, inner_shape=None,
row_splits_dtype="int64"):
"""Constructs a RaggedTensorValue from a nested Python list.
Warning: This function returns a `RaggedTensorValue`, not a `RaggedTensor`.
If you wish to construct a constant `RaggedTensor`, use
[`ragged.constant(...)`](constant.md) instead.
Example:
```python
>>> ragged.constant_value([[1, 2], [3], [4, 5, 6]])
RaggedTensorValue(values=[1, 2, 3, 4, 5, 6], splits=[0, 2, 3, 6])
```
All scalar values in `pylist` must have the same nesting depth `K`, and the
returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no
scalar values, then `K` is one greater than the maximum depth of empty lists
in `pylist`. All scalar values in `pylist` must be compatible with `dtype`.
Args:
pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
is not a `list` or `tuple` must be a scalar value compatible with `dtype`.
dtype: `numpy.dtype`. The type of elements for the returned `RaggedTensor`.
If not specified, then a default is chosen based on the scalar values in
`pylist`.
ragged_rank: An integer specifying the ragged rank of the returned
`RaggedTensorValue`. Must be nonnegative and less than `K`. Defaults to
`max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K
- 1 - len(inner_shape))` if `inner_shape` is specified.
inner_shape: A tuple of integers specifying the shape for individual inner
values in the returned `RaggedTensorValue`. Defaults to `()` if
`ragged_rank` is not specified. If `ragged_rank` is specified, then a
default is chosen based on the contents of `pylist`.
row_splits_dtype: data type for the constructed `RaggedTensorValue`'s
row_splits. One of `numpy.int32` or `numpy.int64`.
Returns:
A `tf.RaggedTensorValue` or `numpy.array` with rank `K` and the specified
`ragged_rank`, containing the values from `pylist`.
Raises:
ValueError: If the scalar values in `pylist` have inconsistent nesting
depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype).as_numpy_dtype
def _ragged_factory(values, row_splits):
row_splits = np.array(row_splits, dtype=row_splits_dtype)
return ragged_tensor_value.RaggedTensorValue(values, row_splits)
def _inner_factory(pylist, dtype, shape, name=None): # pylint: disable=unused-argument
return np.reshape(np.array(pylist, dtype=dtype), shape)
return _constant_value(_ragged_factory, _inner_factory, pylist, dtype,
ragged_rank, inner_shape)
def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank,
inner_shape):
"""Constructs a constant RaggedTensor or RaggedTensorValue.
Args:
ragged_factory: A factory function with the signature:
`ragged_factory(values, row_splits)`
inner_factory: A factory function with the signature: `inner_factory(pylist,
dtype, shape, name)`
pylist: A nested `list`, `tuple` or `np.ndarray`.
dtype: Data type for returned value.
ragged_rank: Ragged rank for returned value.
inner_shape: Inner value shape for returned value.
Returns:
A value returned by `ragged_factory` or `inner_factory`.
Raises:
ValueError: If the scalar values in `pylist` have inconsistent nesting
depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
"""
if ragged_tensor.is_ragged(pylist):
raise TypeError("pylist may not be a RaggedTensor or RaggedTensorValue.")
# np.ndim builds an array, so we short-circuit lists and tuples.
if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0:
# Scalar value
if ragged_rank is not None and ragged_rank != 0:
raise ValueError("Invalid pylist=%r: incompatible with ragged_rank=%d" %
(pylist, ragged_rank))
if inner_shape is not None and inner_shape:
raise ValueError(
"Invalid pylist=%r: incompatible with dim(inner_shape)=%d" %
(pylist, len(inner_shape)))
return inner_factory(pylist, dtype, ())
if ragged_rank is not None and ragged_rank < 0:
raise ValueError(
"Invalid ragged_rank=%r: must be nonnegative" % ragged_rank)
# Find the depth of scalar values in `pylist`.
scalar_depth, max_depth = _find_scalar_and_max_depth(pylist)
if scalar_depth is not None:
if max_depth > scalar_depth:
raise ValueError("Invalid pylist=%r: empty list nesting is greater "
"than scalar value nesting" % pylist)
# If both inner_shape and ragged_rank were specified, then check that
# they are compatible with pylist.
if inner_shape is not None and ragged_rank is not None:
expected_depth = ragged_rank + len(inner_shape) + 1
if ((scalar_depth is not None and expected_depth != scalar_depth) or
(scalar_depth is None and expected_depth < max_depth)):
raise ValueError(
"Invalid pylist=%r: incompatible with ragged_rank=%d "
"and dim(inner_shape)=%d" % (pylist, ragged_rank, len(inner_shape)))
# Check if the result is a `Tensor`.
if (ragged_rank == 0 or
(ragged_rank is None and
((max_depth < 2) or
(inner_shape is not None and max_depth - len(inner_shape) < 2)))):
return inner_factory(pylist, dtype, inner_shape)
# Compute default value for inner_shape.
if inner_shape is None:
if ragged_rank is None:
inner_shape = ()
else:
inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank)
# Compute default value for ragged_rank.
if ragged_rank is None:
if scalar_depth is None:
ragged_rank = max(1, max_depth - 1)
else:
ragged_rank = max(1, scalar_depth - 1 - len(inner_shape))
# Build the splits for each ragged rank, and concatenate the inner values
# into a single list.
nested_splits = []
values = pylist
for dim in range(ragged_rank):
nested_splits.append([0])
concatenated_values = []
for row in values:
nested_splits[dim].append(nested_splits[dim][-1] + len(row))
concatenated_values.extend(row)
values = concatenated_values
values = inner_factory(
values, dtype=dtype, shape=(len(values),) + inner_shape, name="values")
for row_splits in reversed(nested_splits):
values = ragged_factory(values, row_splits)
return values
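# Worked sketch (editor addition): the splits-building loop above, traced on
# plain Python lists without the factory callbacks. For
# pylist=[[1, 2], [], [3, 4, 5]] and ragged_rank=1 this returns
# ([[0, 2, 2, 5]], [1, 2, 3, 4, 5]). The helper is hypothetical and unused.
def _trace_nested_splits(pylist, ragged_rank):
  nested_splits = []
  values = pylist
  for dim in range(ragged_rank):
    nested_splits.append([0])
    concatenated_values = []
    for row in values:
      nested_splits[dim].append(nested_splits[dim][-1] + len(row))
      concatenated_values.extend(row)
    values = concatenated_values
  return nested_splits, values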
def _find_scalar_and_max_depth(pylist):
"""Finds nesting depth of scalar values in pylist.
Args:
pylist: A nested python `list` or `tuple`.
Returns:
A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting
depth of scalar values in `pylist`, or `None` if `pylist` contains no
scalars. `max_depth` is the maximum depth of `pylist` (including
empty lists).
Raises:
ValueError: If pylist has inconsistent nesting depths for scalars.
"""
# Check if pylist is not scalar. np.ndim builds an array, so we
# short-circuit lists and tuples.
if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0:
scalar_depth = None
max_depth = 1
for child in pylist:
child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child)
if child_scalar_depth is not None:
if scalar_depth is not None and scalar_depth != child_scalar_depth + 1:
raise ValueError("all scalar values must have the same nesting depth")
scalar_depth = child_scalar_depth + 1
max_depth = max(max_depth, child_max_depth + 1)
return (scalar_depth, max_depth)
return (0, 0)
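# Illustrative sketch (editor addition): expected results of
# `_find_scalar_and_max_depth`, traced by hand from the recursion above. The
# helper is hypothetical and never called by library code.
def _find_scalar_and_max_depth_examples():
  assert _find_scalar_and_max_depth(5) == (0, 0)  # scalar input
  assert _find_scalar_and_max_depth([[1, 2], [3]]) == (2, 2)
  assert _find_scalar_and_max_depth([[], [1]]) == (2, 2)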
def _default_inner_shape_for_pylist(pylist, ragged_rank):
"""Computes a default inner shape for the given python list."""
def get_inner_shape(item):
"""Returns the inner shape for a python list `item`."""
if not isinstance(item, (list, tuple)) and np.ndim(item) == 0:
return ()
elif item:
return (len(item),) + get_inner_shape(item[0])
return (0,)
def check_inner_shape(item, shape):
"""Checks that `item` has a consistent shape matching `shape`."""
is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0
if is_nested != bool(shape):
raise ValueError("inner values have inconsistent shape")
if is_nested:
if shape[0] != len(item):
raise ValueError("inner values have inconsistent shape")
for child in item:
check_inner_shape(child, shape[1:])
# Collapse the ragged layers to get the list of inner values.
flat_values = pylist
for dim in range(ragged_rank):
if not all(
isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values):
raise ValueError("pylist has scalar values depth %d, but ragged_rank=%d "
"requires scalar value depth greater than %d" %
(dim + 1, ragged_rank, ragged_rank))
flat_values = sum((list(v) for v in flat_values), [])
# Compute the inner shape looking only at the leftmost elements; and then
# use check_inner_shape to verify that other elements have the same shape.
inner_shape = get_inner_shape(flat_values)
check_inner_shape(flat_values, inner_shape)
return inner_shape[1:]
@tf_export(v1=["ragged.placeholder"])
def placeholder(dtype, ragged_rank, value_shape=None, name=None):
"""Creates a placeholder for a `tf.RaggedTensor` that will always be fed.
**Important**: This ragged tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
  @compatibility(eager) Placeholders are not compatible with eager execution.
Args:
dtype: The data type for the `RaggedTensor`.
ragged_rank: The ragged rank for the `RaggedTensor`
value_shape: The shape for individual flat values in the `RaggedTensor`.
name: A name for the operation (optional).
Returns:
A `RaggedTensor` that may be used as a handle for feeding a value, but
not evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if ragged_rank == 0:
return array_ops.placeholder(dtype, value_shape, name)
with ops.name_scope(name, "RaggedPlaceholder", []):
flat_shape = tensor_shape.TensorShape([None]).concatenate(value_shape)
result = array_ops.placeholder(dtype, flat_shape, "flat_values")
for i in reversed(range(ragged_rank)):
row_splits = array_ops.placeholder(dtypes.int64, [None],
"row_splits_%d" % i)
result = ragged_tensor.RaggedTensor(result, row_splits, internal=True)
return result
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_factory_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gather operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_ragged_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
#===============================================================================
# ragged_gather
#===============================================================================
# TODO(edloper): Add an `axis` argument
def gather(params, indices, validate_indices=None, axis=0, batch_dims=0,
name=None):
"""Gathers ragged slices from `params` axis `0` according to `indices`.
Returns `RaggedTensor` output, such that:
```python
output.shape = indices.shape + params.shape[1:]
output.ragged_rank = indices.shape.ndims + params.ragged_rank
output[i...j, d0...dn] = params[indices[i...j], d0...dn]
```
`params` may be ragged. `indices` may be ragged.
`indices` must have dtype `int32` or `int64`. If any index is out of bounds,
then an error is returned.
Examples:
```python
>>> params = tf.constant(['a', 'b', 'c', 'd', 'e'])
>>> indices = tf.constant([3, 1, 2, 1, 0])
>>> ragged_params = tf.ragged.constant([['a', 'b', 'c'], ['d'], [], ['e']])
>>> ragged_indices = tf.ragged.constant([[3, 1, 2], [1], [], [0]])
  >>> print(ragged.gather(params, ragged_indices))
  [['d', 'b', 'c'], ['b'], [], ['a']]
  >>> print(ragged.gather(ragged_params, indices))
  [['e'], ['d'], [], ['d'], ['a', 'b', 'c']]
  >>> print(ragged.gather(ragged_params, ragged_indices))
[[['e'], ['d'], []], [['d']], [], [['a', 'b', 'c']]]
```
Args:
params: The potentially ragged tensor from which to gather values. Must be
at least rank 1.
indices: The potentially ragged tensor indicating which values to gather.
      Must have dtype `int32` or `int64`. Values must be in the range `[0,
      params.shape[0])`.
validate_indices: Ignored.
axis: Must be zero.
batch_dims: Must be zero.
name: A name for the operation (optional).
Returns:
A `RaggedTensor`, where `output.dtype=params.dtype` and
`output.shape=indices.shape + params.shape[1:]` and
`output.ragged_rank=indices.shape.ndims + params.ragged_rank`.
Raises:
ValueError: If indices.shape.ndims is not known statically.
"""
del validate_indices
if not isinstance(axis, int) or axis != 0:
raise ValueError('axis != 0 is not supported for ragged gather yet.')
if not isinstance(batch_dims, int) or batch_dims != 0:
raise ValueError('batch_dims != 0 is not supported for ragged gather yet.')
with ops.name_scope(name, 'RaggedGather', [params, indices]):
params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
params, name='params')
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
indices, name='indices')
params, indices = ragged_tensor.match_row_splits_dtypes(params, indices)
if ragged_tensor.is_ragged(indices):
return indices.with_values(gather(params, indices.values))
if not ragged_tensor.is_ragged(params):
return array_ops.gather(params, indices)
indices = ops.convert_to_tensor(indices)
if indices.shape.ndims is None:
raise ValueError('indices.shape.ndims must be known statically')
result = gen_ragged_array_ops.ragged_gather(
indices=indices,
params_dense_values=params.flat_values,
params_nested_splits=params.nested_row_splits,
OUTPUT_RAGGED_RANK=indices.shape.ndims + len(params.nested_row_splits) -
1)
# Compose the RaggedTensor from splits & values.
return ragged_tensor.RaggedTensor.from_nested_row_splits(
result.output_dense_values, result.output_nested_splits, validate=False)
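# Illustrative sketch (editor addition): when `indices` is ragged, `gather`
# recurses on `indices.values` and reattaches the row splits, which is why
# the ragged_indices example in the docstring behaves like a per-row gather.
# The helper name is hypothetical and evaluating it assumes eager execution.
def _gather_ragged_indices_sketch():
  params = ['a', 'b', 'c', 'd', 'e']
  ragged_indices = ragged_tensor.RaggedTensor.from_row_splits(
      values=[3, 1, 2, 1, 0], row_splits=[0, 3, 3, 5])
  # Equivalent to ragged_indices.with_values(gather(params, [3, 1, 2, 1, 0])),
  # i.e. [['d', 'b', 'c'], [], ['b', 'a']].
  return gather(params, ragged_indices)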
#===============================================================================
# ragged.gather_nd
#===============================================================================
def gather_nd(params, indices, batch_dims=0, name=None):
"""Gather slices from `params` using `n`-dimensional indices.
This operation is similar to `gather`, but it uses the innermost dimension
of `indices` to define a slice into `params`. In particular, if:
* `indices` has shape `[A1...AN, I]`
* `params` has shape `[B1...BM]`
Then:
* `result` has shape `[A1...AN, B_{I+1}...BM]`.
* `result[a1...aN] = params[indices[a1...aN, :]]`
Args:
    params: A potentially ragged tensor with shape `[B1...BM]`.
    indices: A potentially ragged tensor with shape `[A1...AN, I]`.
batch_dims: Must be zero.
name: A name for the operation (optional).
Returns:
A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`.
#### Examples:
```python
>>> params = tf.compat.v1.ragged.constant_value(
... [ [ ['000', '001'], ['010' ] ],
... [ ['100' ], ['110', '111', '112'], ['120'] ],
... [ [ ], ['210' ] ] ])
>>> # Gather 2D slices from a 3D tensor
>>> ragged.gather_nd(params, [[2], [0]])
[ [ [ ], ['210'] ]
[ ['000', '001'], ['010'] ] ]
>>> # Gather 1D slices from a 3D tensor
>>> ragged.gather_nd(params, [[2, 1], [0, 0]])
[['210'], ['000', '001']]
>>> # Gather scalars from a 3D tensor
>>> ragged.gather_nd(params, [[0, 0, 1], [1, 1, 2]])
['001', '112']
```
"""
if not isinstance(batch_dims, int) or batch_dims != 0:
raise ValueError('batch_dims != 0 is not supported for ragged gather yet.')
if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)):
return array_ops.gather_nd(params, indices, name)
with ops.name_scope(name, 'RaggedGatherNd', [params, indices]):
params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
params, name='params')
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
indices, name='indices')
params, indices = ragged_tensor.match_row_splits_dtypes(params, indices)
indices_shape = indices.shape
indices_ndims = indices_shape.ndims
if indices_ndims is None:
      raise ValueError('indices.rank must be statically known.')
if indices_ndims == 0:
raise ValueError('indices.rank must be at least 1.')
if (ragged_tensor.is_ragged(indices) and
indices_ndims == indices.ragged_rank + 1):
raise ValueError('The innermost dimension of indices may not be ragged')
# `index_size` is the "n" in "gather_nd" -- i.e., the number of dimensions
# that each index slices into.
index_size = tensor_shape.dimension_value(indices_shape[-1])
if index_size is None:
raise ValueError('indices.shape[-1] must be statically known.')
# If `indices` has more than 2 dimensions, then recurse. If `indices` is
# dense, then we convert it to ragged before recursing, and then convert
# the result back to `dense` if appropriate.
if indices_ndims > 2:
indices_is_dense = not ragged_tensor.is_ragged(indices)
if indices_is_dense:
indices = ragged_tensor.RaggedTensor.from_tensor(
indices, ragged_rank=indices_ndims - 2,
row_splits_dtype=params.row_splits.dtype)
result = indices.with_flat_values(gather_nd(params, indices.flat_values))
if (indices_is_dense and ragged_tensor.is_ragged(result) and
result.ragged_rank == indices_ndims - 2):
result = ragged_tensor.RaggedTensor.to_tensor(result)
return result
# indices_ndims <= 2, and the innermost dimension of indices may not be
# ragged, so `indices` must not be ragged.
assert not ragged_tensor.is_ragged(indices)
assert ragged_tensor.is_ragged(params)
# Handle corner case: An empty index tuple selects the entire `params`
# value. So if `index_size` is zero, then tile `params`.
if index_size == 0:
params_ndims = params.ragged_rank + array_ops.rank(params.flat_values)
for dim in range(indices_ndims - 1):
params = ragged_array_ops.expand_dims(params, axis=0)
multiples = array_ops.concat([
array_ops.shape(indices)[:-1],
array_ops.ones([params_ndims], dtypes.int32)
],
axis=0)
return ragged_array_ops.tile(params, multiples)
# When index_size=1, we can just flatten the index tuples and use gather.
elif index_size == 1:
flattened_index_tuples = array_ops.reshape(indices, [-1])
return gather(params, flattened_index_tuples)
# Otherwise, params is a RaggedTensor, and indices is a 1D or 2D Tensor.
# Flatten both the index tuples and the params, such that the flattened
# index tuples point to the correct values in the flattened params; and
# then use ragged.gather on the flattened index tuples & params.
else:
indices = math_ops.cast(indices, params.row_splits.dtype)
# Flatten the outermost 2 dimensions of the index tuples & params.
flattened_index_tuples = array_ops.gather(params.row_splits,
indices[..., 0])
flattened_index_tuples += indices[..., 1]
flattened_params = params.values
# Flatten any remaining dimensions.
for dim in range(2, index_size):
if not ragged_tensor.is_ragged(flattened_params):
flattened_index_tuples = array_ops.expand_dims(
flattened_index_tuples, axis=1)
flattened_index_tuples = array_ops.concat(
[flattened_index_tuples, indices[..., dim:]], axis=1)
return array_ops.gather_nd(flattened_params, flattened_index_tuples)
flattened_index_tuples = array_ops.gather(
flattened_params.row_starts(), flattened_index_tuples)
flattened_index_tuples += indices[..., dim]
flattened_params = flattened_params.values
# Gather using the flattened index tuples and params.
return gather(flattened_params, flattened_index_tuples)
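# Worked sketch (editor addition): the index-flattening step above for
# index_size == 2. For a ragged `params` with row_splits=[0, 2, 3], the 2-D
# index [i, j] maps to flat position row_splits[i] + j in params.values, so
# gather_nd(params, [[0, 1], [1, 0]]) reduces to gather(params.values, [1, 2])
# and returns ['b', 'c']. The helper name is hypothetical; evaluating it
# assumes eager execution.
def _gather_nd_flattening_sketch():
  params = ragged_tensor.RaggedTensor.from_row_splits(
      values=['a', 'b', 'c'], row_splits=[0, 2, 3])
  return gather_nd(params, [[0, 1], [1, 0]])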
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_gather_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for ragged tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_ragged_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util.tf_export import tf_export
#===============================================================================
# ragged.range
#===============================================================================
# pylint: disable=redefined-builtin
@tf_export('ragged.range')
def range(starts, limits=None, deltas=1, dtype=None,
name=None, row_splits_dtype=dtypes.int64):
"""Returns a `RaggedTensor` containing the specified sequences of numbers.
Each row of the returned `RaggedTensor` contains a single sequence:
```python
ragged.range(starts, limits, deltas)[i] ==
tf.range(starts[i], limits[i], deltas[i])
```
  If `starts[i] > limits[i]` and `deltas[i] > 0`, then `output[i]` will be an
  empty list. Similarly, if `starts[i] < limits[i]` and `deltas[i] < 0`, then
`output[i]` will be an empty list. This behavior is consistent with the
Python `range` function, but differs from the `tf.range` op, which returns
an error for these cases.
Examples:
```python
>>> ragged.range([3, 5, 2]).eval().tolist()
[[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]
>>> ragged.range([0, 5, 8], [3, 3, 12]).eval().tolist()
[[0, 1, 2], [], [8, 9, 10, 11]]
>>> ragged.range([0, 5, 8], [3, 3, 12], 2).eval().tolist()
[[0, 2], [], [8, 10]]
```
The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
The vector inputs must all have the same size. Scalar inputs are broadcast
to match the size of the vector inputs.
Args:
starts: Vector or scalar `Tensor`. Specifies the first entry for each range
if `limits` is not `None`; otherwise, specifies the range limits, and the
first entries default to `0`.
limits: Vector or scalar `Tensor`. Specifies the exclusive upper limits for
each range.
deltas: Vector or scalar `Tensor`. Specifies the increment for each range.
Defaults to `1`.
dtype: The type of the elements of the resulting tensor. If not specified,
then a value is chosen based on the other args.
name: A name for the operation.
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` of type `dtype` with `ragged_rank=1`.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if limits is None:
starts, limits = 0, starts
with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name:
starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts')
limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits')
deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas')
# infer dtype if not explicitly provided
if dtype is None:
starts, limits, deltas = _infer_matching_dtype(
[starts, limits, deltas],
[dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
result = gen_ragged_math_ops.ragged_range(
starts, limits, deltas, Tsplits=row_splits_dtype, name=name)
return ragged_tensor.RaggedTensor.from_row_splits(result.rt_dense_values,
result.rt_nested_splits,
validate=False)
def _infer_matching_dtype(tensors, dtype_hierarchy):
"""Infers a matching dtype for tensors, and casts them to that dtype."""
assert all(t.dtype in dtype_hierarchy for t in tensors)
inferred_dtype = max([t.dtype for t in tensors], key=dtype_hierarchy.index)
return [math_ops.cast(t, inferred_dtype) for t in tensors]
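# Illustrative sketch (editor addition): `_infer_matching_dtype` casts all
# inputs to the "largest" dtype present, per the hierarchy passed by `range`
# above, so mixing integer starts with float limits yields float range
# values. The helper below is hypothetical and never called by library code.
def _infer_matching_dtype_sketch():
  ints = ops.convert_to_tensor([1, 2])        # int32
  floats = ops.convert_to_tensor([1.0, 2.0])  # float32
  # Both returned tensors are float32.
  return _infer_matching_dtype(
      [ints, floats],
      [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])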
#===============================================================================
# ragged_segment_<AGGREGATE>
#===============================================================================
# Docstring template used for the ragged_segment_<AGGREGATE> ops.
_RAGGED_SEGMENT_DOCSTRING = """\
Computes the %(combination)s along segments of a RaggedTensor.
Returns a RaggedTensor `output` with `num_segments` rows, where the row
`output[i]` is formed by taking the %(combination)s of all rows of `data`
whose corresponding `segment_id` is `i`.
The length of the row `output[i]` will be the maximum of the lengths of
all rows of `data` whose corresponding `segment_id` is `i`. If no `data`
rows correspond to a given segment ID, then the output row for that segment
ID will be empty.
Args:
data: A `RaggedTensor` containing the values to combine.
segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or
`int32`. `segment_ids.shape` must be a prefix of `data.shape`.
      Values must be non-negative and less than `num_segments`.
`segment_ids` is not required to be sorted.
num_segments: An `int32` or `int64` scalar specifying the number of
distinct segment ids.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the %(combined)s values. The returned tensor
has the same dtype as `data`, and its shape is
`[num_segments] + data.shape[segment_ids.rank:]`.
Raises:
ValueError: If `segment_ids.shape` is not a prefix of `data.shape`.
"""
def _ragged_segment_aggregate(unsorted_segment_op,
data,
segment_ids,
num_segments,
name=None):
"""Aggregates along segments of a RaggedTensor using `unsorted_segment_op`.
Returns a RaggedTensor `output` with `num_segments` rows, where the row
`output[i]` is formed by combining all rows of `data` whose corresponding
`segment_id` is `i`. The values in each row are combined using
`unsorted_segment_op`.
The length of the row `output[i]` will be the maximum of the lengths of
all rows of `data` whose corresponding `segment_id` is `i`. If no `data`
rows correspond to a given segment ID, then the output row for that segment
ID will be empty.
Args:
unsorted_segment_op: The tensorflow `op` that should be used to combine
values in each row. Must have the same signature and basic behavior as
`unsorted_segment_sum`, `unsorted_segment_max`, etc.
data: A `RaggedTensor` containing the values to be combined.
segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or
`int32`. `segment_ids.shape` must be a prefix of `data.shape`.
`segment_ids` is not required to be sorted.
num_segments: An `int32` or `int64` scalar.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the aggregated values. The returned tensor
has the same dtype as `data`, and its shape is
`[num_segments] + data.shape[segment_ids.rank:]`.
Raises:
ValueError: If segment_ids.shape is not a prefix of data.shape.
"""
if not (ragged_tensor.is_ragged(data) or
ragged_tensor.is_ragged(segment_ids)):
return unsorted_segment_op(data, segment_ids, num_segments, name)
with ops.name_scope(name, 'RaggedSegment',
[data, segment_ids, num_segments]) as name:
data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
segment_ids = ragged_tensor.convert_to_tensor_or_ragged_tensor(
segment_ids, name='segment_ids')
data, segment_ids = ragged_tensor.match_row_splits_dtypes(data, segment_ids)
if segment_ids.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError('segment_ids must have dtype int32 or int64.')
if ragged_tensor.is_ragged(segment_ids):
if not ragged_tensor.is_ragged(data):
raise ValueError('segment_ids.shape must be a prefix of data.shape, '
'but segment_ids is ragged and data is not.')
check_splits = check_ops.assert_equal(
segment_ids.row_splits,
data.row_splits,
message='segment_ids.shape must be a prefix of data.shape')
with ops.control_dependencies([check_splits]):
return _ragged_segment_aggregate(unsorted_segment_op, data.values,
segment_ids.values, num_segments, name)
# Find the length of each row in data. (shape=[data_nrows])
data_row_lengths = data.row_splits[1:] - data.row_splits[:-1]
# Find the length that each output row will have. The length of the row
# corresponding to segment `id` is `max(data_row_lengths[i])` where
# `segment_ids[i]=id`. (shape=[output_nrows])
output_row_lengths = math_ops.maximum(
math_ops.unsorted_segment_max(data_row_lengths, segment_ids,
num_segments), 0)
# Build the splits tensor for the output RaggedTensor.
output_splits = array_ops.concat([
array_ops.zeros([1], output_row_lengths.dtype),
math_ops.cumsum(output_row_lengths)
],
axis=0)
# For each row in `data`, find the start & limit position where that row's
# values will be aggregated in output.values.
data_row_to_out_row_start = array_ops.gather(output_splits, segment_ids)
data_row_to_out_row_limit = data_row_to_out_row_start + data_row_lengths
    # For each value in `data.values`, find the position where it will be
    # aggregated in `output.values`.
# Get the target output values index for each data values index.
data_val_to_out_val_index = range(data_row_to_out_row_start,
data_row_to_out_row_limit).values
# Recursively aggregate the values.
output_values = _ragged_segment_aggregate(unsorted_segment_op, data.values,
data_val_to_out_val_index,
output_splits[-1])
return ragged_tensor.RaggedTensor.from_row_splits(
output_values, output_splits, validate=False)
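# Worked sketch (editor addition): segment aggregation over ragged rows,
# traced by hand from the implementation above. With data rows
# [[1, 2], [3], [4, 5, 6]] and segment_ids=[0, 0, 1], rows 0 and 1 are
# combined positionwise (the output row length is the max of the input row
# lengths), giving [[4, 2], [4, 5, 6]] for a sum. The helper name is
# hypothetical; evaluating it assumes eager execution.
def _segment_sum_sketch():
  data = ragged_tensor.RaggedTensor.from_row_splits(
      values=[1, 2, 3, 4, 5, 6], row_splits=[0, 2, 3, 6])
  return segment_sum(data, segment_ids=[0, 0, 1], num_segments=2)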
def segment_sum(data, segment_ids, num_segments, name=None):
# For docs, see: _RAGGED_SEGMENT_DOCSTRING
return _ragged_segment_aggregate(math_ops.unsorted_segment_sum, data,
segment_ids, num_segments, name or
'RaggedSegmentSum')
def segment_prod(data, segment_ids, num_segments, name=None):
# For docs, see: _RAGGED_SEGMENT_DOCSTRING
return _ragged_segment_aggregate(math_ops.unsorted_segment_prod, data,
segment_ids, num_segments, name or
'RaggedSegmentProd')
def segment_min(data, segment_ids, num_segments, name=None):
# For docs, see: _RAGGED_SEGMENT_DOCSTRING
return _ragged_segment_aggregate(math_ops.unsorted_segment_min, data,
segment_ids, num_segments, name or
'RaggedSegmentMin')
def segment_max(data, segment_ids, num_segments, name=None):
# For docs, see: _RAGGED_SEGMENT_DOCSTRING
return _ragged_segment_aggregate(math_ops.unsorted_segment_max, data,
segment_ids, num_segments, name or
'RaggedSegmentMax')
def segment_mean(data, segment_ids, num_segments, name=None):
"""For docs, see: _RAGGED_SEGMENT_DOCSTRING."""
with ops.name_scope(name, 'RaggedSegmentMean',
[data, segment_ids, num_segments]):
total = segment_sum(data, segment_ids, num_segments)
ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
array_ops.ones_like(data.flat_values), data.nested_row_splits,
validate=False)
count = segment_sum(ones, segment_ids, num_segments)
if ragged_tensor.is_ragged(total):
return total.with_flat_values(total.flat_values / count.flat_values)
else:
return total / count
def segment_sqrt_n(data, segment_ids, num_segments, name=None):
"""For docs, see: _RAGGED_SEGMENT_DOCSTRING."""
with ops.name_scope(name, 'RaggedSegmentSqrtN',
[data, segment_ids, num_segments]):
total = segment_sum(data, segment_ids, num_segments)
ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
array_ops.ones_like(data.flat_values), data.nested_row_splits,
validate=False)
count = segment_sum(ones, segment_ids, num_segments)
if ragged_tensor.is_ragged(total):
return total.with_flat_values(
total.flat_values / math_ops.sqrt(count.flat_values))
else:
return total / math_ops.sqrt(count)
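# Illustrative sketch (editor addition): `segment_mean` divides the segment
# sums by positionwise counts built from a ones-like ragged tensor, so for
# the same rows as the sketch above the result is
# [[2.0, 2.0], [4.0, 5.0, 6.0]]: position 0 of segment 0 averages 1.0 and
# 3.0, while position 1 has a count of one. The helper name is hypothetical;
# evaluating it assumes eager execution.
def _segment_mean_sketch():
  data = ragged_tensor.RaggedTensor.from_row_splits(
      values=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], row_splits=[0, 2, 3, 6])
  return segment_mean(data, segment_ids=[0, 0, 1], num_segments=2)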
def _set_ragged_segment_docstring(func, combination, combined):
func.__doc__ = _RAGGED_SEGMENT_DOCSTRING % dict(
combination=combination, combined=combined)
_set_ragged_segment_docstring(segment_sum, 'sum', 'summed')
_set_ragged_segment_docstring(segment_prod, 'product', 'multiplied')
_set_ragged_segment_docstring(segment_min, 'minimum', 'minimized')
_set_ragged_segment_docstring(segment_max, 'maximum', 'maximized')
_set_ragged_segment_docstring(segment_mean, 'mean', 'averaged')
_set_ragged_segment_docstring(segment_sqrt_n, 'sum divided by sqrt(N)',
'summed')
#===============================================================================
# ragged_reduce_<AGGREGATE>
#===============================================================================
# Docstring template used for ragged_reduce_<AGGREGATE> ops.
_RAGGED_REDUCE_DOCSTRING = """\
Computes the %(combination)s of elements across dimensions of a `RaggedTensor`.
Reduces `input_tensor` along the dimensions given in `axis` by taking the
%(combination)s of values. If a reduced dimension has no elements for
some index, then the value for that index will be %(default)s.
The rank of the tensor is reduced by `1` for each entry in `axis`. If
`axis` is not specified, then all dimensions are reduced, and a scalar
value is returned.
Args:
input_tensor: A `RaggedTensor` containing the values to be %(combined)s.
axis: The dimensions to reduce. May be `None` (to reduce all axes), an
`int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce
a given set of axes), or a `Tensor` with a constant value. Must be in
the range `[0, input_tensor.rank]`.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the %(combined)s values. The returned tensor
has the same dtype as `data`, and its shape is given by removing the
dimensions specified in `axis` from `input_tensor.shape`. The `ragged_rank`
    of the returned tensor is given by subtracting any ragged dimensions
specified in `axis` from `input_tensor.ragged_rank`.
Raises:
ValueError: If `axis` contains a `Tensor` whose value is not constant.
####Example:
```python%(example)s ```
"""
_RAGGED_REDUCE_SUM_EXAMPLE = """
>>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> ragged.reduce_sum(rt, axis=0).eval().tolist()
[15, 12, 4] # = [3+1+9+2, 1+5+6, 4]
>>> ragged.reduce_sum(rt, axis=1).eval().tolist()
[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6]
"""
_RAGGED_REDUCE_PROD_EXAMPLE = """
>>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> ragged.reduce_prod(rt, axis=0).eval().tolist()
[54, 30, 4] # = [3*1*9*2, 1*5*6, 4]
>>> ragged.reduce_prod(rt, axis=1).eval().tolist()
[12, 5, 9, 12] # = [3*1*4, 1*5, 9, 2*6]
"""
_RAGGED_REDUCE_MIN_EXAMPLE = """
>>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> ragged.reduce_min(rt, axis=0).eval().tolist()
[1, 1, 4] # = [min(3, 1, 9, 2), min(1, 5, 6), 4]
>>> ragged.reduce_min(rt, axis=1).eval().tolist()
[1, 1, 9, 2] # = [min(3, 1, 4), min(1, 5), 9, min(2, 6)]
"""
_RAGGED_REDUCE_MAX_EXAMPLE = """
>>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> ragged.reduce_max(rt, axis=0).eval().tolist()
[9, 6, 4] # = [max(3, 1, 9, 2), max(1, 5, 6), 4]
>>> ragged.reduce_max(rt, axis=1).eval().tolist()
[4, 5, 9, 6] # = [max(3, 1, 4), max(1, 5), 9, max(2, 6)]
"""
_RAGGED_REDUCE_MEAN_EXAMPLE = """
>>> rt = ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> ragged.reduce_mean(rt, axis=0).eval().tolist()
[3.75, 4, 4] # = [mean(3, 1, 9, 2), mean(1, 5, 6), 4]
>>> ragged.reduce_mean(rt, axis=1).eval().tolist()
[2.66666, 3, 9, 4] # = [mean(3, 1, 4), mean(1, 5), 9, mean(2, 6)]
"""
_RAGGED_REDUCE_ALL_EXAMPLE = """
>>> rt = ragged.constant([[True, True], [True, True, False, True], [False, True]])
>>> ragged.reduce_all(rt, axis=0).eval().tolist()
[False, True, False, True]
>>> ragged.reduce_all(rt, axis=1).eval().tolist()
[True, False, False]
"""
_RAGGED_REDUCE_ANY_EXAMPLE = """
>>> rt = ragged.constant([[True, True], [True, True, False, True], [False, True]])
>>> ragged.reduce_any(rt, axis=0).eval().tolist()
[True, True, False, True]
>>> ragged.reduce_any(rt, axis=1).eval().tolist()
[True, True, True]
"""
def _ragged_reduce_aggregate(reduce_op,
unsorted_segment_op,
rt_input,
axis,
keepdims,
name=None):
"""Aggregates across axes of a RaggedTensor using the given `Tensor` ops.
Reduces `rt_input` along the dimensions given in `axis`. The rank of the
tensor is reduced by 1 for each entry in `axis`. If `axis` is not specified,
then all dimensions are reduced, and a scalar value is returned.
This op assumes that `reduce_op` and `unsorted_segment_op` are associative;
if not, then reducing multiple axes will return incorrect results. (In
particular, reducing multiple axes is currently implemented by reducing the
axes one at a time.)
Args:
reduce_op: The tensorflow `op` that should be used to reduce values in
uniform dimensions. Must have the same signature and basic behavior as
`reduce_sum`, `reduce_max`, etc.
unsorted_segment_op: The tensorflow `op` that should be used to combine
values in ragged dimensions. Must have the same signature and basic
behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc.
rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced.
axis: The axis or axes to reduce. May be `None` (to reduce all axes), an
`int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce a
given set of axes), or a `Tensor` with a constant value. Must be in the
range `[0, rt_input.rank)`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the reduced values. The returned tensor
has the same dtype as `data`, and its shape is given by removing the
dimensions specified in `axis` from `rt_input.shape`. The `ragged_rank`
    of the returned tensor is given by subtracting any ragged dimensions
specified in `axis` from `rt_input.ragged_rank`.
Raises:
ValueError: If `axis` contains a `Tensor` whose value is not constant.
"""
if not ragged_tensor.is_ragged(rt_input):
return reduce_op(rt_input, axis, name=name)
if keepdims:
raise ValueError('keepdims=True is not supported for RaggedTensors.')
if isinstance(axis, ops.Tensor):
axis = tensor_util.constant_value(axis)
if axis is None:
raise ValueError('axis must be known at graph construction time.')
if isinstance(axis, np.ndarray):
axis = axis.tolist()
# When reducing all axes, just ignore splits & reduce the inner values.
if axis is None:
return reduce_op(rt_input.flat_values, None, name=name)
with ops.name_scope(name, 'RaggedReduce', [rt_input, axis]):
if isinstance(axis, (tuple, list)):
if not axis:
return rt_input
elif len(axis) == 1:
axis = axis[0]
else:
# When reducing multiple axes, as we reduce one at a time (see below),
# the negative axis has to be converted to positive at the first run
# as the sort with negative axis will have different orders.
# See GitHub issue 27497.
axis = [
ragged_util.get_positive_axis(a, rt_input.shape.ndims) for a in axis
]
# When reducing multiple axes, just reduce one at a time. This is less
# efficient, and only works for associative ops. (In particular, it
# does not work for reduce_mean.) However, reducing multiple axes at
# once will probably require a nontrivial c++ op.
axis = sorted(axis)
inner_reduced = _ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
rt_input, axis[-1], keepdims)
return _ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
inner_reduced, axis[:-1], keepdims)
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
rt_input, name='rt_input')
axis = ragged_util.get_positive_axis(axis, rt_input.shape.ndims)
if axis == 0:
# out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N]
row_lengths = rt_input.row_splits[1:] - rt_input.row_splits[:-1]
num_segments = math_ops.maximum(math_ops.reduce_max(row_lengths), 0)
segment_ids = range(row_lengths).values
return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
segment_ids, num_segments)
elif axis == 1:
# out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N]
num_segments = array_ops.shape(rt_input.row_splits)[0] - 1
segment_ids = segment_id_ops.row_splits_to_segment_ids(
rt_input.row_splits)
return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
segment_ids, num_segments)
else:
# out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] =
# sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N]
return rt_input.with_values(
_ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
rt_input.values, axis - 1, keepdims))
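# Illustrative sketch (editor addition): how the axis handling above plays
# out for a 2-D ragged tensor, mirroring the reduce_sum docstring example.
# axis=0 aggregates values that share a column position via ragged segment
# ids; axis=1 reduces within each row. The helper name is hypothetical;
# evaluating it assumes eager execution.
def _reduce_sum_axes_sketch():
  rt = ragged_tensor.RaggedTensor.from_row_splits(
      values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 3, 5, 6, 8])
  per_column = reduce_sum(rt, axis=0)  # [15, 12, 4]
  per_row = reduce_sum(rt, axis=1)     # [8, 6, 9, 8]
  return per_column, per_row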
def reduce_sum(input_tensor, axis=None, keepdims=None, name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return _ragged_reduce_aggregate(math_ops.reduce_sum,
math_ops.unsorted_segment_sum, input_tensor,
axis, keepdims, name or 'RaggedReduceSum')
def reduce_prod(input_tensor, axis=None, keepdims=None, name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return _ragged_reduce_aggregate(math_ops.reduce_prod,
math_ops.unsorted_segment_prod, input_tensor,
axis, keepdims, name or 'RaggedReduceProd')
def reduce_min(input_tensor, axis=None, keepdims=None, name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return _ragged_reduce_aggregate(math_ops.reduce_min,
math_ops.unsorted_segment_min, input_tensor,
axis, keepdims, name or 'RaggedReduceMin')
def reduce_max(input_tensor, axis=None, keepdims=None, name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
return _ragged_reduce_aggregate(math_ops.reduce_max,
math_ops.unsorted_segment_max, input_tensor,
axis, keepdims, name or 'RaggedReduceMax')
def reduce_mean(input_tensor, axis=None, keepdims=None, name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
with ops.name_scope(name, 'RaggedReduceMean', [input_tensor, axis]):
total = reduce_sum(input_tensor, axis, keepdims)
if ragged_tensor.is_ragged(input_tensor):
ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
array_ops.ones_like(input_tensor.flat_values),
input_tensor.nested_row_splits, validate=False)
else:
ones = array_ops.ones_like(input_tensor)
count = reduce_sum(ones, axis, keepdims)
if ragged_tensor.is_ragged(total):
return ragged_tensor.RaggedTensor.from_nested_row_splits(
total.flat_values / count.flat_values, total.nested_row_splits,
validate=False)
else:
return total / count
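# Illustrative sketch (assumed values): reduce_mean divides the ragged sum by a
# count built from a ones tensor that shares the input's nested row splits.
# For rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9]]):
#   reduce_mean(rt, axis=1) == [8/3, 3.0, 9.0]   # per-row means
#   reduce_mean(rt, axis=0) == [13/3, 3.0, 4.0]  # per-column means; the count is
#                                                # the number of rows reaching
#                                                # that column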
def _cast(input_tensor, dtype):
return ragged_functional_ops.map_flat_values(math_ops.cast, input_tensor,
dtype)
def reduce_all(input_tensor, axis=None, keepdims=None, name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
with ops.name_scope(name, 'RaggedReduceAll', [input_tensor, axis]):
return _cast(
reduce_prod(_cast(input_tensor, dtypes.int32), axis, keepdims),
dtypes.bool)
def reduce_any(input_tensor, axis=None, keepdims=None, name=None):
"""For docs, see: _RAGGED_REDUCE_DOCSTRING."""
with ops.name_scope(name, 'RaggedReduceAny', [input_tensor, axis]):
return _cast(
reduce_sum(_cast(input_tensor, dtypes.int32), axis, keepdims),
dtypes.bool)
def _set_ragged_reduce_docstring(func, combination, combined, default, example):
func.__doc__ = _RAGGED_REDUCE_DOCSTRING % dict(
combination=combination,
combined=combined,
default=default,
example=example)
_set_ragged_reduce_docstring(reduce_sum, 'sum', 'summed', '0',
_RAGGED_REDUCE_SUM_EXAMPLE)
_set_ragged_reduce_docstring(reduce_prod, 'product', 'multiplied', '1',
_RAGGED_REDUCE_PROD_EXAMPLE)
_set_ragged_reduce_docstring(reduce_min, 'minimum', 'minimized',
'`input_tensor.dtype.min`',
_RAGGED_REDUCE_MIN_EXAMPLE)
_set_ragged_reduce_docstring(reduce_max, 'maximum', 'maximized',
'`input_tensor.dtype.max`',
_RAGGED_REDUCE_MAX_EXAMPLE)
_set_ragged_reduce_docstring(reduce_mean, 'mean', 'averaged', 'NaN',
_RAGGED_REDUCE_MEAN_EXAMPLE)
_set_ragged_reduce_docstring(reduce_all, 'logical and', 'and-ed', 'True',
_RAGGED_REDUCE_ALL_EXAMPLE)
_set_ragged_reduce_docstring(reduce_any, 'logical or', 'or-ed', 'False',
_RAGGED_REDUCE_ANY_EXAMPLE)
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_math_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Private convenience functions for RaggedTensors.
None of these methods are exposed in the main "ragged" package.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_ragged_math_ops
from tensorflow.python.ops import math_ops
def convert_to_int_tensor(tensor, name, dtype=dtypes.int32):
"""Converts the given value to an integer Tensor."""
tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)
if tensor.dtype.is_integer:
tensor = math_ops.cast(tensor, dtype)
else:
raise TypeError(
"%s must be an integer tensor; dtype=%s" % (name, tensor.dtype))
return tensor
def get_positive_axis(axis, ndims):
"""Validate an `axis` parameter, and normalize it to be positive.
If `ndims` is known (i.e., not `None`), then check that `axis` is in the
range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or
`axis + ndims` (otherwise).
If `ndims` is not known, and `axis` is positive, then return it as-is.
If `ndims` is not known, and `axis` is negative, then report an error.
Args:
axis: An integer constant
ndims: An integer constant, or `None`
Returns:
The normalized `axis` value.
Raises:
ValueError: If `axis` is out-of-bounds, or if `axis` is negative and
`ndims is None`.
"""
if not isinstance(axis, int):
raise TypeError("axis must be an int; got %s" % type(axis).__name__)
if ndims is not None:
if 0 <= axis < ndims:
return axis
elif -ndims <= axis < 0:
return axis + ndims
else:
raise ValueError(
"axis=%s out of bounds: expected %s<=axis<%s" % (axis, -ndims, ndims))
elif axis < 0:
raise ValueError("axis may only be negative if ndims is statically known.")
return axis
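# Illustrative examples (not part of the original file), assuming ndims=3:
#   get_positive_axis(1, 3) == 1
#   get_positive_axis(-1, 3) == 2         # -1 + 3
#   get_positive_axis(-4, 3) raises ValueError (out of bounds)
#   get_positive_axis(2, None) == 2       # unknown rank: positive axes pass through
#   get_positive_axis(-1, None) raises ValueError (needs a statically known rank)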
def assert_splits_match(nested_splits_lists):
"""Checks that the given splits lists are identical.
Performs static tests to ensure that the given splits lists are identical,
and returns a list of control dependency op tensors that check that they are
fully identical.
Args:
nested_splits_lists: A list of nested_splits_lists, where each split_list is
a list of `splits` tensors from a `RaggedTensor`, ordered from outermost
ragged dimension to innermost ragged dimension.
Returns:
A list of control dependency op tensors.
Raises:
ValueError: If the splits are not identical.
"""
error_msg = "Inputs must have identical ragged splits"
for splits_list in nested_splits_lists:
if len(splits_list) != len(nested_splits_lists[0]):
raise ValueError(error_msg)
return [
check_ops.assert_equal(s1, s2, message=error_msg)
for splits_list in nested_splits_lists[1:]
for (s1, s2) in zip(nested_splits_lists[0], splits_list)
]
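# Hedged usage sketch (tensor names assumed): the returned assertions are meant
# to be used as control dependencies before combining two ragged tensors, e.g.
#   checks = assert_splits_match([rt_x.nested_row_splits, rt_y.nested_row_splits])
#   with ops.control_dependencies(checks):
#     result = rt_x.with_flat_values(rt_x.flat_values + rt_y.flat_values)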
# This op is intended to exactly match the semantics of numpy.repeat, with
# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior
# when axis is not specified. Rather than implement that special behavior, we
# simply make `axis` be a required argument.
#
# External (OSS) `tf.repeat` feature request:
# https://github.com/tensorflow/tensorflow/issues/8246
def repeat(data, repeats, axis, name=None):
"""Repeats elements of `data`.
Args:
data: An `N`-dimensional tensor.
repeats: A 1-D integer tensor specifying how many times each element in
`axis` should be repeated. `len(repeats)` must equal `data.shape[axis]`.
Supports broadcasting from a scalar value.
axis: `int`. The axis along which to repeat values. Must be less than
`max(N, 1)`.
name: A name for the operation.
Returns:
A tensor with `max(N, 1)` dimensions. Has the same shape as `data`,
except that dimension `axis` has size `sum(repeats)`.
#### Examples:
```python
>>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
['a', 'a', 'a', 'c', 'c']
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
[[1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]
>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
[[1, 1, 2, 2, 2], [3, 3, 4, 4, 4]]
```
"""
if not isinstance(axis, int):
raise TypeError("axis must be an int; got %s" % type(axis).__name__)
with ops.name_scope(name, "Repeat", [data, repeats]):
data = ops.convert_to_tensor(data, name="data")
repeats = convert_to_int_tensor(repeats, name="repeats")
repeats.shape.with_rank_at_most(1)
# If `data` is a scalar, then upgrade it to a vector.
data = _with_nonzero_rank(data)
data_shape = array_ops.shape(data)
# If `axis` is negative, then convert it to a positive value.
axis = get_positive_axis(axis, data.shape.ndims)
# Check data Tensor shapes.
if repeats.shape.ndims == 1:
data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])
# If we know that `repeats` is a scalar, then we can just tile & reshape.
if repeats.shape.ndims == 0:
expanded = array_ops.expand_dims(data, axis + 1)
tiled = tile_one_dimension(expanded, axis + 1, repeats)
result_shape = array_ops.concat(
[data_shape[:axis], [-1], data_shape[axis + 1:]], axis=0)
return array_ops.reshape(tiled, result_shape)
# Broadcast the `repeats` tensor so rank(repeats) == axis + 1.
if repeats.shape.ndims != axis + 1:
repeats_shape = array_ops.shape(repeats)
repeats_ndims = array_ops.rank(repeats)
broadcast_shape = array_ops.concat(
[data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)
repeats = array_ops.broadcast_to(repeats, broadcast_shape)
repeats.set_shape([None] * (axis + 1))
# Create a "sequence mask" based on `repeats`, where slices across `axis`
# contain one `True` value for each repetition. E.g., if
# `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.
max_repeat = math_ops.maximum(0, math_ops.reduce_max(repeats))
mask = array_ops.sequence_mask(repeats, max_repeat)
# Add a new dimension around each value that needs to be repeated, and
# then tile that new dimension to match the maximum number of repetitions.
expanded = array_ops.expand_dims(data, axis + 1)
tiled = tile_one_dimension(expanded, axis + 1, max_repeat)
# Use `boolean_mask` to discard the extra repeated values. This also
# flattens all dimensions up through `axis`.
masked = array_ops.boolean_mask(tiled, mask)
# Reshape the output tensor to add the outer dimensions back.
if axis == 0:
result = masked
else:
result_shape = array_ops.concat(
[data_shape[:axis], [-1], data_shape[axis + 1:]], axis=0)
result = array_ops.reshape(masked, result_shape)
# Preserve shape information.
if data.shape.ndims is not None:
new_axis_size = 0 if repeats.shape[0] == 0 else None
result.set_shape(data.shape[:axis].concatenate(
[new_axis_size]).concatenate(data.shape[axis + 1:]))
return result
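# Worked trace (assumed values) of the sequence-mask approach above, for
# data = ['a', 'b', 'c'], repeats = [3, 1, 2], axis = 0:
#   max_repeat = 3
#   mask  = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]              # sequence_mask(repeats, 3)
#   tiled = [['a','a','a'], ['b','b','b'], ['c','c','c']]  # expand_dims + tile
#   boolean_mask(tiled, mask) == ['a', 'a', 'a', 'b', 'c', 'c']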
def tile_one_dimension(data, axis, multiple):
"""Tiles a single dimension of a tensor."""
# Assumes axis is a nonnegative int.
if data.shape.ndims is not None:
multiples = [1] * data.shape.ndims
multiples[axis] = multiple
else:
ones = array_ops.ones(array_ops.rank(data), dtypes.int32)
multiples = array_ops.concat([ones[:axis], [multiple], ones[axis + 1:]],
axis=0)
return array_ops.tile(data, multiples)
def _with_nonzero_rank(data):
"""If `data` is scalar, then add a dimension; otherwise return as-is."""
if data.shape.ndims is not None:
if data.shape.ndims == 0:
return array_ops.stack([data])
else:
return data
else:
data_shape = array_ops.shape(data)
data_ndims = array_ops.rank(data)
return array_ops.reshape(
data,
array_ops.concat([[1], data_shape], axis=0)[-data_ndims:])
def lengths_to_splits(lengths):
"""Returns splits corresponding to the given lengths."""
return array_ops.concat([[0], math_ops.cumsum(lengths)], axis=-1)
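# Example (assumed values): lengths_to_splits([2, 0, 3]) == [0, 2, 2, 5].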
def repeat_ranges(params, splits, repeats):
"""Repeats each range of `params` (as specified by `splits`) `repeats` times.
Let the `i`th range of `params` be defined as
`params[splits[i]:splits[i + 1]]`. Then this function returns a tensor
containing range 0 repeated `repeats[0]` times, followed by range 1 repeated
`repeats[1]` times, ..., followed by the last range repeated `repeats[-1]` times.
Args:
params: The `Tensor` whose values should be repeated.
splits: A splits tensor indicating the ranges of `params` that should be
repeated.
repeats: The number of times each range should be repeated. Supports
broadcasting from a scalar value.
Returns:
A `Tensor` with the same rank and type as `params`.
#### Example:
```python
>>> repeat_ranges(['a', 'b', 'c'], [0, 2, 3], 3)
['a', 'b', 'a', 'b', 'a', 'b', 'c', 'c', 'c']
```
"""
# Divide `splits` into starts and limits, and repeat them `repeats` times.
if repeats.shape.ndims != 0:
repeated_starts = repeat(splits[:-1], repeats, axis=0)
repeated_limits = repeat(splits[1:], repeats, axis=0)
else:
# Optimization: we can just call repeat once, and then slice the result.
repeated_splits = repeat(splits, repeats, axis=0)
n_splits = array_ops.shape(repeated_splits, out_type=repeats.dtype)[0]
repeated_starts = repeated_splits[:n_splits - repeats]
repeated_limits = repeated_splits[repeats:]
# Get indices for each range from starts to limits, and use those to gather
# the values in the desired repetition pattern.
one = array_ops.ones((), repeated_starts.dtype)
offsets = gen_ragged_math_ops.ragged_range(
repeated_starts, repeated_limits, one)
return array_ops.gather(params, offsets.rt_dense_values)
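# Worked trace (assumed values) of the scalar-`repeats` optimization above, for
# splits = [0, 2, 3], repeats = 2, params = ['a', 'b', 'c']:
#   repeated_splits = [0, 0, 2, 2, 3, 3]          # repeat(splits, 2, axis=0)
#   repeated_starts = repeated_splits[:-2] == [0, 0, 2, 2]
#   repeated_limits = repeated_splits[2:]  == [2, 2, 3, 3]
#   ragged_range gives offsets [0, 1, 0, 1, 2, 2], so the gather returns
#   ['a', 'b', 'a', 'b', 'c', 'c'] -- each range repeated twice.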
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operator dispatch for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_batch_gather_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_squeeze_op
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_shape
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import ragged_where_op
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
# @TODO(edloper): Set this to True in the CL that exports RaggedTensors.
_UPDATE_DOCSTRINGS = False
# Information about an argument to an operation: The name of the argument, its
# position in the argument list, and a boolean flag indicating whether it
# expects a list of tensors.
_ArgInfo = collections.namedtuple('ArgInfo', ['name', 'position', 'is_list'])
def _get_arg_infos(func, arg_names):
"""Returns an `_ArgInfo` for each argument of `func` specified by `arg_names`.
Args:
func: The function whose arguments should be described.
arg_names: The names of the arguments to get info for.
Returns:
A tuple of `_ArgInfo`s.
"""
arg_infos = []
# Inspect the func's argspec to find the position of each arg.
arg_spec = tf_inspect.getargspec(func)
for argname in arg_names:
assert isinstance(argname, str)
is_list = argname.startswith('[') and argname.endswith(']')
if is_list:
argname = argname[1:-1]
if argname not in arg_spec.args:
raise ValueError('Argument %r not found in function %s. Args=%s' %
(argname, func, arg_spec.args))
arg_infos.append(_ArgInfo(argname, arg_spec.args.index(argname), is_list))
return arg_infos
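# Illustrative sketch (assumed call): a name wrapped in square brackets marks an
# argument that takes a list of tensors.  For a concat-style op,
#   _get_arg_infos(array_ops.concat, ['[values]', 'axis'])
# would yield ArgInfo('values', 0, is_list=True) and ArgInfo('axis', 1, False).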
def _is_convertible_to_tensor(value):
"""Returns true if `value` is convertible to a `Tensor`."""
if value is None:
return True
if isinstance(value,
(ops.Tensor, variables.Variable, np.ndarray, int, float, str)):
return True
elif isinstance(value, (sparse_tensor.SparseTensor,)):
return False
else:
try:
ops.convert_to_tensor(value)
return True
except (TypeError, ValueError):
return False
class UnaryRaggedElementwiseDispatcher(dispatch.OpDispatcher):
"""OpDispatcher for unary ops that map a base op across ragged values."""
def __init__(self, original_op, arg_is_list=False):
self._original_op = original_op
self._arg_is_list = arg_is_list
arg_names = tf_inspect.getfullargspec(original_op)[0]
self._x = arg_names[0]
if _UPDATE_DOCSTRINGS:
original_op.__doc__ = (
original_op.__doc__.rstrip() + '\n\n' +
' `{x}` may be a `tf.RaggedTensor`.\n'.format(x=self._x))
def handle(self, args, kwargs):
if args:
x, args = args[0], args[1:]
else:
kwargs = kwargs.copy()
x = kwargs.pop(self._x, None)
if x is None:
return self.NOT_SUPPORTED
if self._arg_is_list:
found_ragged = False
for elt in x:
if ragged_tensor.is_ragged(elt):
found_ragged = True
elif not _is_convertible_to_tensor(elt):
return self.NOT_SUPPORTED
if found_ragged:
x = ragged_tensor.match_row_splits_dtypes(*x)
nested_splits_lists = [
elt.nested_row_splits for elt in x if ragged_tensor.is_ragged(elt)
]
flat_values = [
elt.flat_values if ragged_tensor.is_ragged(elt) else elt
for elt in x
]
with ops.control_dependencies(
ragged_util.assert_splits_match(nested_splits_lists)):
return ragged_tensor.RaggedTensor.from_nested_row_splits(
self._original_op(flat_values, *args, **kwargs),
nested_splits_lists[0], validate=False)
else:
return self.NOT_SUPPORTED
else:
found_ragged = ragged_tensor.is_ragged(x)
if found_ragged:
mapped_values = self._original_op(x.flat_values, *args, **kwargs)
return x.with_flat_values(mapped_values)
else:
return self.NOT_SUPPORTED
class BinaryRaggedElementwiseDispatcher(dispatch.OpDispatcher):
"""OpDispatcher for binary ops that map a base op across ragged values.
Supports broadcasting.
"""
def __init__(self, original_op):
self._original_op = original_op
arg_names = tf_inspect.getfullargspec(original_op)[0]
self._x = arg_names[0]
self._y = arg_names[1]
if _UPDATE_DOCSTRINGS:
original_op.__doc__ = (
original_op.__doc__.rstrip() + '\n\n' +
'  `{x}` and `{y}` may each be a `tf.RaggedTensor`.\n'.format(
x=self._x, y=self._y))
def handle(self, args, kwargs):
# Extract the binary args.
if len(args) > 1:
x = args[0]
y = args[1]
args = args[2:]
elif args:
kwargs = kwargs.copy()
x = args[0]
y = kwargs.pop(self._y, None)
args = args[1:]
else:
kwargs = kwargs.copy()
x = kwargs.pop(self._x, None)
y = kwargs.pop(self._y, None)
# Bail if we don't have at least one ragged argument.
x_is_ragged = ragged_tensor.is_ragged(x)
y_is_ragged = ragged_tensor.is_ragged(y)
if not (x_is_ragged or y_is_ragged):
return self.NOT_SUPPORTED
# Convert args to tensors. Bail if conversion fails.
try:
if not x_is_ragged:
x = ops.convert_to_tensor(x, name=self._x, preferred_dtype=y.dtype)
if not y_is_ragged:
y = ops.convert_to_tensor(y, name=self._y, preferred_dtype=x.dtype)
except (TypeError, ValueError):
return self.NOT_SUPPORTED
if x_is_ragged and y_is_ragged:
x, y = ragged_tensor.match_row_splits_dtypes(x, y)
if ((x_is_ragged and y_is_ragged) or
(x_is_ragged and x.flat_values.shape.ndims <= y.shape.ndims) or
(y_is_ragged and y.flat_values.shape.ndims <= x.shape.ndims)):
bcast_shape = ragged_tensor_shape.broadcast_dynamic_shape(
ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x),
ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y))
x = ragged_tensor_shape.broadcast_to(
x, bcast_shape, broadcast_inner_dimensions=False)
y = ragged_tensor_shape.broadcast_to(
y, bcast_shape, broadcast_inner_dimensions=False)
x_values = x.flat_values if ragged_tensor.is_ragged(x) else x
y_values = y.flat_values if ragged_tensor.is_ragged(y) else y
mapped_values = self._original_op(x_values, y_values, *args, **kwargs)
if ragged_tensor.is_ragged(x):
return x.with_flat_values(mapped_values)
else:
return y.with_flat_values(mapped_values)
class RaggedDispatcher(dispatch.OpDispatcher):
"""OpDispatcher for ragged ops.
Dispatches to a wrapped op-handler if at least one of the `tensor_args`
arguments is a RaggedTensor or a RaggedTensorValue, and all of the
`tensor_args` arguments are convertible to Tensor or RaggedTensor.
"""
def __init__(self, original_op, ragged_op, ragged_args):
op_arg_names = tf_inspect.getfullargspec(original_op)[0]
ragged_arg_names = tf_inspect.getfullargspec(ragged_op)[0]
if op_arg_names != ragged_arg_names:
raise AssertionError(
'Signature must exactly match when overriding %s with %s: %s vs %s' %
(original_op, ragged_op, op_arg_names, ragged_arg_names))
self._ragged_op = ragged_op
self._ragged_args = _get_arg_infos(ragged_op, ragged_args)
if _UPDATE_DOCSTRINGS:
arg_list = ' and '.join('`%s`' % arg for arg in ragged_args)
original_op.__doc__ = (
original_op.__doc__.rstrip() + '\n\n' +
' {0} may be a `tf.RaggedTensor`.\n'.format(arg_list))
def handle(self, args, kwargs):
if self.is_supported(args, kwargs):
return self._ragged_op(*args, **kwargs)
else:
return self.NOT_SUPPORTED
def is_supported(self, args, kwargs):
found_ragged = False
for arg_info in self._ragged_args:
if arg_info.position < len(args):
arg = args[arg_info.position]
else:
arg = kwargs.get(arg_info.name, None)
if arg_info.is_list:
if not isinstance(arg, (list, tuple)):
return False
for elt in arg:
if ragged_tensor.is_ragged(elt):
found_ragged = True
elif not _is_convertible_to_tensor(elt):
return False
else:
if ragged_tensor.is_ragged(arg):
found_ragged = True
elif not _is_convertible_to_tensor(arg):
return False
return found_ragged
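# Illustrative effect of the dispatchers registered below (assumed values):
# once registered, calling an exported op on a RaggedTensor routes to the
# ragged implementation instead of failing, e.g.
#   rt = tf.ragged.constant([[1, 2], [3]])
#   tf.reduce_sum(rt, axis=1)   # -> [3, 3]           (via ragged_math_ops.reduce_sum)
#   tf.add(rt, 10)              # -> [[11, 12], [13]]  (elementwise on flat_values)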
_UNARY_ELEMENTWISE_OPS = [
array_ops.check_numerics,
array_ops.identity,
array_ops.ones_like,
array_ops.ones_like_v2,
array_ops.zeros_like,
array_ops.zeros_like_v2,
clip_ops.clip_by_value,
gen_bitwise_ops.invert,
math_ops.abs,
math_ops.acos,
math_ops.acosh,
math_ops.angle,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.cast,
math_ops.ceil,
math_ops.conj,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.exp,
math_ops.expm1,
math_ops.floor,
math_ops.imag,
math_ops.is_finite,
math_ops.is_inf,
math_ops.is_nan,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
math_ops.log_sigmoid,
math_ops.logical_not,
math_ops.negative,
math_ops.real,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.saturate_cast,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
parsing_ops.decode_compressed,
string_ops.string_to_number,
string_ops.string_to_hash_bucket,
string_ops.as_string,
string_ops.decode_base64,
string_ops.encode_base64,
string_ops.regex_full_match,
string_ops.regex_replace,
string_ops.string_strip,
string_ops.string_to_hash_bucket,
string_ops.string_to_hash_bucket_fast,
string_ops.string_to_hash_bucket_strong,
string_ops.substr,
string_ops.substr_v2,
string_ops.string_length,
string_ops.string_length_v2,
string_ops.unicode_script,
]
_UNARY_LIST_ELEMENTWISE_OPS = [
math_ops.add_n,
string_ops.string_join,
]
_BINARY_ELEMENTWISE_OPS = [
gen_bitwise_ops.bitwise_and,
gen_bitwise_ops.bitwise_or,
gen_bitwise_ops.bitwise_xor,
gen_bitwise_ops.left_shift,
gen_bitwise_ops.right_shift,
math_ops.add,
math_ops.atan2,
math_ops.complex,
math_ops.div_no_nan,
math_ops.divide,
math_ops.equal,
math_ops.floordiv,
math_ops.floormod,
math_ops.greater,
math_ops.greater_equal,
math_ops.less,
math_ops.less_equal,
math_ops.logical_and,
math_ops.logical_or,
math_ops.logical_xor,
math_ops.maximum,
math_ops.minimum,
math_ops.multiply,
math_ops.not_equal,
math_ops.pow,
math_ops.realdiv,
math_ops.squared_difference,
math_ops.subtract,
math_ops.truediv,
math_ops.truncatediv,
math_ops.truncatemod,
]
# We don't need to register a separate delegation handler for these v1 ops,
# since they delegate to the v2 ops (which already have a handler). But we
# still want to include them in the ragged_op_list() output.
_V1_OPS_THAT_DELEGATE_TO_V2_OPS = [
math_ops.reduce_sum,
math_ops.reduce_prod,
math_ops.reduce_min,
math_ops.reduce_max,
math_ops.reduce_mean,
math_ops.reduce_any,
math_ops.reduce_all,
]
def _ragged_gather_v1(params, indices, validate_indices=None, name=None,
axis=0, batch_dims=0):
return ragged_gather_ops.gather(
params=params,
indices=indices,
validate_indices=validate_indices,
axis=axis,
batch_dims=batch_dims,
name=name)
def _ragged_gather_nd_v1(params, indices, name=None, batch_dims=0):
return ragged_gather_ops.gather_nd(
params=params,
indices=indices,
batch_dims=batch_dims,
name=name)
def _ragged_expand_dims_v1(input, axis=None, name=None, dim=None): # pylint: disable=redefined-builtin
if dim is not None:
axis = dim
return ragged_array_ops.expand_dims(input=input, axis=axis, name=name)
def _ragged_size_v1(input, name=None, out_type=dtypes.int32): # pylint: disable=redefined-builtin
return ragged_array_ops.size(input=input, out_type=out_type, name=name)
def _ragged_squeeze_v1(input, axis=None, name=None, squeeze_dims=None): # pylint: disable=redefined-builtin
axis = deprecation.deprecated_argument_lookup('axis', axis, 'squeeze_dims',
squeeze_dims)
return ragged_squeeze_op.squeeze(input, axis, name)
# (original_op, ragged_op, ragged_args)
_RAGGED_DISPATCH_OPS = [
(array_ops.batch_gather, ragged_batch_gather_ops.batch_gather,
['params', 'indices']),
(array_ops.concat, ragged_concat_ops.concat, ['[values]']),
(array_ops.expand_dims, _ragged_expand_dims_v1, ['input']),
(array_ops.expand_dims_v2, ragged_array_ops.expand_dims, ['input']),
(array_ops.gather, _ragged_gather_v1, ['params', 'indices']),
(array_ops.gather_v2, ragged_gather_ops.gather, ['params', 'indices']),
(array_ops.gather_nd, _ragged_gather_nd_v1, ['params', 'indices']),
(array_ops.gather_nd_v2, ragged_gather_ops.gather_nd, ['params',
'indices']),
(array_ops.rank, ragged_array_ops.rank, ['input']),
(array_ops.size, _ragged_size_v1, ['input']),
(array_ops.size_v2, ragged_array_ops.size, ['input']),
(array_ops.squeeze, _ragged_squeeze_v1, ['input']),
(array_ops.squeeze_v2, ragged_squeeze_op.squeeze, ['input']),
(array_ops.stack, ragged_concat_ops.stack, ['[values]']),
(array_ops.tile, ragged_array_ops.tile, ['input']),
(array_ops.where, ragged_where_op.where, ['condition', 'x', 'y']),
(math_ops.unsorted_segment_sum, ragged_math_ops.segment_sum,
['data', 'segment_ids']),
(math_ops.unsorted_segment_prod, ragged_math_ops.segment_prod,
['data', 'segment_ids']),
(math_ops.unsorted_segment_min, ragged_math_ops.segment_min,
['data', 'segment_ids']),
(math_ops.unsorted_segment_max, ragged_math_ops.segment_max,
['data', 'segment_ids']),
(math_ops.unsorted_segment_mean, ragged_math_ops.segment_mean,
['data', 'segment_ids']),
(math_ops.unsorted_segment_sqrt_n, ragged_math_ops.segment_sqrt_n,
['data', 'segment_ids']),
(math_ops.reduce_sum, ragged_math_ops.reduce_sum, ['input_tensor']),
(math_ops.reduce_prod, ragged_math_ops.reduce_prod, ['input_tensor']),
(math_ops.reduce_min, ragged_math_ops.reduce_min, ['input_tensor']),
(math_ops.reduce_max, ragged_math_ops.reduce_max, ['input_tensor']),
(math_ops.reduce_mean, ragged_math_ops.reduce_mean, ['input_tensor']),
(math_ops.reduce_any, ragged_math_ops.reduce_any, ['input_tensor']),
(math_ops.reduce_all, ragged_math_ops.reduce_all, ['input_tensor']),
]
def register_dispatchers():
"""Constructs & registers OpDispatchers for ragged ops."""
op_list = (
_UNARY_ELEMENTWISE_OPS + _UNARY_LIST_ELEMENTWISE_OPS +
_BINARY_ELEMENTWISE_OPS + [x[0] for x in _RAGGED_DISPATCH_OPS])
for op in op_list:
_, undecorated_op = tf_decorator.unwrap(op)
if not hasattr(undecorated_op,
tf_export.API_ATTRS[tf_export.TENSORFLOW_API_NAME].names):
raise AssertionError('Expected %s to be an exported symbol '
'(while adding a RaggedTensor dispatcher)' % op)
for op in _UNARY_ELEMENTWISE_OPS:
UnaryRaggedElementwiseDispatcher(op).register(op)
for op in _UNARY_LIST_ELEMENTWISE_OPS:
UnaryRaggedElementwiseDispatcher(op, True).register(op)
for op in _BINARY_ELEMENTWISE_OPS:
BinaryRaggedElementwiseDispatcher(op).register(op)
for (original_op, ragged_op, args) in _RAGGED_DISPATCH_OPS:
RaggedDispatcher(original_op, ragged_op, args).register(original_op)
def _ragged_op_signature(op, ragged_args):
"""Returns a signature for the given op, marking ragged args in bold."""
op_name = tf_export.get_canonical_name_for_symbol(op)
argspec = tf_inspect.getfullargspec(op)
arg_names = argspec.args
# Mark ragged arguments in bold.
for pos in ragged_args:
arg_names[pos] = '**' + arg_names[pos] + '**'
# Add argument defaults.
for pos in range(-1, -len(argspec.defaults) - 1, -1):
arg_names[pos] += '=`{!r}`'.format(argspec.defaults[pos])
# Add varargs and keyword args
if argspec.varargs:
arg_names.append('*' + argspec.varargs)
if argspec.varkw:
arg_names.append('**' + argspec.varkw)
return '* `tf.{}`({})'.format(op_name, ', '.join(arg_names))
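# Example output (assumed op): for math_ops.reduce_sum with ragged_args=[0],
# this would produce something like
#   '* `tf.math.reduce_sum`(**input_tensor**, axis=`None`, keepdims=`None`, ...)'
# with the ragged-capable argument shown in bold.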
def _op_is_in_tf_version(op, version):
if version == 1:
return (tf_export.get_v1_names(tf_decorator.unwrap(op)[1]) or
op in _V1_OPS_THAT_DELEGATE_TO_V2_OPS)
elif version == 2:
return tf_export.get_v2_names(tf_decorator.unwrap(op)[1])
else:
raise ValueError('Expected version 1 or 2.')
def ragged_op_list(tf_version=1):
"""Returns a string listing operators that have dispathers registered."""
lines = []
for op in _UNARY_ELEMENTWISE_OPS + _UNARY_LIST_ELEMENTWISE_OPS:
if _op_is_in_tf_version(op, tf_version):
lines.append(_ragged_op_signature(op, [0]))
for op in _BINARY_ELEMENTWISE_OPS:
if _op_is_in_tf_version(op, tf_version):
lines.append(_ragged_op_signature(op, [0, 1]))
for op, _, ragged_args in _RAGGED_DISPATCH_OPS:
if _op_is_in_tf_version(op, tf_version):
arginfos = _get_arg_infos(op, ragged_args)
ragged_args = [arginfo.position for arginfo in arginfos]
lines.append(_ragged_op_signature(op, ragged_args))
return ('\n\n### Additional ops that support `RaggedTensor`\n\n'
'Arguments that accept `RaggedTensor`s are marked in **bold**.\n\n' +
'\n'.join(sorted(lines)) + '\n')
register_dispatchers()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_dispatch.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Array operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_dispatch # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_operators # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_shape
from tensorflow.python.ops.ragged import ragged_where_op
#===============================================================================
# ragged.batch_gather_with_default
#===============================================================================
def batch_gather_with_default(params,
indices,
default_value='',
name=None):
"""Same as `batch_gather` but inserts `default_value` for invalid indices.
This operation is similar to `batch_gather`, except that values at invalid
indices are replaced with `default_value`.
See `batch_gather` for more details.
Args:
params: A potentially ragged tensor with shape `[B1...BN, P1...PM]` (`N>=0`,
`M>0`).
indices: A potentially ragged tensor with shape `[B1...BN, I]` (`N>=0`).
default_value: A value to be inserted in places where `indices` are out of
bounds. Must be the same dtype as params and either a scalar or rank 1.
name: A name for the operation (optional).
Returns:
A potentially ragged tensor with shape `[B1...BN, I, P2...PM]`.
`result.ragged_rank = max(indices.ragged_rank, params.ragged_rank)`.
#### Example:
```python
>>> params = tf.ragged.constant([
['a', 'b', 'c'],
['d'],
[],
['e']])
>>> indices = tf.ragged.constant([[1, 2, -1], [], [], [0, 10]])
>>> batch_gather_with_default(params, indices, 'FOO')
[['b', 'c', 'FOO'], [], [], ['e', 'FOO']]
```
"""
with ops.name_scope(name, 'RaggedBatchGatherWithDefault'):
params = ragged_tensor.convert_to_tensor_or_ragged_tensor(
params, name='params',
)
indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
indices, name='indices',
)
default_value = ragged_tensor.convert_to_tensor_or_ragged_tensor(
default_value, name='default_value',
)
row_splits_dtype, (params, indices, default_value) = (
ragged_tensor.match_row_splits_dtypes(params, indices, default_value,
return_dtype=True))
# TODO(hterry): lift this restriction and support default_values
# of rank > 1.
if default_value.shape.ndims not in (0, 1):
raise ValueError('"default_value" must be a scalar or vector')
upper_bounds = None
if indices.shape.ndims is None:
raise ValueError('Indices must have a known rank.')
if params.shape.ndims is None:
raise ValueError('Params must have a known rank.')
num_batch_dimensions = indices.shape.ndims - 1
pad = None
# The logic for this works as follows:
# - create a padded params, where:
# padded_params[b1...bn, 0] = default_value
# padded_params[b1...bn, i] = params[b1...bn, i-1] (i>0)
# - create an `upper_bounds` Tensor that contains the number of elements
# in each innermost rank. Broadcast `upper_bounds` to be the same shape
# as `indices`.
# - check to see which index in `indices` are out of bounds and substitute
# it with the index containing `default_value` (the first).
# - call batch_gather with the indices adjusted.
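# Worked trace (assumed values): params = ['a', 'b', 'c'], indices = [1, 2, 4],
# default_value = 'FOO':
#   padded_params   = ['FOO', 'a', 'b', 'c']
#   upper_bounds    = 3
#   shifted_indices = [2, 3, 5]
#   adjusted        = [2, 3, 0]           # 4 is out of bounds -> default index 0
#   batch_gather    -> ['b', 'c', 'FOO']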
with ops.control_dependencies([
check_ops.assert_greater_equal(array_ops.rank(params),
array_ops.rank(indices))]):
if ragged_tensor.is_ragged(params):
row_lengths = ragged_array_ops.expand_dims(
params.row_lengths(axis=num_batch_dimensions),
axis=-1)
upper_bounds = math_ops.cast(row_lengths, indices.dtype)
pad_shape = _get_pad_shape(params, indices, row_splits_dtype)
pad = ragged_tensor_shape.broadcast_to(
default_value, pad_shape)
else:
params_shape = array_ops.shape(params)
pad_shape = array_ops.concat([
params_shape[:num_batch_dimensions],
[1],
params_shape[num_batch_dimensions + 1:params.shape.ndims]
], 0)
upper_bounds = params_shape[num_batch_dimensions]
pad = array_ops.broadcast_to(default_value, pad_shape)
# Add `default_value` as the first value in the innermost (ragged) rank.
pad = math_ops.cast(pad, params.dtype)
padded_params = array_ops.concat(
[pad, params], axis=num_batch_dimensions)
# Adjust the indices by substituting out-of-bound indices to the
# default-value index (which is the first element)
shifted_indices = indices + 1
is_out_of_bounds = (indices < 0) | (indices > upper_bounds)
adjusted_indices = ragged_where_op.where(
is_out_of_bounds,
x=array_ops.zeros_like(indices), y=shifted_indices,
)
return array_ops.batch_gather(
params=padded_params, indices=adjusted_indices, name=name)
def _get_pad_shape(params, indices, row_splits_dtype):
"""Gets the RaggedTensorDynamicShape for the pad tensor."""
num_batch_dimensions = indices.shape.ndims - 1
params_shape = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(
params, dim_size_dtype=row_splits_dtype)
# We want to create a pad tensor that can be concatenated with the params.
if params.shape.ndims == indices.shape.ndims:
# When params and indices are the same rank, the shape of the pad tensor is
# almost identical to params, except the last dimension which has size = 1.
if params_shape.num_inner_dimensions == 0:
pad_dims = params_shape.partitioned_dim_sizes[:-1] + (
array_ops.ones_like(params_shape.partitioned_dim_sizes[-1]),)
return ragged_tensor_shape.RaggedTensorDynamicShape(
pad_dims, [])
else:
return ragged_tensor_shape.RaggedTensorDynamicShape(
params_shape.partitioned_dim_sizes,
array_ops.concat([params_shape.inner_dim_sizes[:-1], [1]], axis=0))
else:
# When the rank of indices < params, the pad has the same dimension as
# params up to the 'num_batch_dimensions' rank. Every dimension after that
# has size 1.
pad_dims = None
if num_batch_dimensions == 0:
pad_dims = (constant_op.constant(1, dtype=row_splits_dtype),) + (
constant_op.constant([1], dtype=row_splits_dtype),) * (
params_shape.num_partitioned_dimensions -
num_batch_dimensions - 1)
else:
batch_dimensions = params_shape.partitioned_dim_sizes[
:num_batch_dimensions]
gather_dimension = params_shape.partitioned_dim_sizes[
num_batch_dimensions]
pad_dims = batch_dimensions + (
array_ops.ones_like(gather_dimension),) * (
params_shape.num_partitioned_dimensions - num_batch_dimensions)
return ragged_tensor_shape.RaggedTensorDynamicShape(
pad_dims, params_shape.inner_dim_sizes)
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_batch_gather_with_default_op.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""where operation for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_tensor
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
: If both `x` and `y` are `None`:
Returns the coordinates of true elements of `condition`. The coordinates
are returned in a 2-D tensor with shape
`[num_true_values, dim_size(condition)]`, where `result[i]` is the
coordinates of the `i`th true value (in row-major order).
: If both `x` and `y` are non-`None`:
Returns a tensor formed by selecting values from `x` where condition is
true, and from `y` when condition is false. In particular:
: If `condition`, `x`, and `y` all have the same shape:
* `result[i1...iN] = x[i1...iN]` if `condition[i1...iN]` is true.
* `result[i1...iN] = y[i1...iN]` if `condition[i1...iN]` is false.
: Otherwise:
* `condition` must be a vector.
* `x` and `y` must have the same number of dimensions.
* The outermost dimensions of `condition`, `x`, and `y` must all have the
same size.
* `result[i] = x[i]` if `condition[i]` is true.
* `result[i] = y[i]` if `condition[i]` is false.
Args:
condition: A potentially ragged tensor of type `bool`
x: A potentially ragged tensor (optional).
y: A potentially ragged tensor (optional). Must be specified if `x` is
specified. Must have the same rank and type as `x`.
name: A name of the operation (optional)
Returns:
: If both `x` and `y` are `None`:
A `Tensor` with shape `(num_true, dim_size(condition))`.
: Otherwise:
A potentially ragged tensor with the same type, rank, and outermost
dimension size as `x` and `y`.
`result.ragged_rank = max(x.ragged_rank, y.ragged_rank)`.
Raises:
ValueError: When exactly one of `x` or `y` is non-`None`; or when
`condition`, `x`, and `y` have incompatible shapes.
#### Examples:
```python
>>> # Coordinates where condition is true.
>>> condition = tf.compat.v1.ragged.constant_value(
... [[True, False, True], [False, True]])
>>> ragged.where(condition)
[[0, 0], [0, 2], [1, 1]]
>>> # Elementwise selection between x and y, based on condition.
>>> condition = tf.compat.v1.ragged.constant_value(
... [[True, False, True], [False, True]])
>>> x = tf.compat.v1.ragged.constant_value([['A', 'B', 'C'], ['D', 'E']])
>>> y = tf.compat.v1.ragged.constant_value([['a', 'b', 'c'], ['d', 'e']])
>>> ragged.where(condition, x, y)
[['A', 'b', 'C'], ['d', 'E']]
>>> # Row selection between x and y, based on condition.
>>> condition = [True, False]
>>> x = tf.compat.v1.ragged.constant_value([['A', 'B', 'C'], ['D', 'E']])
>>> y = tf.compat.v1.ragged.constant_value([['a', 'b', 'c'], ['d', 'e']])
>>> ragged.where(condition, x, y)
[['A', 'B', 'C'], ['d', 'e']]
```
"""
if (x is None) != (y is None):
raise ValueError('x and y must be either both None or both non-None')
with ops.name_scope('RaggedWhere', name, [condition, x, y]):
condition = ragged_tensor.convert_to_tensor_or_ragged_tensor(
condition, name='condition')
if x is None:
return _coordinate_where(condition)
else:
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y')
condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y)
return _elementwise_where(condition, x, y)
def _elementwise_where(condition, x, y):
"""Ragged version of tf.where(condition, x, y)."""
condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)
x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)
y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)
if not (condition_is_ragged or x_is_ragged or y_is_ragged):
return array_ops.where(condition, x, y)
elif condition_is_ragged and x_is_ragged and y_is_ragged:
return ragged_functional_ops.map_flat_values(array_ops.where, condition, x,
y)
elif not condition_is_ragged:
# Concatenate x and y, and then use `gather` to assemble the selected rows.
condition.shape.assert_has_rank(1)
x_and_y = ragged_concat_ops.concat([x, y], axis=0)
x_nrows = _nrows(x, out_type=x_and_y.row_splits.dtype)
y_nrows = _nrows(y, out_type=x_and_y.row_splits.dtype)
indices = array_ops.where(condition, math_ops.range(x_nrows),
x_nrows + math_ops.range(y_nrows))
return ragged_gather_ops.gather(x_and_y, indices)
else:
raise ValueError('Input shapes do not match.')
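# Worked trace (assumed values) of the dense-condition branch above:
#   condition = [True, False]
#   x = [['A', 'B', 'C'], ['D', 'E']],  y = [['a', 'b', 'c'], ['d', 'e']]
#   x_and_y = [['A','B','C'], ['D','E'], ['a','b','c'], ['d','e']]  # concat, axis=0
#   indices = where([True, False], [0, 1], [2, 3]) == [0, 3]
#   gather  -> [['A', 'B', 'C'], ['d', 'e']]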
def _coordinate_where(condition):
"""Ragged version of tf.where(condition)."""
if not isinstance(condition, ragged_tensor.RaggedTensor):
return array_ops.where(condition)
# The coordinate for each `true` value in condition.values.
selected_coords = _coordinate_where(condition.values)
# Convert the first index in each coordinate to a row index and column index.
condition = condition.with_row_splits_dtype(selected_coords.dtype)
first_index = selected_coords[:, 0]
selected_rows = array_ops.gather(condition.value_rowids(), first_index)
selected_row_starts = array_ops.gather(condition.row_splits, selected_rows)
selected_cols = first_index - selected_row_starts
# Assemble the row & column index with the indices for inner dimensions.
return array_ops.concat([
array_ops.expand_dims(selected_rows, 1),
array_ops.expand_dims(selected_cols, 1), selected_coords[:, 1:]
],
axis=1)
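# Worked trace (assumed values) of the recursion above, for
# condition = [[True, False, True], [False, True]] (ragged_rank=1):
#   condition.values        = [True, False, True, False, True]
#   selected_coords (inner) = [[0], [2], [4]]
#   value_rowids            = [0, 0, 0, 1, 1]  -> selected_rows = [0, 0, 1]
#   row_splits              = [0, 3, 5]        -> selected_row_starts = [0, 0, 3]
#   selected_cols           = [0, 2, 1]
#   result                  = [[0, 0], [0, 2], [1, 1]]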
def _nrows(rt_input, out_type):
if isinstance(rt_input, ragged_tensor.RaggedTensor):
return rt_input.nrows(out_type=out_type)
else:
return array_ops.shape(rt_input, out_type=out_type)[0]
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_where_op.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_concat_ops.stack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedStackOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters(
dict(
descr='One rank-2 input (ragged_rank=1), axis=0',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21']],), # shape=(3, None)
axis=0,
expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']]]),
dict(
descr='One rank-2 input (ragged_rank=1), axis=1',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']],), # shape=(3, None)
axis=1,
expected=[
[[b'a00', b'a01']],
[[]],
[[b'a20', b'a21', b'a22']]]),
dict(
descr='One rank-2 input (ragged_rank=1), axis=2',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']],), # shape=(3, None)
axis=2,
expected=[
[[b'a00'], [b'a01']], [],
[[b'a20'], [b'a21'], [b'a22']]]),
dict(
descr='One rank-2 input (ragged_rank=1), axis=-3',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21']],), # shape=(3, None)
axis=-3,
expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']]]),
dict(
descr='One rank-2 input (ragged_rank=1), axis=-2',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']],), # shape=(3, None)
axis=-2,
expected=[
[[b'a00', b'a01']],
[[]],
[[b'a20', b'a21', b'a22']]]),
dict(
descr='One rank-2 input (ragged_rank=1), axis=-1',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']],), # shape=(3, None)
axis=-1,
expected=[
[[b'a00'], [b'a01']], [],
[[b'a20'], [b'a21'], [b'a22']]]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=0',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None)
[['b00'], ['b10']]), # shape=(2, None)
axis=0,
expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']], [[b'b00'],
[b'b10']]]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=1',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None)
axis=1,
expected=[
[[b'a00', b'a01'], [b'b00']],
[[], [b'b10', b'b11', b'b12']],
[[b'a20', b'a21', b'a22'], [b'b20']]]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=2',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00', 'b01'], [], ['b20', 'b21', 'b22']]), # shape=(3, None)
axis=2,
expected=[
[[b'a00', b'b00'], [b'a01', b'b01']], [],
[[b'a20', b'b20'], [b'a21', b'b21'], [b'a22', b'b22']]]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=-3',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None)
[['b00'], ['b10']]), # shape=(2, None)
axis=-3,
expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']], [[b'b00'],
[b'b10']]]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=-2',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None)
axis=-2,
expected=[
[[b'a00', b'a01'], [b'b00']],
[[], [b'b10', b'b11', b'b12']],
[[b'a20', b'a21', b'a22'], [b'b20']]]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=-1',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00', 'b01'], [], ['b20', 'b21', 'b22']]), # shape=(3, None)
axis=-1,
expected=[
[[b'a00', b'b00'], [b'a01', b'b01']], [],
[[b'a20', b'b20'], [b'a21', b'b21'], [b'a22', b'b22']]]),
dict(
descr='Three rank-2 inputs (ragged_rank=1), axis=0',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00'], ['b10']], # shape=(2, None)
[['c00'], ['c10', 'c11'], ['c21']]), # shape=(3, None)
axis=0,
expected=[[[b'a00', b'a01'], [], [b'a20', b'a21', b'a22']],
[[b'b00'], [b'b10']],
[[b'c00'], [b'c10', b'c11'], [b'c21']]]),
dict(
descr='Three rank-2 inputs (ragged_rank=1), axis=1',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None)
[[], ['c10', 'c11'], ['c20', 'c21']]), # shape=(3, None)
axis=1,
expected=[
[[b'a00', b'a01'], [b'b00'], []],
[[], [b'b10', b'b11', b'b12'], [b'c10', b'c11']],
[[b'a20', b'a21', b'a22'], [b'b20'], [b'c20', b'c21']]],
expected_shape=[3, None, None]),
dict(
descr='Three rank-2 inputs (ragged_rank=1), axis=2',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00', 'b01'], [], ['b20', 'b21', 'b22']], # shape=(3, None)
[['c00', 'c01'], [], ['c20', 'c21', 'c22']]), # shape=(3, None)
axis=2,
expected=[
[[b'a00', b'b00', b'c00'], [b'a01', b'b01', b'c01']], [],
[[b'a20', b'b20', b'c20'], [b'a21', b'b21', b'c21'],
[b'a22', b'b22', b'c22']]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=0',
rt_inputs=(
[[['a000', 'a001'], ['a010']],
[['a100', 'a101', 'a102'], ['a110', 'a111']]],
[[['b000']], [['b100', 'b101'], ['b110']]],
[[], [['c100', 'c101', 'c102', 'c103']], [[], ['c210', 'c211']]]),
axis=0,
expected=[
[[[b'a000', b'a001'], [b'a010']],
[[b'a100', b'a101', b'a102'], [b'a110', b'a111']]],
[[[b'b000']],
[[b'b100', b'b101'], [b'b110']]],
[[],
[[b'c100', b'c101', b'c102', b'c103']],
[[], [b'c210', b'c211']]]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=1',
rt_inputs=(
[[['a000', 'a001'], ['a010']],
[['a100', 'a101', 'a102'], ['a110', 'a111']]],
[[['b000']], [['b100', 'b101'], ['b110']]],
[[], [[], ['c110', 'c111']]]),
axis=1,
expected=[
[[[b'a000', b'a001'], [b'a010']], [[b'b000']], []],
[[[b'a100', b'a101', b'a102'], [b'a110', b'a111']],
[[b'b100', b'b101'], [b'b110']],
[[], [b'c110', b'c111']]]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=2',
rt_inputs=(
[[['a000', 'a001'], ['a010']],
[['a100', 'a101', 'a102'], ['a110', 'a111']]],
[[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],
[[['c000'], ['c010']], [[], ['c110', 'c111']]]),
axis=2,
expected=[
[[[b'a000', b'a001'], [], [b'c000']],
[[b'a010'], [b'b010', b'b011'], [b'c010']]],
[[[b'a100', b'a101', b'a102'], [b'b100', b'b101'], []],
[[b'a110', b'a111'], [b'b110'], [b'c110', b'c111']]]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=3',
rt_inputs=(
[[['a000', 'a001'], ['a010']]],
[[['b000', 'b001'], ['b010']]],
[[['c000', 'c001'], ['c010']]]),
axis=3,
expected=[[
[[b'a000', b'b000', b'c000'], [b'a001', b'b001', b'c001']],
[[b'a010', b'b010', b'c010']]]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=-2',
rt_inputs=(
[[['a000', 'a001'], ['a010']],
[['a100', 'a101', 'a102'], ['a110', 'a111']]],
[[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],
[[['c000'], ['c010']], [[], ['c110', 'c111']]]),
axis=-2,
expected=[
[[[b'a000', b'a001'], [], [b'c000']],
[[b'a010'], [b'b010', b'b011'], [b'c010']]],
[[[b'a100', b'a101', b'a102'], [b'b100', b'b101'], []],
[[b'a110', b'a111'], [b'b110'], [b'c110', b'c111']]]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=-1',
rt_inputs=(
[[['a000', 'a001'], ['a010']]],
[[['b000', 'b001'], ['b010']]],
[[['c000', 'c001'], ['c010']]]),
axis=-1,
expected=[[
[[b'a000', b'b000', b'c000'], [b'a001', b'b001', b'c001']],
[[b'a010', b'b010', b'c010']]]]),
dict(
descr='ragged_stack([uniform, ragged, uniform], axis=1)',
ragged_ranks=[0, 1, 0],
rt_inputs=(
[['0('], ['1('], ['2(']], # shape=(3, 1)
[['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None)
[[')0'], [')1'], [')2']]), # shape=(3, 1)
axis=1,
expected=[
[[b'0('], [b'b00'], [b')0']],
[[b'1('], [b'b10', b'b11', b'b12'], [b')1']],
[[b'2('], [b'b20'], [b')2']]]),
dict(
descr='ragged_stack([uniform, uniform], axis=0)',
ragged_ranks=[0, 0],
rt_inputs=(
[['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2)
[['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3)
axis=0,
expected=[
[[b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21']],
[[b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]]),
dict(
descr='ragged_stack([uniform, ragged], axis=0)',
ragged_ranks=[0, 1],
rt_inputs=(
[['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2)
[['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3)
axis=0,
expected=[
[[b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21']],
[[b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]]),
dict(
descr='ragged_stack([uniform, ragged], axis=0) with rank-3 inputs',
ragged_ranks=[0, 2],
rt_inputs=(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]], # shape = (2, 2, 2)
[[[8], [8, 8]]]), # shape = (2, None, None)
axis=0,
expected=[[[[0, 1], [2, 3]], [[4, 5], [6, 7]]], [[[8], [8, 8]]]]),
dict(
descr='Two rank-3 inputs with ragged_rank=1, axis=-1',
ragged_ranks=[1, 1],
rt_inputs=(
[[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]],
[[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]),
axis=-1,
expected=[
[[[0, 9], [1, 8]], [[2, 7], [3, 6]], [[4, 5], [5, 4]]],
[],
[[[6, 3], [7, 2]], [[8, 1], [9, 0]]]],
expected_shape=[3, None, 2, 2]),
dict(
descr='Two rank-3 inputs with ragged_rank=1, axis=-2',
ragged_ranks=[1, 1],
rt_inputs=(
[[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]],
[[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]),
axis=-2,
expected=[
[[[0, 1], [9, 8]], [[2, 3], [7, 6]], [[4, 5], [5, 4]]], [],
[[[6, 7], [3, 2]], [[8, 9], [1, 0]]]]),
dict(
descr='ragged_stack([vector, vector], axis=0)',
ragged_ranks=[0, 0],
rt_inputs=([1, 2, 3], [4, 5, 6]),
axis=0,
expected=[[1, 2, 3], [4, 5, 6]]),
dict(
descr='One input (so just adds an outer dimension)',
rt_inputs=([['a00', 'a01'], [], ['a20', 'a21']],),
axis=0,
expected=[[[b'a00', b'a01'], [], [b'a20', b'a21']]]),
) # pyformat: disable
def testRaggedStack(self,
descr,
rt_inputs,
axis,
expected,
ragged_ranks=None,
expected_ragged_rank=None,
expected_shape=None):
if ragged_ranks is None:
ragged_ranks = [None] * len(rt_inputs)
rt_inputs = [
ragged_factory_ops.constant(rt_input, ragged_rank=rrank) # pylint: disable=g-long-ternary
if rrank != 0 else constant_op.constant(rt_input)
for (rt_input, rrank) in zip(rt_inputs, ragged_ranks)
]
stacked = ragged_concat_ops.stack(rt_inputs, axis)
if expected_ragged_rank is not None:
self.assertEqual(stacked.ragged_rank, expected_ragged_rank)
if expected_shape is not None:
self.assertEqual(stacked.shape.as_list(), expected_shape)
self.assertRaggedEqual(stacked, expected)
@parameterized.parameters(
dict(
rt_inputs=(),
axis=0,
error=ValueError,
message=r'rt_inputs may not be empty\.'),
dict(
rt_inputs=([[1, 2]], [[3, 4]]),
axis=r'foo',
error=TypeError,
message='axis must be an int'),
dict(
rt_inputs=([[1, 2]], [[3, 4]]),
axis=-4,
error=ValueError,
message='axis=-4 out of bounds: expected -3<=axis<3'),
dict(
rt_inputs=([[1, 2]], [[3, 4]]),
axis=3,
error=ValueError,
message='axis=3 out of bounds: expected -3<=axis<3'),
)
def testError(self, rt_inputs, axis, error, message):
self.assertRaisesRegexp(error, message, ragged_concat_ops.stack, rt_inputs,
axis)
def testSingleTensorInput(self):
"""Tests ragged_stack with a single tensor input.
Usually, we pass a list of values in for rt_inputs. However, you can
also pass in a single value (as with tf.stack), in which case it is
equivalent to expand_dims(axis=0). This test exercises that path.
"""
rt_inputs = ragged_factory_ops.constant([[1, 2], [3, 4]])
stacked = ragged_concat_ops.stack(rt_inputs, 0)
self.assertRaggedEqual(stacked, [[[1, 2], [3, 4]]])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_stack_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.to_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToTensorOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
def testDocStringExamples(self):
"""Example from ragged_to_tensor.__doc__."""
rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]])
dt = rt.to_tensor()
self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]])
@parameterized.parameters(
{
'rt_input': [],
'ragged_rank': 1,
'expected': [],
'expected_shape': [0, 0],
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]]
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'default': 9,
'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]]
},
{
'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]],
'ragged_rank':
1,
'default': [9],
'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]],
[[5], [6], [9]]]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'expected': [
[[1, 2], [0, 0], [3, 4]], #
[[0, 0], [0, 0], [0, 0]], #
[[5, 0], [0, 0], [0, 0]], #
[[6, 7], [8, 0], [0, 0]], #
]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'default':
9,
'expected': [
[[1, 2], [9, 9], [3, 4]], #
[[9, 9], [9, 9], [9, 9]], #
[[5, 9], [9, 9], [9, 9]], #
[[6, 7], [8, 9], [9, 9]], #
]
},
{
'rt_input': [[[1], [2], [3]]],
'ragged_rank': 1,
'default': 0,
'expected': [[[1], [2], [3]]],
},
{
'rt_input': [[[[1], [2]], [], [[3]]]],
'default': 9,
'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]],
},
)
def testRaggedTensorToTensor(self,
rt_input,
expected,
ragged_rank=None,
default=None,
expected_shape=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
dt = rt.to_tensor(default)
self.assertIsInstance(dt, ops.Tensor)
self.assertEqual(rt.dtype, dt.dtype)
self.assertTrue(dt.shape.is_compatible_with(rt.shape))
self.assertAllEqual(self.eval_to_list(dt), expected)
if expected_shape is not None:
dt_shape = array_ops.shape(dt)
self.assertAllEqual(dt_shape, expected_shape)
@parameterized.parameters(
{
'rt_input': [[1, 2, 3]],
'default': [0],
'error': (ValueError, r'Shape \(1,\) must have rank at most 0'),
},
{
'rt_input': [[[1, 2], [3, 4]], [[5, 6]]],
'ragged_rank': 1,
'default': [7, 8, 9],
'error': (ValueError, r'Shapes \(3,\) and \(2,\) are incompatible'),
},
{
'rt_input': [[1, 2, 3]],
'default': 'a',
'error': (TypeError, '.*'),
},
)
def testError(self, rt_input, default, error, ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
with self.assertRaisesRegexp(error[0], error[1]):
rt.to_tensor(default)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ragged operations for working with string Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("strings.bytes_split")
def string_bytes_split(input, name=None): # pylint: disable=redefined-builtin
"""Split string elements of `input` into bytes.
Examples:
```python
  >>> tf.strings.bytes_split('hello')
['h', 'e', 'l', 'l', 'o']
  >>> tf.strings.bytes_split(['hello', '123'])
<RaggedTensor [['h', 'e', 'l', 'l', 'o'], ['1', '2', '3']]>
```
Note that this op splits strings into bytes, not unicode characters. To
split strings into unicode characters, use `tf.strings.unicode_split`.
See also: `tf.io.decode_raw`, `tf.strings.split`, `tf.strings.unicode_split`.
Args:
input: A string `Tensor` or `RaggedTensor`: the strings to split. Must
have a statically known rank (`N`).
name: A name for the operation (optional).
Returns:
    A `RaggedTensor` of rank `N+1`: the bytes that make up the source strings.
"""
with ops.name_scope(name, "StringsByteSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input,
name="input")
if isinstance(input, ragged_tensor.RaggedTensor):
return input.with_flat_values(string_bytes_split(input.flat_values))
rank = input.shape.ndims
if rank is None:
raise ValueError("input must have a statically-known rank.")
if rank == 0:
return string_bytes_split(array_ops.stack([input]))[0]
elif rank == 1:
indices, values, shape = gen_string_ops.string_split(
input, delimiter="", skip_empty=False)
return ragged_tensor.RaggedTensor.from_value_rowids(
values=values, value_rowids=indices[:, 0], nrows=shape[0],
validate=False)
else:
return string_bytes_split(ragged_tensor.RaggedTensor.from_tensor(input))
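# A minimal sketch (assuming eager execution) of the rank dispatch above:
# scalars are wrapped, split, and unwrapped again, while inputs of rank > 1
# are converted to ragged tensors and handled recursively.
# `_demo_string_bytes_split` is a hypothetical helper, not part of the API.
def _demo_string_bytes_split():
  scalar = string_bytes_split("abc")         # -> [b'a', b'b', b'c']
  vector = string_bytes_split(["ab", "c"])   # -> <RaggedTensor [[b'a', b'b'], [b'c']]>
  return scalar, vector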
# pylint: disable=redefined-builtin
@tf_export("strings.unicode_encode")
def unicode_encode(input,
output_encoding,
errors="replace",
replacement_char=65533,
name=None):
r"""Encodes each sequence of Unicode code points in `input` into a string.
`result[i1...iN]` is the string formed by concatenating the Unicode
  codepoints `input[i1...iN, :]`, encoded using `output_encoding`.
Args:
input: An `N+1` dimensional potentially ragged integer tensor with shape
`[D1...DN, num_chars]`.
output_encoding: Unicode encoding that should be used to encode each
codepoint sequence. Can be `"UTF-8"`, `"UTF-16-BE"`, or `"UTF-32-BE"`.
errors: Specifies the response when an invalid codepoint is encountered
(optional). One of:
* `'replace'`: Replace invalid codepoint with the
`replacement_char`. (default)
* `'ignore'`: Skip invalid codepoints.
* `'strict'`: Raise an exception for any invalid codepoint.
replacement_char: The replacement character codepoint to be used in place of
any invalid input when `errors='replace'`. Any valid unicode codepoint may
      be used. The default value is 65533, the standard Unicode replacement
      character (U+FFFD).
name: A name for the operation (optional).
Returns:
    An `N` dimensional `string` tensor with shape `[D1...DN]`.
#### Example:
```python
>>> input = [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
>>> unicode_encode(input, 'UTF-8')
['G\xc3\xb6\xc3\xb6dnight', '\xf0\x9f\x98\x8a']
```
"""
with ops.name_scope(name, "UnicodeEncode", [input]):
input_tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(input)
if input_tensor.shape.ndims is None:
raise ValueError("Rank of input_tensor must be statically known.")
if ragged_tensor.is_ragged(input_tensor):
if input_tensor.flat_values.shape.ndims > 1:
# If the flat_values of our ragged tensor is multi-dimensional, we can
# process it separately and our output will have the same nested splits
# as our input.
return input_tensor.with_flat_values(
unicode_encode(input_tensor.flat_values, output_encoding, errors,
replacement_char))
elif input_tensor.ragged_rank > 1:
# Recursively process the values of the ragged tensor.
return input_tensor.with_values(
unicode_encode(input_tensor.values, output_encoding, errors,
replacement_char))
else:
# Our ragged tensor is of the correct shape (rank 1 flat_values tensor
# with ragged_rank of 1) so we can process it as normal.
return gen_string_ops.unicode_encode(
input_values=input_tensor.values,
input_splits=input_tensor.row_splits,
output_encoding=output_encoding,
errors=errors,
replacement_char=replacement_char)
else:
if input_tensor.shape.ndims == 2:
# The input tensor is of the correct 2-D shape, it's just not ragged.
return unicode_encode(
ragged_tensor.RaggedTensor.from_tensor(input_tensor),
output_encoding, errors, replacement_char)
elif input_tensor.shape.ndims > 2:
# We need to initially flatten the input tensor to 2-D, and then can
# reshape the output of our processed flattened tensor.
flat_input_tensor = array_ops.reshape(
input_tensor,
array_ops.stack([-1, array_ops.shape(input_tensor)[-1]]))
flat_output_tensor = unicode_encode(flat_input_tensor, output_encoding,
errors, replacement_char)
return array_ops.reshape(flat_output_tensor, input_tensor.shape[:-1])
elif input_tensor.shape.ndims == 0:
raise ValueError("input_tensor's rank must be at least 1.")
else:
# Our input tensor is rank 1, so we create a ragged tensor with an added
# dimension to create the correct input shape & type, and then remove
# the additional dimension from the output and return the string scalar.
ragged_input_tensor = ragged_tensor.RaggedTensor.from_row_splits(
input_tensor,
array_ops.stack(
[0, array_ops.shape(input_tensor, out_type=dtypes.int32)[0]]),
validate=False)
output_tensor = unicode_encode(ragged_input_tensor, output_encoding,
errors, replacement_char)
return array_ops.reshape(output_tensor, [])
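# A minimal sketch (assuming eager execution) of the ragged code path above,
# reusing the codepoints from the docstring example. `_demo_unicode_encode`
# is a hypothetical helper, not part of the public API.
def _demo_unicode_encode():
  codepoints = ragged_tensor.RaggedTensor.from_row_splits(
      values=[71, 246, 246, 100, 110, 105, 103, 104, 116, 128522],
      row_splits=[0, 9, 10])
  # Each row of codepoints becomes one encoded string:
  # [b'G\xc3\xb6\xc3\xb6dnight', b'\xf0\x9f\x98\x8a']
  return unicode_encode(codepoints, "UTF-8")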
# pylint: disable=redefined-builtin
@tf_export("strings.unicode_decode")
def unicode_decode(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
replace_control_characters=False,
name=None):
r"""Decodes each string in `input` into a sequence of Unicode code points.
`result[i1...iN, j]` is the Unicode codepoint for the `j`th character in
`input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`; and in place of C0 control
characters in `input` when `replace_control_characters=True`.
replace_control_characters: Whether to replace the C0 control characters
`(U+0000 - U+001F)` with the `replacement_char`.
name: A name for the operation (optional).
Returns:
    An `N+1` dimensional `int32` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensor is a `tf.Tensor` if `input` is a scalar, or a
`tf.RaggedTensor` otherwise.
#### Example:
```python
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> tf.strings.unicode_decode(input, 'UTF-8').tolist()
[[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
```
"""
with ops.name_scope(name, "UnicodeDecode", [input]):
return _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets=False)
@tf_export("strings.unicode_decode_with_offsets")
def unicode_decode_with_offsets(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
replace_control_characters=False,
name=None):
r"""Decodes each string into a sequence of code points with start offsets.
  This op is similar to `tf.strings.unicode_decode(...)`, but it also returns the
start offset for each character in its respective string. This information
can be used to align the characters with the original byte sequence.
Returns a tuple `(codepoints, start_offsets)` where:
* `codepoints[i1...iN, j]` is the Unicode codepoint for the `j`th character
in `input[i1...iN]`, when decoded using `input_encoding`.
* `start_offsets[i1...iN, j]` is the start byte offset for the `j`th
character in `input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`; and in place of C0 control
characters in `input` when `replace_control_characters=True`.
replace_control_characters: Whether to replace the C0 control characters
`(U+0000 - U+001F)` with the `replacement_char`.
name: A name for the operation (optional).
Returns:
A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`.
* `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`.
* `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensors are `tf.Tensor`s if `input` is a scalar, or
`tf.RaggedTensor`s otherwise.
#### Example:
```python
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> result = tf.strings.unicode_decode_with_offsets(input, 'UTF-8')
>>> result[0].tolist() # codepoints
[[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]]
>>> result[1].tolist() # offsets
[[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
```
"""
with ops.name_scope(name, "UnicodeDecodeWithOffsets", [input]):
return _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets=True)
@tf_export("strings.unicode_split")
def unicode_split(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
name=None):
r"""Splits each string in `input` into a sequence of Unicode code points.
`result[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its
`j`th character, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`.
name: A name for the operation (optional).
Returns:
    An `N+1` dimensional `string` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensor is a `tf.Tensor` if `input` is a scalar, or a
`tf.RaggedTensor` otherwise.
#### Example:
```python
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> tf.strings.unicode_split(input, 'UTF-8').tolist()
[['G', '\xc3\xb6', '\xc3\xb6', 'd', 'n', 'i', 'g', 'h', 't'],
['\xf0\x9f\x98\x8a']]
```
"""
with ops.name_scope(name, "UnicodeSplit", [input]):
codepoints = _unicode_decode(input, input_encoding, errors,
replacement_char, False, with_offsets=False)
return unicode_encode(
ragged_array_ops.expand_dims(codepoints, -1),
output_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char)
@tf_export("strings.unicode_split_with_offsets")
def unicode_split_with_offsets(input,
input_encoding,
errors="replace",
replacement_char=0xFFFD,
name=None):
r"""Splits each string into a sequence of code points with start offsets.
  This op is similar to `tf.strings.unicode_split(...)`, but it also returns the
start offset for each character in its respective string. This information
can be used to align the characters with the original byte sequence.
Returns a tuple `(chars, start_offsets)` where:
* `chars[i1...iN, j]` is the substring of `input[i1...iN]` that encodes its
`j`th character, when decoded using `input_encoding`.
* `start_offsets[i1...iN, j]` is the start byte offset for the `j`th
character in `input[i1...iN]`, when decoded using `input_encoding`.
Args:
input: An `N` dimensional potentially ragged `string` tensor with shape
`[D1...DN]`. `N` must be statically known.
input_encoding: String name for the unicode encoding that should be used to
decode each string.
errors: Specifies the response when an input string can't be converted
using the indicated encoding. One of:
* `'strict'`: Raise an exception for any illegal substrings.
* `'replace'`: Replace illegal substrings with `replacement_char`.
* `'ignore'`: Skip illegal substrings.
replacement_char: The replacement codepoint to be used in place of invalid
substrings in `input` when `errors='replace'`.
name: A name for the operation (optional).
Returns:
A tuple of `N+1` dimensional tensors `(codepoints, start_offsets)`.
* `codepoints` is an `int32` tensor with shape `[D1...DN, (num_chars)]`.
* `offsets` is an `int64` tensor with shape `[D1...DN, (num_chars)]`.
The returned tensors are `tf.Tensor`s if `input` is a scalar, or
`tf.RaggedTensor`s otherwise.
#### Example:
```python
>>> input = [s.encode('utf8') for s in (u'G\xf6\xf6dnight', u'\U0001f60a')]
>>> result = tf.strings.unicode_split_with_offsets(input, 'UTF-8')
>>> result[0].tolist() # character substrings
[['G', '\xc3\xb6', '\xc3\xb6', 'd', 'n', 'i', 'g', 'h', 't'],
['\xf0\x9f\x98\x8a']]
>>> result[1].tolist() # offsets
[[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]]
```
"""
with ops.name_scope(name, "UnicodeSplitWithOffsets", [input]):
codepoints, offsets = _unicode_decode(input, input_encoding, errors,
replacement_char, False,
with_offsets=True)
chars = unicode_encode(
ragged_array_ops.expand_dims(codepoints, -1),
output_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char)
return chars, offsets
def _unicode_decode(input, input_encoding, errors, replacement_char,
replace_control_characters, with_offsets):
"""Decodes each string into a sequence of codepoints."""
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(input, name="input")
input_ndims = input.shape.ndims
if input_ndims is None:
raise ValueError("Rank of `input` must be statically known.")
if input_ndims > 1:
# Convert to a ragged tensor with ragged_rank = input_ndims - 1.
if not ragged_tensor.is_ragged(input):
input = ragged_tensor.RaggedTensor.from_tensor(
input, ragged_rank=input_ndims - 1)
elif input.ragged_rank < input_ndims - 1:
input = input.with_flat_values(
ragged_tensor.RaggedTensor.from_tensor(
input.flat_values,
            ragged_rank=input_ndims - input.ragged_rank - 1))
# Reshape the input to a flat vector, and apply the gen_string_ops op.
if ragged_tensor.is_ragged(input):
flat_input = array_ops.reshape(input.flat_values, [-1])
else:
flat_input = array_ops.reshape(input, [-1])
if with_offsets:
decode_op = gen_string_ops.unicode_decode_with_offsets
else:
decode_op = gen_string_ops.unicode_decode
flat_result = decode_op(
input=flat_input,
input_encoding=input_encoding,
errors=errors,
replacement_char=replacement_char,
replace_control_characters=replace_control_characters)
if input_ndims == 0:
codepoints = flat_result.char_values
if with_offsets:
offsets = flat_result.char_to_byte_starts
else:
codepoints = ragged_tensor.RaggedTensor.from_row_splits(
flat_result.char_values, flat_result.row_splits, validate=False)
if input_ndims > 1:
codepoints = input.with_flat_values(codepoints)
if with_offsets:
offsets = ragged_tensor.RaggedTensor.from_row_splits(
flat_result.char_to_byte_starts, flat_result.row_splits,
validate=False)
if input_ndims > 1:
offsets = input.with_flat_values(offsets)
if with_offsets:
return codepoints, offsets
else:
return codepoints
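# A minimal sketch (assuming eager execution) of the flatten-and-reassemble
# strategy in `_unicode_decode` above: a 2-D dense input is flattened to a
# vector of strings, decoded once, and the codepoints are re-wrapped with the
# original outer structure. `_demo_unicode_decode_2d` is a hypothetical
# helper, not part of the public API.
def _demo_unicode_decode_2d():
  dense = [["ab", "c"], ["de", "f"]]
  # -> <RaggedTensor [[[97, 98], [99]], [[100, 101], [102]]]>
  return unicode_decode(dense, "UTF-8")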
@tf_export("strings.split", v1=[])
def string_split_v2(input, sep=None, maxsplit=-1, name=None): # pylint: disable=redefined-builtin
"""Split elements of `input` based on `sep` into a `RaggedTensor`.
Let N be the size of `input` (typically N will be the batch size). Split each
element of `input` based on `sep` and return a `SparseTensor` or
`RaggedTensor` containing the split tokens. Empty tokens are ignored.
Example:
```python
>>> tf.strings.split('hello world')
<Tensor ['hello', 'world']>
>>> tf.strings.split(['hello world', 'a b c'])
<tf.RaggedTensor [['hello', 'world'], ['a', 'b', 'c']]>
```
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and
`sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
  string, consecutive whitespace characters are regarded as a single separator,
  and the result will contain no empty strings at the start or end if the
  string has leading or trailing whitespace.
Note that the above mentioned behavior matches python's str.split.
Args:
input: A string `Tensor` of rank `N`, the strings to split. If
`rank(input)` is not known statically, then it is assumed to be `1`.
sep: `0-D` string `Tensor`, the delimiter string.
maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.
name: A name for the operation (optional).
Raises:
ValueError: If sep is not a string.
Returns:
A `RaggedTensor` of rank `N+1`, the strings split according to the
delimiter.
"""
with ops.name_scope(name, "StringSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, dtype=dtypes.string, name="input")
if isinstance(input, ragged_tensor.RaggedTensor):
return input.with_flat_values(
string_split_v2(input.flat_values, sep, maxsplit))
rank = input.shape.ndims
if rank == 0:
return string_split_v2(array_ops.stack([input]), sep, maxsplit)[0]
elif rank == 1 or rank is None:
sparse_result = string_ops.string_split_v2(
input, sep=sep, maxsplit=maxsplit)
return ragged_tensor.RaggedTensor.from_value_rowids(
values=sparse_result.values,
value_rowids=sparse_result.indices[:, 0],
nrows=sparse_result.dense_shape[0],
validate=False)
else:
return string_split_v2(
ragged_tensor.RaggedTensor.from_tensor(input), sep, maxsplit)
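# A minimal sketch (assuming eager execution) of the separator semantics
# documented above; `_demo_string_split_v2` is a hypothetical helper, not
# part of the public API.
def _demo_string_split_v2():
  # An explicit separator keeps the empty token between consecutive delimiters:
  # <RaggedTensor [[b'1', b'2', b'', b'3']]>
  explicit = string_split_v2(["1<>2<><>3"], sep="<>")
  # sep=None collapses runs of whitespace and drops leading/trailing empties:
  # <RaggedTensor [[b'hello', b'world']]>
  whitespace = string_split_v2(["  hello   world "])
  return explicit, whitespace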
@tf_export(v1=["string_split"])
@deprecation.deprecated_args(None,
"delimiter is deprecated, please use sep instead.",
"delimiter")
def string_split(source, sep=None, skip_empty=True, delimiter=None,
result_type="SparseTensor", name=None): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter`.
Let N be the size of `source` (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
or `RaggedTensor` containing the split tokens. Empty tokens are ignored.
If `sep` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
Examples:
```python
>>> tf.strings.split(['hello world', 'a b c'])
tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
values=['hello', 'world', 'a', 'b', 'c']
dense_shape=[2, 3])
>>> tf.strings.split(['hello world', 'a b c'], result_type="RaggedTensor")
<tf.RaggedTensor [['hello', 'world'], ['a', 'b', 'c']]>
```
Args:
source: `1-D` string `Tensor`, the strings to split.
sep: `0-D` string `Tensor`, the delimiter character, the string should
be length 0 or 1. Default is ' '.
skip_empty: A `bool`. If `True`, skip the empty strings from the result.
delimiter: deprecated alias for `sep`.
result_type: The tensor type for the result: one of `"RaggedTensor"` or
`"SparseTensor"`.
name: A name for the operation (optional).
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` or `RaggedTensor` of rank `2`, the strings split according
to the delimiter. The first column of the indices corresponds to the row
in `source` and the second column corresponds to the index of the split
component in this row.
"""
with ops.name_scope(name, "StringSplit", [source]):
sparse_result = string_ops.string_split(
source, sep=sep, skip_empty=skip_empty, delimiter=delimiter)
if result_type == "SparseTensor":
return sparse_result
elif result_type == "RaggedTensor":
return ragged_tensor.RaggedTensor.from_value_rowids(
values=sparse_result.values,
value_rowids=sparse_result.indices[:, 0],
nrows=sparse_result.dense_shape[0],
validate=False)
else:
raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.")
# In TensorFlow 1.x, "tf.strings.split" uses the new signature (with maxsplit),
# but we need to add the result_type argument.
@tf_export(v1=["strings.split"])
def strings_split_v1(input=None, sep=None, maxsplit=-1, # pylint: disable=redefined-builtin
result_type="SparseTensor", source=None, name=None):
"""Split elements of `input` based on `sep`.
Let N be the size of `input` (typically N will be the batch size). Split each
element of `input` based on `sep` and return a `SparseTensor` or
`RaggedTensor` containing the split tokens. Empty tokens are ignored.
Examples:
```python
>>> tf.strings.split(['hello world', 'a b c'])
tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
values=['hello', 'world', 'a', 'b', 'c']
dense_shape=[2, 3])
>>> tf.strings.split(['hello world', 'a b c'], result_type="RaggedTensor")
<tf.RaggedTensor [['hello', 'world'], ['a', 'b', 'c']]>
```
If `sep` is given, consecutive delimiters are not grouped together and are
deemed to delimit empty strings. For example, `input` of `"1<>2<><>3"` and
`sep` of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
  string, consecutive whitespace characters are regarded as a single separator,
  and the result will contain no empty strings at the start or end if the
  string has leading or trailing whitespace.
Note that the above mentioned behavior matches python's str.split.
Args:
input: A string `Tensor` of rank `N`, the strings to split. If
`rank(input)` is not known statically, then it is assumed to be `1`.
sep: `0-D` string `Tensor`, the delimiter character.
maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.
result_type: The tensor type for the result: one of `"RaggedTensor"` or
`"SparseTensor"`.
source: alias for "input" argument.
name: A name for the operation (optional).
Raises:
ValueError: If sep is not a string.
Returns:
A `SparseTensor` or `RaggedTensor` of rank `N+1`, the strings split
according to the delimiter.
"""
input = deprecation.deprecated_argument_lookup(
"input", input, "source", source)
with ops.name_scope(name, "StringSplit", [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, dtype=dtypes.string, name="input")
if result_type == "SparseTensor" and input.shape.rank == 1:
return string_ops.string_split_v2(input, sep=sep, maxsplit=maxsplit)
ragged_result = string_split_v2(input, sep=sep, maxsplit=maxsplit)
if result_type == "SparseTensor":
return ragged_result.to_sparse()
elif result_type == "RaggedTensor":
return ragged_result
else:
raise ValueError("result_type must be 'RaggedTensor' or 'SparseTensor'.")
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_string_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for storing ragged tensors and their values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_eval_using_default_session = ops._eval_using_default_session
# pylint: enable=protected-access
#===============================================================================
# RaggedTensor
#===============================================================================
@tf_export("RaggedTensor")
class RaggedTensor(composite_tensor.CompositeTensor):
"""Represents a ragged tensor.
A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are
dimensions whose slices may have different lengths. For example, the inner
(column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,
since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.
Dimensions whose slices all have the same length are called *uniform
dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,
since it consists of a single slice (and so there is no possibility for
differing slice lengths).
The total number of dimensions in a `RaggedTensor` is called its *rank*,
and the number of ragged dimensions in a `RaggedTensor` is called its
*ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation
time: it can't depend on the runtime values of `Tensor`s, and can't vary
dynamically for different session runs.
### Potentially Ragged Tensors
Many ops support both `Tensor`s and `RaggedTensor`s. The term "potentially
ragged tensor" may be used to refer to a tensor that might be either a
`Tensor` or a `RaggedTensor`. The ragged-rank of a `Tensor` is zero.
### Documenting RaggedTensor Shapes
When documenting the shape of a RaggedTensor, ragged dimensions can be
indicated by enclosing them in parentheses. For example, the shape of
a 3-D `RaggedTensor` that stores the fixed-size word embedding for each
word in a sentence, for each sentence in a batch, could be written as
`[num_sentences, (num_words), embedding_size]`. The parentheses around
`(num_words)` indicate that dimension is ragged, and that the length
of each element list in that dimension may vary for each item.
### Component Tensors
Internally, a `RaggedTensor` consists of a concatenated list of values that
are partitioned into variable-length rows. In particular, each `RaggedTensor`
consists of:
* A `values` tensor, which concatenates the variable-length rows into a
flattened list. For example, the `values` tensor for
`[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.
* A `row_splits` vector, which indicates how those flattened values are
divided into rows. In particular, the values for row `rt[i]` are stored
in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Example:
```python
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
### Alternative Row-Partitioning Schemes
In addition to `row_splits`, ragged tensors provide support for four other
row-partitioning schemes:
* `row_lengths`: a vector with shape `[nrows]`, which specifies the length
of each row.
* `value_rowids` and `nrows`: `value_rowids` is a vector with shape
`[nvals]`, corresponding one-to-one with `values`, which specifies
each value's row index. In particular, the row `rt[row]` consists of the
values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an
integer scalar that specifies the number of rows in the
`RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)
* `row_starts`: a vector with shape `[nrows]`, which specifies the start
offset of each row. Equivalent to `row_splits[:-1]`.
* `row_limits`: a vector with shape `[nrows]`, which specifies the stop
offset of each row. Equivalent to `row_splits[1:]`.
Example: The following ragged tensors are equivalent, and all represent the
nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.
```python
>>> values = [3, 1, 4, 1, 5, 9, 2, 6]
>>> rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
>>> rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
>>> rt3 = RaggedTensor.from_value_rowids(
... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
>>> rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
>>> rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
```
### Multiple Ragged Dimensions
`RaggedTensor`s with multiple ragged dimensions can be defined by using
a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`
adds a single ragged dimension.
```python
>>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above
... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
>>> outer_rt = RaggedTensor.from_row_splits(
... values=inner_rt, row_splits=[0, 3, 3, 5])
>>> print outer_rt.to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
>>> print outer_rt.ragged_rank
2
```
The factory function `RaggedTensor.from_nested_row_splits` may be used to
construct a `RaggedTensor` with multiple ragged dimensions directly, by
providing a list of `row_splits` tensors:
```python
>>> RaggedTensor.from_nested_row_splits(
... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
```
### Uniform Inner Dimensions
`RaggedTensor`s with uniform inner dimensions can be defined
by using a multidimensional `Tensor` for `values`.
```python
>>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3]),
  ...    row_splits=[0, 2, 5])
>>> print rt.to_list()
[[[1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
>>> print rt.shape
(2, ?, 3)
```
### RaggedTensor Shape Restrictions
The shape of a RaggedTensor is currently restricted to have the following
form:
* A single uniform dimension
* Followed by one or more ragged dimensions
* Followed by zero or more uniform dimensions.
This restriction follows from the fact that each nested `RaggedTensor`
replaces the uniform outermost dimension of its `values` with a uniform
dimension followed by a ragged dimension.
"""
#=============================================================================
# Constructor (private)
#=============================================================================
def __init__(self,
values,
row_splits,
cached_row_lengths=None,
cached_value_rowids=None,
cached_nrows=None,
internal=False):
"""Creates a `RaggedTensor` with a specified partitioning for `values`.
This constructor is private -- please use one of the following ops to
build `RaggedTensor`s:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
Args:
values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`.
row_splits: A 1-D integer tensor with shape `[nrows+1]`.
cached_row_lengths: A 1-D integer tensor with shape `[nrows]`
cached_value_rowids: A 1-D integer tensor with shape `[nvals]`.
      cached_nrows: An integer scalar tensor.
internal: True if the constructor is being called by one of the factory
methods. If false, an exception will be raised.
Raises:
TypeError: If a row partitioning tensor has an inappropriate dtype.
TypeError: If exactly one row partitioning argument was not specified.
ValueError: If a row partitioning tensor has an inappropriate shape.
ValueError: If multiple partitioning arguments are specified.
ValueError: If nrows is specified but value_rowids is not None.
"""
if not internal:
raise ValueError("RaggedTensor constructor is private; please use one "
"of the factory methods instead (e.g., "
"RaggedTensor.from_row_lengths())")
# Validate the arguments.
if not isinstance(row_splits, ops.Tensor):
raise TypeError("Row-partitioning argument must be a Tensor, got %r" %
row_splits)
if not isinstance(values, (RaggedTensor, ops.Tensor)):
raise TypeError("values must be a Tensor or RaggedTensor, got %r" %
values)
if row_splits.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("Row-partitioning argument must be int32 or int64")
# Validate shapes & dtypes.
row_splits.shape.assert_has_rank(1)
values.shape.with_rank_at_least(1)
row_splits.set_shape([None])
if isinstance(values, RaggedTensor):
assert row_splits.dtype == values.row_splits.dtype
self._values = values
self._row_splits = row_splits
# Store any cached tensors. These are used to avoid unnecessary
# round-trip conversions when a RaggedTensor is constructed from
# lengths or rowids, and we later want those lengths/rowids back.
for tensor in [cached_row_lengths, cached_value_rowids, cached_nrows]:
if tensor is not None:
if not isinstance(tensor, ops.Tensor):
raise TypeError("Cached value must be a Tensor or None.")
elif tensor.dtype not in (dtypes.int32, dtypes.int64):
raise TypeError("Cached value must be int32 or int64.")
self._cached_row_lengths = cached_row_lengths
self._cached_value_rowids = cached_value_rowids
self._cached_nrows = cached_nrows
#=============================================================================
# Factory Methods
#=============================================================================
@classmethod
def from_value_rowids(cls,
values,
value_rowids,
nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `value_rowids`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values[i] for i in range(len(values)) if value_rowids[i] == row]
for row in range(nrows)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
one-to-one with `values`, and specifies each value's row index. Must be
nonnegative, and must be sorted in ascending order.
nrows: An integer scalar specifying the number of rows. This should be
        specified if the `RaggedTensor` may contain empty trailing rows. Must
be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty).
Defaults to `value_rowids[-1]` (or zero if `value_rowids` is empty).
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_value_rowids(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
... nrows=5))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromValueRowIds",
[values, value_rowids, nrows]):
values, value_rowids = cls._convert_values_and_row_partition(
values, value_rowids, "value_rowids")
if nrows is None:
const_rowids = tensor_util.constant_value(value_rowids)
if const_rowids is None:
nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1
const_nrows = None
else:
const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0
nrows = ops.convert_to_tensor(const_nrows, value_rowids.dtype,
name="nrows")
else:
nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows")
const_nrows = tensor_util.constant_value(nrows)
if const_nrows is not None:
if const_nrows < 0:
raise ValueError("Expected nrows >= 0; got %d" % const_nrows)
const_rowids = tensor_util.constant_value(value_rowids)
if const_rowids is not None and const_rowids.size > 0:
if not const_nrows >= const_rowids[-1] + 1:
raise ValueError(
"Expected nrows >= value_rowids[-1] + 1; got nrows=%d, "
"value_rowids[-1]=%d" % (const_nrows, const_rowids[-1]))
value_rowids.shape.assert_has_rank(1)
nrows.shape.assert_has_rank(0)
values.shape[:1].assert_is_compatible_with(value_rowids.shape)
if validate:
msg = "Arguments to from_value_rowids do not form a valid RaggedTensor"
nvals1 = _nrows(values)
nvals2 = _nrows(value_rowids)
checks = [
check_ops.assert_rank(value_rowids, 1, message=msg),
check_ops.assert_rank(nrows, 0, message=msg),
check_ops.assert_equal(nvals1, nvals2, message=msg),
check_ops.assert_non_negative(value_rowids[:1], message=msg),
_assert_monotonic_increasing(value_rowids, message=msg),
check_ops.assert_less(value_rowids[-1:], nrows, message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
value_rowids = control_flow_ops.with_dependencies(checks, value_rowids)
# Convert value_rowids & nrows to row_splits.
# Note: we don't use segment_ids_to_row_splits() here because we want
# to save the intermediate value `row_lengths`, so we can cache it.
# TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the
# cast.
value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32)
nrows_int32 = math_ops.cast(nrows, dtypes.int32)
row_lengths = math_ops.bincount(
value_rowids_int32,
minlength=nrows_int32,
maxlength=nrows_int32,
dtype=value_rowids.dtype)
row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
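      # For example (matching the docstring above), value_rowids=[0, 0, 0, 0, 2, 2, 2, 3]
      # with nrows=5 gives row_lengths=[4, 0, 3, 1, 0] and
      # row_splits=[0, 4, 4, 7, 8, 8].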
if const_nrows is not None:
row_lengths.set_shape([const_nrows])
row_splits.set_shape([const_nrows + 1])
return cls(
values,
row_splits,
cached_row_lengths=row_lengths,
cached_value_rowids=value_rowids,
cached_nrows=nrows,
internal=True)
@classmethod
def from_row_splits(cls, values, row_splits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_splits`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [values[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be
empty, and must be sorted in ascending order. `row_splits[0]` must be
zero and `row_splits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `row_splits` is an empty list.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(row_splits, (list, tuple)) and not row_splits:
raise ValueError("row_splits tensor may not be empty.")
if isinstance(row_splits, tensor_spec.TensorSpec):
return cls(values=values, row_splits=row_splits, internal=True)
with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]):
values, row_splits = cls._convert_values_and_row_partition(
values, row_splits, "row_splits")
row_splits.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_splits do not form a valid RaggedTensor"
nvals = _nrows(values, row_splits.dtype)
checks = [
check_ops.assert_rank(row_splits, 1, message=msg),
_assert_zero(row_splits[0], message=msg),
_assert_monotonic_increasing(row_splits, message=msg),
check_ops.assert_equal(row_splits[-1], nvals, message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_splits = control_flow_ops.with_dependencies(checks, row_splits)
return cls(values=values, row_splits=row_splits, internal=True)
@classmethod
def from_row_lengths(cls, values, row_lengths, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_lengths`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(length)]
for length in row_lengths]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative. `sum(row_lengths)` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_lengths(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_lengths=[4, 0, 3, 1, 0]))
    <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]):
values, row_lengths = cls._convert_values_and_row_partition(
values, row_lengths, "row_lengths")
row_lengths.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_lengths do not form a valid RaggedTensor"
nvals1 = math_ops.reduce_sum(row_lengths)
nvals2 = _nrows(values, row_lengths.dtype)
checks = [
check_ops.assert_rank(row_lengths, 1, message=msg),
check_ops.assert_non_negative(row_lengths, message=msg),
check_ops.assert_equal(nvals1, nvals2, message=msg)
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_lengths = control_flow_ops.with_dependencies(checks, row_lengths)
row_limits = math_ops.cumsum(row_lengths)
row_splits = array_ops.concat([[0], row_limits], axis=0)
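      # For example, row_lengths=[4, 0, 3, 1, 0] gives row_limits=[4, 4, 7, 8, 8]
      # and row_splits=[0, 4, 4, 7, 8, 8].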
return cls(
values=values,
row_splits=row_splits,
cached_row_lengths=row_lengths,
internal=True)
@classmethod
def from_row_starts(cls, values, row_starts, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_starts: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative and sorted in ascending order. If `nrows>0`, then
`row_starts[0]` must be zero.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_starts(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_starts=[0, 4, 4, 7, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]):
values, row_starts = cls._convert_values_and_row_partition(
values, row_starts, "row_starts")
row_starts.shape.assert_has_rank(1)
nvals = _nrows(values, row_starts.dtype)
if validate:
msg = "Arguments to from_row_starts do not form a valid RaggedTensor"
checks = [
check_ops.assert_rank(row_starts, 1, message=msg),
_assert_zero(row_starts[:1], message=msg),
_assert_monotonic_increasing(row_starts, message=msg),
check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_starts = control_flow_ops.with_dependencies(checks, row_starts)
row_splits = array_ops.concat([row_starts, [nvals]], axis=0)
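      # For example, row_starts=[0, 4, 4, 7, 8] with nvals=8 gives
      # row_splits=[0, 4, 4, 7, 8, 8].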
return cls(values=values, row_splits=row_splits, internal=True)
@classmethod
def from_row_limits(cls, values, row_limits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_limits`.
Equivalent to: `from_row_splits(values, concat([0, row_limits]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in
ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
```python
>>> print(tf.RaggedTensor.from_row_limits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_limits=[4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
```
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]):
values, row_limits = cls._convert_values_and_row_partition(
values, row_limits, "row_limits")
row_limits.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_limits do not form a valid RaggedTensor"
nvals = _nrows(values, row_limits.dtype)
checks = [
check_ops.assert_rank(row_limits, 1, message=msg),
check_ops.assert_non_negative(row_limits[:1], message=msg),
_assert_monotonic_increasing(row_limits, message=msg),
check_ops.assert_equal(row_limits[-1:], nvals, message=msg)
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_limits = control_flow_ops.with_dependencies(checks, row_limits)
zero = array_ops.zeros([1], row_limits.dtype)
row_splits = array_ops.concat([zero, row_limits], axis=0)
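      # For example, row_limits=[4, 4, 7, 8, 8] gives
      # row_splits=[0, 4, 4, 7, 8, 8].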
return cls(values=values, row_splits=row_splits, internal=True)
@classmethod
def from_nested_value_rowids(cls,
flat_values,
nested_value_rowids,
nested_nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `value_rowids` tensors.
Equivalent to:
```python
result = flat_values
for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)):
result = from_value_rowids(result, rowids, nrows)
```
Args:
flat_values: A potentially ragged tensor.
nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is
used as the `value_rowids` for the `i`th ragged dimension.
nested_nrows: A list of integer scalars. The `i`th scalar is used as the
`nrows` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty).
Raises:
ValueError: If `len(nested_values_rowids) != len(nested_nrows)`.
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_value_rowids, ops.Tensor):
raise TypeError("nested_value_rowids must be a list of Tensors")
if nested_nrows is None:
nested_nrows = [None] * len(nested_value_rowids)
else:
if isinstance(nested_nrows, ops.Tensor):
raise TypeError("nested_nrows must be a list of Tensors")
if len(nested_nrows) != len(nested_value_rowids):
raise ValueError("nested_nrows must have the same length as "
"nested_value_rowids")
with ops.name_scope(
name, "RaggedFromNestedValueRowIds",
[flat_values] + list(nested_value_rowids) + list(nested_nrows)):
result = flat_values
for value_rowids, nrows in reversed(
list(zip(nested_value_rowids, nested_nrows))):
result = cls.from_value_rowids(result, value_rowids, nrows,
validate=validate)
return result
@classmethod
def from_nested_row_splits(cls,
flat_values,
nested_row_splits,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_splits` tensors.
Equivalent to:
```python
result = flat_values
for row_splits in reversed(nested_row_splits):
result = from_row_splits(result, row_splits)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_splits` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form a
valid `RaggedTensor`.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_row_splits, ops.Tensor):
raise TypeError("nested_row_splits must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowSplits",
[flat_values] + list(nested_row_splits)):
result = flat_values
for splits in reversed(nested_row_splits):
result = cls.from_row_splits(result, splits, validate=validate)
return result
@classmethod
def from_nested_row_lengths(cls,
flat_values,
nested_row_lengths,
name=None,
validate=True):
"""Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.
Equivalent to:
```python
result = flat_values
for row_lengths in reversed(nested_row_lengths):
result = from_row_lengths(result, row_lengths)
```
Args:
flat_values: A potentially ragged tensor.
nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is
used as the `row_lengths` for the `i`th ragged dimension.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`.
Returns:
A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
"""
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(nested_row_lengths, ops.Tensor):
raise TypeError("nested_row_lengths must be a list of Tensors")
with ops.name_scope(name, "RaggedFromNestedRowlengths",
[flat_values] + list(nested_row_lengths)):
result = flat_values
for lengths in reversed(nested_row_lengths):
result = cls.from_row_lengths(result, lengths, validate=validate)
return result
@classmethod
def _convert_values_and_row_partition(cls, values, partition, name):
"""Converts `values` and `partition` to Tensors.
If `values` is a `RaggedTensor`, then converts `values` and `partition`
to have compatible row-partitioning dtypes. In particular, if any of the
row partitioning tensors are `int64`, then all of the other row
partitioning tensors wil be cast to `int64` (if auto_cast_partition_dtype()
is true) or an error will be raised (if auto_cast_partition_dtype() is
false).
Args:
values: The `values` for the `RaggedTensor` being constructed.
partition: A row-partitioning tensor for the `RaggedTensor` being
constructed. I.e., one of: row_splits, row_lengths, row_starts,
row_limits, value_rowids.
name: The name of the row-partitioning tensor.
Returns:
A tuple (values, partition).
"""
if isinstance(values, RaggedTensor):
if isinstance(partition, ops.Tensor):
if partition.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("%s must have dtype int32 or int64" % name)
if values.row_splits.dtype != partition.dtype:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("dtype mismatch: %s (%s) vs values.row_splits (%s)"
% (name, partition.dtype, values.row_splits.dtype))
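          # At this point auto-casting is enabled, so promote both
          # row-partitioning dtypes to int64 (e.g. an int32 `partition`
          # paired with int64 `values.row_splits`).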
partition = math_ops.cast(partition, dtypes.int64)
values = values.with_row_splits_dtype(dtypes.int64)
else:
partition = ops.convert_to_tensor(partition, values.row_splits.dtype,
name=name)
else:
values = ops.convert_to_tensor(values, name="values")
partition = ops.convert_to_tensor(
partition, preferred_dtype=dtypes.int64,
name=name)
if partition.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("%s must have dtype int32 or int64" % name)
return (values, partition)
#=============================================================================
# Accessors
#=============================================================================
@property
def dtype(self):
"""The `DType` of values in this tensor."""
return self._values.dtype
@property
def shape(self):
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Examples:
```python
>>> ragged.constant([[0], [1, 2]]).shape
TensorShape([Dimension(2), Dimension(None)])
>>> ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
TensorShape([Dimension(2), Dimension(None), Dimension(2)])
```
"""
nrows = tensor_shape.dimension_at_index(self._row_splits.shape, 0) - 1
values_shape = self._values.shape
value_shape = values_shape[1:]
return tensor_shape.TensorShape([nrows, None]).concatenate(value_shape)
@property
def ragged_rank(self):
"""The number of ragged dimensions in this ragged tensor.
Returns:
A Python `int` indicating the number of ragged dimensions in this ragged
tensor. The outermost dimension is not considered ragged.
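#### Example:
A brief illustrative sketch (assumes the default `ragged_rank` inferred by
`tf.ragged.constant`):
```python
>>> tf.ragged.constant([[1, 2], [3]]).ragged_rank
1
>>> tf.ragged.constant([[[1, 2], [3]], [[4]]]).ragged_rank
2
```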
"""
values_is_ragged = isinstance(self._values, RaggedTensor)
return self._values.ragged_rank + 1 if values_is_ragged else 1
@property
def values(self):
"""The concatenated rows for this ragged tensor.
`rt.values` is a potentially ragged tensor formed by flattening the two
outermost dimensions of `rt` into a single dimension.
`rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the
number of items in the outer two dimensions of `rt`).
`rt.ragged_rank = self.ragged_rank - 1`
Returns:
A potentially ragged tensor.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.values)
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
```
"""
return self._values
@property
def row_splits(self):
"""The row-split indices for this ragged tensor's `values`.
`rt.row_splits` specifies where the values for each row begin and end in
`rt.values`. In particular, the values for row `rt[i]` are stored in
the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Returns:
A 1-D integer `Tensor` with shape `[self.nrows+1]`.
The returned tensor is non-empty, and is sorted in ascending order.
`self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to
`self.values.shape[0]`.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> print(rt.row_splits)  # indices of row splits in rt.values
tf.Tensor([0, 4, 4, 7, 8, 8])
```
"""
return self._row_splits
@property
def flat_values(self):
"""The innermost `values` tensor for this ragged tensor.
Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
`rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
Conceptually, `flat_values` is the tensor formed by flattening the
outermost dimension and all of the ragged dimensions into a single
dimension.
`rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
(where `nvals` is the number of items in the flattened dimensions).
Returns:
A `Tensor`.
#### Example:
```python
>>> rt = ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
>>> print(rt.flat_values)
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
```
"""
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_values = rt_values.values
return rt_values
@property
def nested_row_splits(self):
"""A tuple containing the row_splits for all ragged dimensions.
`rt.nested_row_splits` is a tuple containing the `row_splits` tensors for
all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where:
* `value_splits = ()` if `rt.values` is a `Tensor`.
* `value_splits = rt.values.nested_row_splits` otherwise.
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
```python
>>> rt = ragged.constant([[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, splits in enumerate(rt.nested_row_splits):
... print('Splits for dimension %d: %s' % (i+1, splits))
Splits for dimension 1: [0, 3]
Splits for dimension 2: [0, 3, 3, 5]
Splits for dimension 3: [0, 4, 4, 7, 8, 8]
```
"""
rt_nested_splits = [self.row_splits]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_splits.append(rt_values.row_splits)
rt_values = rt_values.values
return tuple(rt_nested_splits)
def value_rowids(self, name=None):
"""Returns the row indices for the `values` in this ragged tensor.
`rt.value_rowids()` corresponds one-to-one with the outermost dimension of
`rt.values`, and specifies the row containing each value. In particular,
the row `rt[row]` consists of the values `rt.values[j]` where
`rt.value_rowids()[j] == row`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer `Tensor` with shape `self.values.shape[:1]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.values
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
>>> rt.value_rowids()
tf.Tensor([0, 0, 0, 0, 2, 2, 2, 3]) # corresponds 1:1 with rt.values
```
"""
if self._cached_value_rowids is not None:
return self._cached_value_rowids
with ops.name_scope(name, "RaggedValueRowIds", [self]):
return segment_id_ops.row_splits_to_segment_ids(self.row_splits)
def nrows(self, out_type=None, name=None):
"""Returns the number of rows in this ragged tensor.
I.e., the size of the outermost dimension of the tensor.
Args:
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
name: A name prefix for the returned tensor (optional).
Returns:
A scalar `Tensor` with dtype `out_type`.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.nrows() # rt has 5 rows.
5
```
"""
if out_type is None:
out_type = self._row_splits.dtype
else:
out_type = dtypes.as_dtype(out_type)
if self._cached_nrows is not None:
return math_ops.cast(self._cached_nrows, out_type)
with ops.name_scope(name, "RaggedNRows", [self]):
return array_ops.shape(self.row_splits, out_type=out_type)[0] - 1
def row_starts(self, name=None):
"""Returns the start indices for rows in this ragged tensor.
These indices specify where the values for each row begin in
`self.values`. `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.values
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
>>> rt.row_starts() # indices of row starts in rt.values
tf.Tensor([0, 4, 4, 7, 8])
```
"""
with ops.name_scope(name, "RaggedRowStarts", [self]):
return self.row_splits[:-1]
def row_limits(self, name=None):
"""Returns the limit indices for rows in this ragged tensor.
These indices specify where the values for each row end in
`self.values`. `rt.row_limits()` is equal to `rt.row_splits[1:]`.
Args:
name: A name prefix for the returned tensor (optional).
Returns:
A 1-D integer Tensor with shape `[nrows]`.
The returned tensor is nonnegative, and is sorted in ascending order.
#### Example:
```python
>>> rt = ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
>>> rt.values
tf.Tensor([3, 1, 4, 1, 5, 9, 2, 6])
>>> rt.row_limits() # indices of row limits in rt.values
tf.Tensor([4, 4, 7, 8, 8])
```
"""
with ops.name_scope(name, "RaggedRowLimits", [self]):
return self.row_splits[1:]
def row_lengths(self, axis=1, name=None):
"""Returns the lengths of the rows in this ragged tensor.
`rt.row_lengths()[i]` indicates the number of values in the
`i`th row of `rt`.
Args:
axis: An integer constant indicating the axis whose row lengths should be
returned.
name: A name prefix for the returned tensor (optional).
Returns:
A potentially ragged integer Tensor with shape `self.shape[:axis]`.
Raises:
ValueError: If `axis` is out of bounds.
#### Example:
```python
>>> rt = ragged.constant([[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
>>> rt.row_lengths()  # lengths of rows in rt
tf.Tensor([2, 0, 2, 1, 0])
>>> rt.row_lengths(axis=2) # lengths of axis=2 rows.
<tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
```
"""
if self._cached_row_lengths is not None:
return self._cached_row_lengths
with ops.name_scope(name, "RaggedRowLengths", [self]):
axis = ragged_util.get_positive_axis(axis, self.shape.ndims)
if axis == 0:
return self.nrows()
elif axis == 1:
splits = self.row_splits
return splits[1:] - splits[:-1]
elif isinstance(self.values, RaggedTensor):
return self.with_values(self.values.row_lengths(axis - 1))
else:
shape = array_ops.shape(self.values, out_type=self._row_splits.dtype)
return self.with_values(
array_ops.ones(shape[:axis - 1], self._row_splits.dtype) *
shape[axis - 1])
def nested_row_lengths(self, name=None):
"""Returns a tuple containing the row_lengths for all ragged dimensions.
`rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors for
all ragged dimensions in `rt`, ordered from outermost to innermost.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to
`self.ragged_rank`.
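#### Example:
An illustrative sketch, using the same simplified tensor notation as the
other examples in this class:
```python
>>> rt = ragged.constant([[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
>>> rt.nested_row_lengths()
(tf.Tensor([2, 0, 2, 1, 0]), tf.Tensor([3, 1, 2, 1, 1]))
```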
"""
with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
rt_nested_row_lengths = []
rt = self
while isinstance(rt, RaggedTensor):
rt_nested_row_lengths.append(rt.row_lengths())
rt = rt.values
return tuple(rt_nested_row_lengths)
def bounding_shape(self, axis=None, name=None, out_type=None):
"""Returns the tight bounding box shape for this `RaggedTensor`.
Args:
axis: An integer scalar or vector indicating which axes to return the
bounding box for. If not specified, then the full bounding box is
returned.
name: A name prefix for the returned tensor (optional).
out_type: `dtype` for the returned tensor. Defaults to
`self.row_splits.dtype`.
Returns:
An integer `Tensor` (`dtype=self.row_splits.dtype`). If `axis` is not
specified, then `output` is a vector with
`output.shape=[self.shape.ndims]`. If `axis` is a scalar, then the
`output` is a scalar. If `axis` is a vector, then `output` is a vector,
where `output[i]` is the bounding size for dimension `axis[i]`.
#### Example:
```python
>>> rt = ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
>>> rt.bounding_shape()
[5, 4]
```
"""
if out_type is None:
out_type = self._row_splits.dtype
else:
out_type = dtypes.as_dtype(out_type)
with ops.name_scope(name, "RaggedBoundingBox", [self, axis]):
nested_splits = self.nested_row_splits
rt_flat_values = self.flat_values
# Optimized special cases for when axis=0 or axis=1:
if isinstance(axis, int):
if axis == 0:
return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1
elif axis == 1:
return math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)
splits_shape = array_ops.shape(self.row_splits, out_type=out_type)
flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type)
ragged_dimensions = array_ops.stack([splits_shape[0] - 1] + [
math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)
for splits in nested_splits
])
inner_dimensions = flat_values_shape[1:]
bbox = array_ops.concat([ragged_dimensions, inner_dimensions], axis=0)
return bbox if axis is None else array_ops.gather(bbox, axis)
#=============================================================================
# Transformation
#=============================================================================
def with_values(self, new_values):
"""Returns a copy of `self` with `values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor to use as the `values` for the
returned `RaggedTensor`. Must have `rank > 0`, and must have the same
number of rows as `self.values`.
Returns:
A `RaggedTensor`. `result.rank = 1 + new_values.rank`.
`result.ragged_rank = 1 + new_values.ragged_rank`
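#### Example:
A minimal illustrative sketch (assumes eager execution so `.to_list()` is
available; `new_values` must have the same outer size as `self.values`):
```python
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> rt.with_values(tf.constant([10, 20, 30])).to_list()
[[10, 20], [30]]
```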
"""
new_values.shape.with_rank_at_least(1)
self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])
if (isinstance(new_values, RaggedTensor) and
self._row_splits.dtype != new_values.row_splits.dtype):
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("self and new_values have mismatched row_splits "
"dtypes; use RaggedTensor.with_row_splits_dtype() to "
"convert them to compatible dtypes.")
new_values = new_values.with_row_splits_dtype(dtypes.int64)
return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)
return RaggedTensor(
new_values,
self._row_splits,
self._cached_row_lengths,
self._cached_value_rowids,
self._cached_nrows,
internal=True)
def with_flat_values(self, new_values):
"""Returns a copy of `self` with `flat_values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor that should replace
`self.flat_values`. Must have `rank > 0`, and must have the same
number of rows as `self.flat_values`.
Returns:
A `RaggedTensor`.
`result.rank = self.ragged_rank + new_values.rank`.
`result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
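#### Example:
A minimal illustrative sketch (assumes eager execution so `.to_list()` is
available):
```python
>>> rt = tf.ragged.constant([[[1, 2], [3]], [[4]]])
>>> rt.with_flat_values(tf.constant([10, 20, 30, 40])).to_list()
[[[10, 20], [30]], [[40]]]
```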
"""
if isinstance(self._values, ops.Tensor):
return self.with_values(new_values)
else:
return self.with_values(self.values.with_flat_values(new_values))
def with_row_splits_dtype(self, dtype):
"""Returns a copy of this RaggedTensor with the given `row_splits` dtype.
For RaggedTensors with multiple ragged dimensions, the `row_splits` for all
nested `RaggedTensor` objects are cast to the given dtype.
Args:
dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.
Returns:
A copy of this RaggedTensor, with the `row_splits` cast to the given
type.
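#### Example:
A minimal illustrative sketch (assumes `tf.ragged.constant` builds its
`row_splits` with the default `tf.int64` dtype):
```python
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> rt.row_splits.dtype
tf.int64
>>> rt.with_row_splits_dtype(tf.int32).row_splits.dtype
tf.int32
```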
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("dtype must be int32 or int64")
if self._row_splits.dtype == dtype:
return self
row_splits = math_ops.cast(self._row_splits, dtype)
values = self._values
if isinstance(values, RaggedTensor):
values = values.with_row_splits_dtype(dtype)
cached_row_lengths = self._cached_row_lengths
if cached_row_lengths is not None:
cached_row_lengths = math_ops.cast(cached_row_lengths, dtype)
cached_value_rowids = self._cached_value_rowids
if cached_value_rowids is not None:
cached_value_rowids = math_ops.cast(cached_value_rowids, dtype)
cached_nrows = self._cached_nrows
if cached_nrows is not None:
cached_nrows = math_ops.cast(cached_nrows, dtype)
return RaggedTensor(values, row_splits, cached_row_lengths,
cached_value_rowids, cached_nrows, internal=True)
#=============================================================================
# Tensor Type Conversions
#=============================================================================
@classmethod
def from_tensor(cls,
tensor,
lengths=None,
padding=None,
ragged_rank=1,
name=None,
row_splits_dtype=dtypes.int64):
"""Converts a `tf.Tensor` into a `RaggedTensor`.
The set of absent/default values may be specified using a vector of lengths
or a padding value (but not both). If `lengths` is specified, then the
output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. If
`lengths` is a list of lists or tuple of lists, those lists will be used
as nested row lengths. If `padding` is specified, then any row *suffix*
consisting entirely of `padding` will be excluded from the returned
`RaggedTensor`. If neither `lengths` nor `padding` is specified, then the
returned `RaggedTensor` will have no absent/default values.
Examples:
```python
>>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
>>> tf.RaggedTensor.from_tensor(dt)
<tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
>>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])
<tf.RaggedTensor [[5], [], [6, 0, 0]]>
>>> tf.RaggedTensor.from_tensor(dt, padding=0)
<tf.RaggedTensor [[5, 7], [0, 3], [6]]>
>>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],
[[0, 0], [3, 0], [0, 0]],
[[6, 0], [0, 0], [0, 0]]])
>>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))
<tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>
```
Args:
tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or
higher.
lengths: An optional set of row lengths, specified using a 1-D integer
`Tensor` whose length is equal to `tensor.shape[0]` (the number of rows
in `tensor`). If specified, then `output[row]` will contain
`tensor[row][:lengths[row]]`. Negative lengths are treated as zero. You
may optionally pass a list or tuple of lengths to this argument, which
will be used as nested row lengths to construct a ragged tensor with
multiple ragged dimensions.
padding: An optional padding value. If specified, then any row suffix
consisting entirely of `padding` will be excluded from the returned
RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor`
and with `shape=tensor.shape[ragged_rank + 1:]`.
ragged_rank: Integer specifying the ragged rank for the returned
`RaggedTensor`. Must be greater than zero.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the specified `ragged_rank`. The shape of the
returned ragged tensor is compatible with the shape of `tensor`.
Raises:
ValueError: If both `lengths` and `padding` are specified.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if lengths is not None and padding is not None:
raise ValueError("Specify lengths or padding, but not both")
if not isinstance(ragged_rank, int):
raise TypeError("ragged_rank expected int, got %r" % ragged_rank)
if ragged_rank <= 0:
raise ValueError(
"ragged_rank must be greater than 0; got %s" % ragged_rank)
with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
tensor.shape.with_rank_at_least(ragged_rank + 1)
input_shape = array_ops.shape(tensor, out_type=row_splits_dtype)
ncols = input_shape[1]
# Handle ragged_rank>1 via recursion:
# If the output should have multiple ragged dimensions, then first
# flatten the tensor to eliminate all but the last ragged dimension,
# and recursively convert that flattened tensor. Then add on the splits
# for the dimensions that we flattened out.
if ragged_rank > 1:
# Flatten `tensor` to eliminate all but the last ragged dimension.
new_shape = array_ops.concat([
constant_op.constant([-1], row_splits_dtype),
input_shape[ragged_rank:]
],
axis=0)
flattened = array_ops.reshape(tensor, new_shape)
# Recursively convert the flattened tensor.
values = cls.from_tensor(flattened, lengths, padding,
row_splits_dtype=row_splits_dtype)
# The total number of elements in each dimension. E.g., if
# input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
dim_size = math_ops.cumprod(input_shape)
# Construct splits tensors for the dimensions that were flattened.
new_splits = [
math_ops.range(0, dim_size[dim - 1] + 1) * input_shape[dim]
for dim in range(1, ragged_rank)
]
return cls.from_nested_row_splits(values, new_splits, validate=False)
# If padding was specified, then use it to find row lengths.
if padding is not None:
padding = ops.convert_to_tensor(
padding, name="padding", dtype=tensor.dtype)
padding.shape.assert_is_compatible_with(tensor.shape[2:])
# Find places where the padding is equal to the tensor. (This will
# broadcast `padding` across the outermost 2 dimensions of `tensor`,
# so `has_default_value.shape = tensor.shape`.)
has_default_value = math_ops.equal(padding, tensor)
# If the padding isn't a scalar, then require that all values in the
# padding match each item in the tensor. After this block of code,
# `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just
# use reduce_all for both cases, because when you pass an empty `axis`
# list to reduce_all, it reduces all axes; but we want it to reduce no
# axes -- i.e., to be a no-op.)
tensor_rank = array_ops.rank(tensor)
reduce_axis = math_ops.range(2, tensor_rank)
has_default = control_flow_ops.cond(
tensor_rank > 2,
lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
lambda: has_default_value)
has_default.set_shape(tensor_shape.TensorShape([None, None]))
has_default.set_shape(tensor.shape[:2])
# Use has_default to find the length of each row: for each
# non-default item in a row, calculate the length that the row needs to
# have to include that item; and then take the max of those values
# (across each row).
has_nondefault = math_ops.logical_not(has_default)
has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
length_for_nondefault_value = (
has_nondefault * array_ops.expand_dims(
math_ops.range(1, ncols + 1), 0))
lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)
if lengths is not None:
if isinstance(lengths,
(list, tuple)) and len(lengths) and not isinstance(
lengths[0], (int, float)):
# In this case, we've been given nested row lengths. Rather than
# reconstructing the tensor mask directly, we can recreate it as
# a boolean RaggedTensor, then densify that and use that as the
# mask to clear out the unused data in the passed tensor.
tensor.shape.with_rank_at_least(len(lengths) + 1)
num_tokens = math_ops.reduce_sum(lengths[-1])
ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool)
ragged_mask = cls.from_nested_row_lengths(
ones_mask, lengths, validate=False)
dense_ragged_mask = ragged_mask.to_tensor(default_value=False)
masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask)
return cls.from_nested_row_lengths(
masked_data, lengths, validate=False)
else:
# If we have lengths (either directly supplied, or computed from
# paddings), then use those to construct splits; and then use masking
# to get the corresponding values.
lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
row_splits_dtype)
lengths.shape.assert_has_rank(1)
lengths = math_ops.minimum(lengths, ncols)
lengths = math_ops.maximum(lengths, 0)
limits = math_ops.cumsum(lengths)
splits = array_ops.concat(
[array_ops.zeros([1], row_splits_dtype), limits], axis=0)
mask = array_ops.sequence_mask(lengths, maxlen=ncols)
values = array_ops.boolean_mask(tensor, mask)
return cls.from_row_splits(values, splits, validate=False)
# If neither padding nor lengths were specified, then create a splits
# vector that contains no default values, and reshape the input tensor
# to form the values for the RaggedTensor.
nrows = input_shape[0]
nvals = nrows * ncols
splits = math_ops.range(nrows + 1) * ncols
values_shape = array_ops.concat([[nvals], input_shape[2:]], axis=0)
values = array_ops.reshape(tensor, values_shape)
return cls.from_row_splits(values, splits, validate=False)
def to_tensor(self, default_value=None, name=None):
"""Converts this `RaggedTensor` into a `tf.Tensor`.
Example:
```python
>>> rt = ragged.constant([[9, 8, 7], [], [6, 5], [4]])
>>> print(rt.to_tensor())
[[9 8 7]
[0 0 0]
[6 5 0]
[4 0 0]]
```
Args:
default_value: Value to set for indices not specified in `self`. Defaults
to zero. `default_value` must be broadcastable to
`self.shape[self.ragged_rank + 1:]`.
name: A name prefix for the returned tensors (optional).
Returns:
A `Tensor` with shape `ragged.bounding_shape(self)` and the
values specified by the non-empty values in `self`. Empty values are
assigned `default_value`.
"""
with ops.name_scope(name, "RaggedToTensor", [self, default_value]):
if default_value is not None:
default_value = ops.convert_to_tensor(
default_value, name="default_value", dtype=self.dtype)
# If ragged_rank > 1, then recursively convert the ragged values into a
# `Tensor` before we proceed.
values = self.values
if is_ragged(values):
values = values.to_tensor(default_value)
# Tile the default value, if necessary.
if default_value is not None:
if values.shape.ndims is not None:
default_value.shape.with_rank_at_most(values.shape.ndims - 1)
if (values.shape.ndims is None or default_value.shape.ndims is None or
values.shape.ndims != default_value.shape.ndims + 1):
value_shape = array_ops.shape(values)[1:]
default_value = array_ops.broadcast_to(default_value, value_shape)
default_value.shape.assert_is_compatible_with(values.shape[1:])
# Get the expected dense shape ([nrows, ncols] + value_shape).
rt_row_lengths = [self.row_splits[1:] - self.row_splits[:-1]]
nrows = array_ops.shape(self.row_splits,
out_type=self._row_splits.dtype)[0] - 1
ncols = math_ops.maximum(math_ops.reduce_max(rt_row_lengths), 0)
values_shape = array_ops.shape(values, out_type=self._row_splits.dtype)
value_shape = values_shape[1:]
nvals = values_shape[0]
# Build a default value if none was supplied.
if default_value is None:
default_value = array_ops.zeros(value_shape, dtype=values.dtype)
default_value.shape.assert_is_compatible_with(values.shape[1:])
default_value.set_shape(values.shape[1:])
# Get the row start indices, and expand to shape=[nrows, 1].
starts = array_ops.expand_dims(self.row_splits[:-1], 1)
# Get the row limit indices, and expand to shape=[nrows, 1].
limits = array_ops.expand_dims(self.row_splits[1:], 1)
# Get the column indices, and expand to shape=[1, ncols].
columns = array_ops.expand_dims(math_ops.range(0, ncols), 0)
# Build a list containing the values plus the default value. We will use
# tf.gather to collect values from this list for the `Tensor` (using
# nvals as the index for the default value).
values_and_default = array_ops.concat(
[values, array_ops.stack([default_value])], axis=0)
# Construct a matrix "indices" pointing into values_and_default. I.e.,
# output[r, c] = values_and_default[indices[r, c]].
nondefault_index = starts + columns
has_value = nondefault_index < limits
default_index = array_ops.fill(array_ops.stack([nrows, ncols]), nvals)
indices = array_ops.where(has_value, nondefault_index, default_index)
# Gather the results into a `Tensor`.
return array_ops.gather(values_and_default, indices)
@classmethod
def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):
"""Converts a 2D `tf.SparseTensor` to a `RaggedTensor`.
Each row of the `output` `RaggedTensor` will contain the explicit values
from the same row in `st_input`. `st_input` must be ragged-right; if it
is not ragged-right, then an error will be generated.
Example:
```python
>>> st = SparseTensor(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],
... values=[1, 2, 3, 4, 5],
... dense_shape=[4, 3])
>>> tf.RaggedTensor.from_sparse(st).eval().to_list()
[[1, 2, 3], [4], [], [5]]
```
Currently, only two-dimensional `SparseTensors` are supported.
Args:
st_input: The sparse tensor to convert. Must have rank 2.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the same values as `st_input`.
`output.ragged_rank = rank(st_input) - 1`.
`output.shape = [st_input.dense_shape[0], None]`.
Raises:
ValueError: If the number of dimensions in `st_input` is not known
statically, or is not two.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if not sparse_tensor.is_sparse(st_input):
raise TypeError("Expected SparseTensor, got %s" % type(st_input).__name__)
with ops.name_scope(name, "RaggedFromSparse", [st_input]):
st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
st_input, name="st_input")
if st_input.dense_shape.shape.ndims is None:
static_rank_from_dense_shape = None
else:
static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value
if st_input.indices.shape.ndims is None:
static_rank_from_indices = None
else:
static_rank_from_indices = st_input.indices.shape.dims[1].value
if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
raise ValueError("rank(st_input) must be 2")
with ops.control_dependencies(
_assert_sparse_indices_are_ragged_right(st_input.indices)):
# Treat sparse row indices as segment ids to generate a splits tensor
# that we can pair with the sparse tensor values. (Ignore sparse column
# indices.)
segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)
num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)
return cls.from_value_rowids(
st_input.values, segment_ids, num_segments, validate=False)
def to_sparse(self, name=None):
"""Converts this `RaggedTensor` into a `tf.SparseTensor`.
Example:
```python
>>> rt = ragged.constant([[1, 2, 3], [4], [], [5, 6]])
>>> rt.to_sparse().eval()
SparseTensorValue(indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0], [3, 1]],
values=[1, 2, 3, 4, 5, 6],
dense_shape=[4, 3])
```
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A SparseTensor with the same values as `self`.
"""
with ops.name_scope(name, "RaggedToSparse", [self]):
result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(
self.nested_row_splits, self.flat_values, name=name)
return sparse_tensor.SparseTensor(result.sparse_indices,
result.sparse_values,
result.sparse_dense_shape)
@classmethod
def _from_variant(cls,
variant,
dtype,
output_ragged_rank,
input_ragged_rank=None,
name=None):
"""Converts a `variant` Tensor into a `RaggedTensor`.
The input `variant` could be a scalar, meaning it encodes a single
`RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could
have an arbitrary rank, in which case each element is decoded into a
`RaggedTensor` with ragged_rank `input_ragged_rank` and these are then
stacked according to the input shape to output a single `RaggedTensor`
with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not
provided, it is inferred dynamically as `output_ragged_rank` -
`rank(variant)`. If `input_ragged_rank` is provided, the following must be
true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`.
Example:
```python
>>> rt = ragged.constant([[0], [1, 2]])
>>> et = rt._to_variant()
>>> stacked_et = ragged.stack([et, et])
>>> ragged.RaggedTensor._from_variant( # scalar input.
et, dtype=tf.int32, output_ragged_rank=1).eval().to_list()
[[0], [1, 2]]
>>> ragged.RaggedTensor._from_variant( # batched input.
stacked_et, dtype=tf.int32, output_ragged_rank=2).eval().to_list()
[[[0], [1, 2]], [[0], [1, 2]]]
```
Args:
variant: A `variant` Tensor representing an encoded (possibly
nested-batched) `RaggedTensor`.
dtype: The dtype of the encoded `RaggedTensor`.
output_ragged_rank: The expected ragged rank of the output `RaggedTensor`.
input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This
is optional and inferred dynamically if not provided.
name: A name prefix for the returned tensors (optional).
Returns:
A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`.
Raises:
ValueError: If the input rank is known, `input_ragged_rank` is provided
and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does
not hold.
"""
variant = ops.convert_to_tensor(
variant, name="variant", dtype=dtypes.variant)
if (variant.shape.ndims is not None and input_ragged_rank is not None and
output_ragged_rank != input_ragged_rank + variant.shape.ndims):
raise ValueError(
"output_ragged_rank must be equal to input_ragged_rank +"
"variant.shape.ndims, found variant.shape.ndims: %d, "
"input_ragged_rank: %d, output_ragged_rank: %d" %
(variant.shape.ndims, input_ragged_rank, output_ragged_rank))
input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank
with ops.name_scope(
name, "RaggedFromVariant",
[variant, dtype, input_ragged_rank, output_ragged_rank]):
result = gen_ragged_conversion_ops.ragged_tensor_from_variant(
variant, input_ragged_rank, output_ragged_rank, dtype, dtypes.int64,
name)
return cls.from_nested_row_splits(
result.output_dense_values,
result.output_nested_splits,
validate=False)
def _to_variant(self, batched_input=False, name=None):
"""Converts this `RaggedTensor` into a `variant` Tensor.
If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the
zero-th dimension, each component `RaggedTensor` is encoded into a scalar
`variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and
a scalar `variant` Tensor is returned.
Example:
>>> rt = ragged.constant([[[0]], [[1]], [[2]]])
>>> rt._to_variant().shape.as_list()
[]
>>> rt._to_variant(batched_input=True).shape.as_list()
[3]
Args:
batched_input: If `True`, the `RaggedTensor` is unbatched and converted to
a `variant` vector. Set to `False` by default.
name: A name prefix for the returned tensors (optional).
Returns:
A `variant` Tensor that encodes this `RaggedTensor`.
"""
with ops.name_scope(name, "RaggedToVariant", [self, batched_input]):
return gen_ragged_conversion_ops.ragged_tensor_to_variant(
self.nested_row_splits, self.flat_values, batched_input, name)
#=============================================================================
# String Encoding
#=============================================================================
def __repr__(self):
if self._is_eager():
return "<tf.RaggedTensor %s>" % self.to_list()
else:
return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self._values,
self._row_splits)
#=============================================================================
# Eager Execution Mode
#=============================================================================
def to_list(self):
"""Returns a nested Python `list` with the values for this `RaggedTensor`.
Requires that `rt` was constructed in eager execution mode.
Returns:
A nested Python `list`.
"""
if self._is_eager():
return self._eager_value().to_list()
else:
raise ValueError("RaggedTensor.to_list() is only supported in eager "
"mode; in graph mode, evaluate the RaggedTensor first "
"and then use RaggedTensorValue.to_list().")
def _eager_value(self):
"""Returns a RaggedTensorValue for self. Requires self._is_eager()=true."""
value = self.flat_values.numpy()
for row_splits in reversed(self.nested_row_splits):
value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())
return value
def _is_eager(self):
"""Returns True if values & row_splits Tensors are all `EagerTensor`s."""
rt = self
while isinstance(rt, RaggedTensor):
if not isinstance(rt.row_splits, ops.EagerTensor):
return False
rt = rt.values
return isinstance(rt, ops.EagerTensor)
#=============================================================================
# Indexing & Slicing
#=============================================================================
def __getitem__(self, key):
"""Returns the specified piece of this RaggedTensor."""
# See ragged_getitem.py for the documentation and implementation of this
# method.
#
# Note: the imports in ragged/__init__.py ensure that this method always
# gets overridden before it is called.
#=============================================================================
# Name Scope
#=============================================================================
# This private function is used by ops.name_scope to ensure that all of the
# input tensors for the scope belong to the same graph. Defining this means
# that you may include `RaggedTensor` objects in the name_scope `values`
# list.
def _as_graph_element(self):
"""Convert `self` to a graph element."""
values = self.values
while isinstance(values, RaggedTensor):
values = values.values
return values
#=============================================================================
# Composite Tensor
#=============================================================================
@property
def _type_spec(self):
return RaggedTensorSpec(
shape=self.shape,
dtype=self.dtype,
ragged_rank=self.ragged_rank,
row_splits_dtype=self._row_splits.dtype)
def _shape_invariant_to_type_spec(self, shape):
return RaggedTensorSpec(shape, self.dtype, self.ragged_rank,
self.row_splits.dtype)
def consumers(self):
return self._consumers()
def is_ragged(value):
"""Returns true if `value` is a ragged tensor or ragged tensor value."""
return isinstance(value,
(RaggedTensor, ragged_tensor_value.RaggedTensorValue))
def match_row_splits_dtypes(*tensors, **kwargs):
"""Return a copy of `tensors` with row_splits all having the same dtype.
Args:
*tensors: A list of Tensors or RaggedTensors.
**kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors),
where `dtype` is the data type used by row-splits, and `tensors` is the
converted list of `Tensors` and `RaggedTensors`.
Returns:
The converted list of `Tensors` and `RaggedTensors`.
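#### Example:
A minimal illustrative sketch (assumes `tf.ragged.constant` accepts a
`row_splits_dtype` argument, as the other constructors in this module do):
```python
>>> a = tf.ragged.constant([[1, 2], [3]], row_splits_dtype=tf.int32)
>>> b = tf.ragged.constant([[4], [5, 6]], row_splits_dtype=tf.int32)
>>> dtype, (a2, b2) = match_row_splits_dtypes(a, b, return_dtype=True)
>>> dtype
tf.int32
```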
"""
return_dtype = kwargs.pop("return_dtype", False)
if kwargs:
raise ValueError("Unexpected keyword args %r" % kwargs)
has_int32 = False
has_int64 = False
for tensor in tensors:
if isinstance(tensor, RaggedTensor):
if tensor.row_splits.dtype == dtypes.int32:
has_int32 = True
else:
has_int64 = True
if has_int32 and has_int64:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; "
"use RaggedTensor.with_row_splits_dtype() to convert "
"them to compatible dtypes.")
dtype = dtypes.int64
tensors = tuple(t.with_row_splits_dtype(dtypes.int64)
if isinstance(t, RaggedTensor) else t for t in tensors)
elif has_int32:
dtype = dtypes.int32
else:
dtype = dtypes.int64
if return_dtype:
return (dtype, tensors)
else:
return tensors
#===============================================================================
# RaggedTensorSpec
#===============================================================================
@tf_export("RaggedTensorSpec")
class RaggedTensorSpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.RaggedTensor`."""
__slots__ = ["_shape", "_dtype", "_ragged_rank", "_row_splits_dtype"]
@property
def value_type(self):
return RaggedTensor if self._ragged_rank > 0 else ops.Tensor
def __init__(self, shape=None, dtype=dtypes.float32, ragged_rank=None,
row_splits_dtype=dtypes.int64):
"""Constructs a type specification for a `tf.RaggedTensor`.
Args:
shape: The shape of the RaggedTensor, or `None` to allow any shape. If
a shape is specified, then all ragged dimensions must have size `None`.
dtype: `tf.DType` of values in the RaggedTensor.
ragged_rank: Python integer, the ragged rank of the RaggedTensor
to be described. Defaults to `shape.ndims - 1`.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor.
One of `tf.int32` or `tf.int64`.
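#### Example:
A minimal illustrative sketch of the `ragged_rank` default:
```python
>>> # shape has rank 2, so ragged_rank defaults to 2 - 1 = 1.
>>> spec = RaggedTensorSpec(shape=[None, None], dtype=tf.int32)
```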
"""
self._shape = tensor_shape.as_shape(shape)
self._dtype = dtypes.as_dtype(dtype)
self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
rank = self._shape.ndims
if ragged_rank is None:
if rank is None:
raise ValueError("Must specify ragged_rank or "
"a shape with a known rank.")
ragged_rank = rank - 1
self._ragged_rank = ragged_rank
if not isinstance(self._ragged_rank, int):
raise TypeError("ragged_rank must be an int")
if rank is not None:
if ragged_rank >= rank:
raise ValueError("ragged_rank must be less than rank.")
def _serialize(self):
return (self._shape, self._dtype, self._ragged_rank, self._row_splits_dtype)
@property
def _component_specs(self):
if self._ragged_rank == 0:
return [tensor_spec.TensorSpec(self._shape, self._dtype)]
flat_values_shape = tensor_shape.TensorShape([None]).concatenate(
self._shape[self._ragged_rank + 1:])
outer_dim = tensor_shape.dimension_at_index(self._shape, 0)
outer_splits_shape = [None if outer_dim is None else outer_dim + 1]
inner_splits_spec = tensor_spec.TensorSpec([None], self._row_splits_dtype)
specs = (
[tensor_spec.TensorSpec(flat_values_shape, self._dtype),
tensor_spec.TensorSpec(outer_splits_shape, self._row_splits_dtype)] +
[inner_splits_spec for _ in range(self._ragged_rank - 1)])
return specs
def _to_components(self, value):
if is_ragged(value):
return [value.flat_values] + list(value.nested_row_splits)
else:
return [value]
def _from_components(self, tensor_list):
# Currently, Keras converts tensors to numpy and then calls from_components
# with those np.arrays. So if we see np.ndarrays, convert them to tensors.
# TODO(b/133606651) Update Keras to do something different here. Consider
# adding something like TypeSpec.from_numpy_components?
if isinstance(tensor_list[0], np.ndarray):
tensor_list = [ops.convert_to_tensor(t) for t in tensor_list]
result = tensor_list[0]
for row_splits in reversed(tensor_list[1:]):
result = RaggedTensor(result, row_splits, internal=True)
return result
# The RaggedTensorSpec tensor_list encoding uses to/from_variant ops
# to (un)box the component tensors in a way that allows for batching &
# unbatching.
@property
def _flat_tensor_specs(self):
# NOTE(mishragaurav): The default flat shape of a boxed `RaggedTensor` is
# `[]` (scalar), but a `RaggedTensorSpec` can also represent a batch of
# boxed `RaggedTensor` objects with shape `(...)` (and batches of batches,
# etc.), so the flat shape must be unknown.
return [tensor_spec.TensorSpec(None, dtypes.variant)]
def _to_tensor_list(self, value):
# pylint: disable=protected-access
return [value._to_variant(batched_input=False)]
def _to_batched_tensor_list(self, value):
# pylint: disable=protected-access
return [value._to_variant(batched_input=True)]
def _from_compatible_tensor_list(self, tensor_list):
if self._ragged_rank <= 0:
raise ValueError(
"ragged_rank must be non-negative; got %s." % self._ragged_rank)
result = RaggedTensor._from_variant( # pylint: disable=protected-access
tensor_list[0], dtype=self._dtype,
output_ragged_rank=self._ragged_rank)
if self._shape.ndims is not None:
outer_dim = tensor_shape.dimension_value(self._shape[0])
if outer_dim is not None:
result.row_splits.set_shape([outer_dim + 1])
result.flat_values.set_shape(
tensor_shape.TensorShape([None]).concatenate(
self._shape[1 + self._ragged_rank:]))
return result
def _batch(self, batch_size):
return RaggedTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype,
self._ragged_rank + 1)
def _unbatch(self):
# Note: Negative ragged_rank is allowed here because the dataset could
# be subsequently batched again. Errors are handled in
# RaggedTensorSpec._from_compatible_tensor_list()
return RaggedTensorSpec(self._shape[1:], self._dtype,
self._ragged_rank - 1)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return self
@classmethod
def from_value(cls, value):
return cls(shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype)
type_spec.register_type_spec_from_value_converter(
ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value)
#===============================================================================
# Convert value -> tensor
#===============================================================================
def convert_to_tensor_or_ragged_tensor(value,
dtype=None,
preferred_dtype=None,
name=None):
"""Converts value to a `RaggedTensor` or `Tensor`.
* If `value` is a `RaggedTensor`, then return it as-is.
* If `value` is a `RaggedTensorValue`, return a corresponding constant
`RaggedTensor`.
* Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.
Args:
value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has
a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing the type
is inferred from the type of `value`.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. This argument has no effect if `value` is already a
tensor, or when conversion is not possible.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `RaggedTensor`.
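#### Example:
A minimal illustrative sketch (assumes eager execution):
```python
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> convert_to_tensor_or_ragged_tensor(rt) is rt  # RaggedTensors pass through.
True
>>> t = convert_to_tensor_or_ragged_tensor([[1, 2], [3, 4]])
>>> isinstance(t, ops.Tensor)  # rectangular lists become dense Tensors.
True
```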
"""
if isinstance(value, RaggedTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError("Tensor conversion requested dtype %s for "
"RaggedTensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []):
flat_values = ops.convert_to_tensor(
value=value.flat_values,
dtype=dtype,
preferred_dtype=preferred_dtype,
name="flat_values")
return RaggedTensor.from_nested_row_splits(
flat_values, value.nested_row_splits, validate=False)
else:
return ops.convert_to_tensor(
value=value, dtype=dtype, preferred_dtype=preferred_dtype, name=name)
#===============================================================================
# Register RaggedTensor for use with session.run.
#===============================================================================
def _ragged_tensor_value_from_components(components):
components = list(components)
value = components.pop()
while components:
value = ragged_tensor_value.RaggedTensorValue(value, components.pop())
return value
def _ragged_tensor_session_fetch(rt):
components = rt.nested_row_splits + (rt.flat_values,)
return (components, _ragged_tensor_value_from_components)
def _ragged_tensor_session_feed(feed_key, feed_val):
key_components = feed_key.nested_row_splits + (feed_key.flat_values,)
val_components = feed_val.nested_row_splits + (feed_val.flat_values,)
return zip(key_components, val_components)
def _ragged_tensor_session_feed_for_partial_run(feed_key):
return feed_key.nested_row_splits + (feed_key.flat_values,)
session.register_session_run_conversion_functions(
RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,
_ragged_tensor_session_feed_for_partial_run)
#===============================================================================
# RaggedTensorType
#===============================================================================
class RaggedTensorType(object):
"""Encoding of a static type for a `RaggedTensor`.
Use this type to express/declare that an output must have the type of
`RaggedTensor`.
"""
def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):
"""Initializes a RaggedTensorType object.
Args:
dtype: data type of the `RaggedTensor`'s inner values.
ragged_rank: ragged_rank of the declared `RaggedTensor`.
row_splits_dtype: data type for the `RaggedTensor`'s row splits.
One of: `tf.int32` or `tf.int64`.
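#### Example:
A minimal illustrative sketch:
```python
>>> # Declares that an output is a ragged tensor of int32 values with one
>>> # ragged dimension and the default int64 row splits.
>>> rt_type = RaggedTensorType(dtype=dtypes.int32, ragged_rank=1)
>>> rt_type.ragged_rank
1
```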
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
self._dtype = dtype
self._ragged_rank = ragged_rank
self._row_splits_dtype = row_splits_dtype
dtype = property(lambda self: self._dtype)
ragged_rank = property(lambda self: self._ragged_rank)
row_splits_dtype = property(lambda self: self._row_splits_dtype)
#===============================================================================
# Helper Functions
#===============================================================================
def _assert_sparse_indices_are_ragged_right(indices):
"""Checks that the given SparseTensor.indices tensor is ragged-right.
Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
because the entry `[3, 1]` skips a cell.
Args:
indices: The SparseTensor indices to check.
Returns:
A list of control dependency op tensors.
"""
index_prefix = indices[:, :-1]
index_suffix = indices[:, -1]
# Check whether each index is starting a new row in the innermost dimension
# (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
# (Note: this skips the first index; we will check that separately below.)
index_prefix_changed = math_ops.reduce_any(
math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
# Check two cases:
# * For indices that start a new row: index_suffix[i] must be zero.
# * For indices that continue a row: index_suffix[i] must be equal to
# index_suffix[i-1]+1.
index_ok = array_ops.where(
index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))
# Also check that the very first index didn't skip any cells. The first
# index starts a new row (by definition), so its suffix should be zero.
sparse_indices_are_ragged_right = math_ops.logical_and(
math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
math_ops.reduce_all(index_ok))
message = [
"SparseTensor is not right-ragged", "SparseTensor.indices =", indices
]
return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
@ops.RegisterGradient("RaggedTensorToSparse")
def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,
sparse_values_grad,
unused_sparse_shape_grad):
"""Gradient for RaggedTensorToSparse."""
op_inputs_nested_row_splits = op.inputs[:-1]
op_inputs_flat_values = op.inputs[-1]
# No gradient for the RaggedTensor's nested_row_splits.
nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits)
# Gradient for the RaggedTensor's flat_values is formed by reshaping
# the gradient for the SparseTensor's values.
flat_values_shape = array_ops.shape(op_inputs_flat_values)
flat_values_gradient = array_ops.reshape(sparse_values_grad,
flat_values_shape)
return nested_row_splits_gradient + [flat_values_gradient]
def _assert_monotonic_increasing(tensor, message=None):
return check_ops.assert_non_negative(
tensor[1:] - tensor[:-1], message=message)
def _assert_zero(tensor, message=None):
return check_ops.assert_equal(
tensor, constant_op.constant(0, dtype=tensor.dtype), message=message)
def _nrows(tensor, out_type=dtypes.int32):
if isinstance(tensor, RaggedTensor):
return tensor.nrows(out_type=out_type)
else:
return array_ops.shape(tensor, out_type=out_type)[0]
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_tensor.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Array operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util.tf_export import tf_export
#===============================================================================
# Masking
#===============================================================================
@tf_export('ragged.boolean_mask')
def boolean_mask(data, mask, name=None):
"""Applies a boolean mask to `data` without flattening the mask dimensions.
Returns a potentially ragged tensor that is formed by retaining the elements
in `data` where the corresponding value in `mask` is `True`.
* `output[a1...aA, i, b1...bB] = data[a1...aA, j, b1...bB]`
Where `j` is the `i`th `True` entry of `mask[a1...aA]`.
Note that `output` preserves the mask dimensions `a1...aA`; this differs
from `tf.boolean_mask`, which flattens those dimensions.
Args:
data: A potentially ragged tensor.
mask: A potentially ragged boolean tensor. `mask`'s shape must be a prefix
of `data`'s shape. `rank(mask)` must be known statically.
name: A name prefix for the returned tensor (optional).
Returns:
A potentially ragged tensor that is formed by retaining the elements in
`data` where the corresponding value in `mask` is `True`.
* `rank(output) = rank(data)`.
* `output.ragged_rank = max(data.ragged_rank, rank(mask) - 1)`.
Raises:
ValueError: if `rank(mask)` is not known statically; or if `mask.shape` is
not a prefix of `data.shape`.
#### Examples:
```python
>>> # Aliases for True & False so data and mask line up.
>>> T, F = (True, False)
>>> tf.ragged.boolean_mask( # Mask a 2D Tensor.
... data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... mask=[[T, F, T], [F, F, F], [T, F, F]]).to_list()
[[1, 3], [], [7]]
>>> tf.ragged.boolean_mask( # Mask a 2D RaggedTensor.
... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]),
... tf.ragged.constant([[F, F, T], [F], [T, T]])).to_list()
[[3], [], [5, 6]]
>>> tf.ragged.boolean_mask( # Mask rows of a 2D RaggedTensor.
... tf.ragged.constant([[1, 2, 3], [4], [5, 6]]),
... tf.ragged.constant([True, False, True])).to_list()
[[1, 2, 3], [5, 6]]
```
"""
with ops.name_scope(name, 'RaggedMask', [data, mask]):
# Convert inputs to tensors.
data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
mask = ragged_tensor.convert_to_tensor_or_ragged_tensor(
mask, dtypes.bool, name='mask')
row_splits_dtype, (data, mask) = ragged_tensor.match_row_splits_dtypes(
data, mask, return_dtype=True)
# Get static rank of mask.
if mask.shape.ndims is None:
raise ValueError('mask.shape.ndims must be known statically.')
elif mask.shape.ndims == 0:
raise ValueError('mask cannot be scalar.')
# If mask is ragged, then recurse with a non-ragged mask.
if ragged_tensor.is_ragged(mask):
if not ragged_tensor.is_ragged(data):
data = ragged_tensor.RaggedTensor.from_tensor(
data, ragged_rank=mask.ragged_rank,
row_splits_dtype=mask.row_splits.dtype)
# Check that mask.nested_row_splits is a prefix of
# data.nested_row_splits.
splits_list = [
mask.nested_row_splits, data.nested_row_splits[:mask.ragged_rank]
]
with ops.control_dependencies(
ragged_util.assert_splits_match(splits_list)):
# Strip off ragged `splits` until `mask` is non-ragged. Keep the splits
# that we strip off in `splits`, so we can add them back on after
# we recursively mask the non-ragged data.
splits = []
while ragged_tensor.is_ragged(mask):
if mask.shape.ndims > 2:
splits.append(mask.row_splits)
else:
# Count the number of True mask values in each row to find the
# lengths of the filtered rows; then convert to splits.
int_mask = ragged_functional_ops.map_flat_values(
math_ops.cast, mask, dtype=row_splits_dtype)
masked_row_lengths = ragged_math_ops.reduce_sum(int_mask, axis=1)
splits.append(ragged_util.lengths_to_splits(masked_row_lengths))
mask = mask.values
data = data.values
# Recursively apply the nested non-ragged mask to the nested data.
masked_values = boolean_mask(data, mask)
# Add the ragged `splits` back to the result.
masked_values = ragged_tensor.RaggedTensor.from_nested_row_splits(
masked_values, splits, validate=False)
return masked_values
# If mask is non-ragged and has rank 1, and data is ragged, then build a
# ragged tensor with the indicated rows.
elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1:
# Get the masked splits: first get the length of each row, then filter
# out the rows that we are deleting, and convert that filtered set of
# masks back to a splits tensor.
lengths = data.row_lengths()
masked_lengths = array_ops.boolean_mask(lengths, mask)
masked_splits = ragged_util.lengths_to_splits(masked_lengths)
# Get the masked values: first get row ids corresponding to each
# value, then use tf.gather to build a boolean mask that's false for
# values that come from rows that we are deleting, and use that mask to
# construct the masked values tensor.
segment_ids = segment_id_ops.row_splits_to_segment_ids(data.row_splits)
segment_mask = array_ops.gather(mask, segment_ids)
masked_values = boolean_mask(data.values, segment_mask)
return ragged_tensor.RaggedTensor.from_row_splits(masked_values,
masked_splits,
validate=False)
# If mask is non-ragged and has rank>1, then convert it to be ragged,
# with a ragged rank matching data.
if ragged_tensor.is_ragged(data):
mask = ragged_tensor.RaggedTensor.from_tensor(
mask, ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1),
row_splits_dtype=data.row_splits.dtype)
return boolean_mask(data, mask)
# Otherwise, data and mask are both `Tensor`s.
else:
# Apply `boolean_mask` to get the masked values.
masked_values = array_ops.boolean_mask(data, mask)
if mask.shape.ndims >= 2:
# Add the innermost ragged dimension. For each innermost cell, get the
# number of values it contains. Then flatten that to get a list of
# cell lengths, and convert it to splits. Finally, combine the splits
# and values to get the innermost ragged tensor.
masked_lengths = math_ops.count_nonzero(mask, axis=-1,
dtype=row_splits_dtype)
flattened_masked_lengths = array_ops.reshape(masked_lengths, [-1])
masked_values = ragged_tensor.RaggedTensor.from_row_lengths(
masked_values, flattened_masked_lengths, validate=False)
# Wrap remaining ragged dimensions.
if mask.shape.ndims > 2:
mask_shape = array_ops.shape(mask, out_type=row_splits_dtype)
split_size = math_ops.cumprod(mask_shape) + 1
for dim in range(mask.shape.ndims - 3, -1, -1):
elt_size = mask_shape[dim + 1]
masked_splits = math_ops.range(split_size[dim]) * elt_size
masked_values = ragged_tensor.RaggedTensor.from_row_splits(
masked_values, masked_splits, validate=False)
return masked_values
#===============================================================================
# Tiling
#===============================================================================
def tile(input, multiples, name=None): # pylint: disable=redefined-builtin
"""Constructs a `RaggedTensor` by tiling a given `RaggedTensor`.
The values of `input` are replicated `multiples[i]` times along the
`i`th dimension (for each dimension `i`). For every dimension `axis` in
`input`, the length of each output element in that dimension is the
  length of the corresponding input element multiplied by `multiples[axis]`.
Args:
input: A `RaggedTensor`.
multiples: A 1-D integer `Tensor`. Length must be the same as the number of
dimensions in `input`.
name: A name for the operation (optional).
Returns:
A `RaggedTensor` with the same type, rank, and ragged_rank as `input`.
#### Example:
```python
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> ragged.tile(rt, [3, 2])
[[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]]
```
"""
with ops.name_scope(name, 'RaggedTile', [input, multiples]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, name='input')
if not ragged_tensor.is_ragged(input):
return array_ops.tile(input, multiples, name)
multiples = ragged_util.convert_to_int_tensor(
multiples, name='multiples', dtype=input.row_splits.dtype)
multiples.shape.assert_has_rank(1)
# If the constant value of `multiples` is available, then we can use it
# to skip tiling dimensions where `multiples=1`.
const_multiples = tensor_util.constant_value(multiples)
return ragged_tensor.RaggedTensor.from_nested_row_splits(
_tile_ragged_values(input, multiples, const_multiples),
_tile_ragged_splits(input, multiples, const_multiples),
validate=False)
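# Hedged usage sketch (illustrative only; `_example_tile_ragged` is a
# hypothetical helper): tiling a 2-D ragged tensor, mirroring the docstring
# example above.
def _example_tile_ragged():
  rt = ragged_tensor.RaggedTensor.from_row_splits(
      values=[1, 2, 3], row_splits=[0, 2, 3])  # [[1, 2], [3]]
  # Repeat each row's values twice (multiples[1]) and the rows three times
  # (multiples[0]); expected:
  # [[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]]
  return tile(rt, [3, 2])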
def _tile_ragged_values(rt_input, multiples, const_multiples=None):
"""Builds flat_values tensor for a tiled `RaggedTensor`.
Returns a tensor that repeats the values in
`rt_input.flat_values` in the
appropriate pattern to construct a `RaggedTensor` that tiles `rt_input` as
specified by `multiples`.
Args:
rt_input: The `RaggedTensor` whose values should be repeated.
multiples: A 1-D integer `tensor`, indicating how many times each dimension
should be repeated.
const_multiples: Optional constant value for multiples. Used to skip tiling
dimensions where `multiples=1`.
Returns:
A `Tensor` with the same type and rank as `rt_input.flat_values`.
#### Example:
```python
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> _tile_ragged_values(rt, [3, 2])
[1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3]
```
"""
ragged_rank = rt_input.ragged_rank
nested_splits = rt_input.nested_row_splits
# Pointers to the values in `rt_input.flat_values`.
inner_value_ids = math_ops.range(nested_splits[-1][-1])
# For each ragged dimension (working from the innermost to outermost),
# expand `inner_value_ids` as necessary to tile that dimension.
prev_splits = None
for axis in range(ragged_rank, 0, -1):
# Ragged splits for this dimension.
splits = nested_splits[axis - 1]
# Adjust splits so they point into `inner_value_ids` (instead of just
# pointing into the next dimension's values).
if prev_splits is not None: # Not the first pass through the loop.
splits = array_ops.gather(prev_splits * multiples[axis + 1], splits)
# Repeat each element in this ragged dimension `multiples[axis]` times.
if const_multiples is None or const_multiples[axis] != 1:
inner_value_ids = ragged_util.repeat_ranges(inner_value_ids, splits,
multiples[axis])
prev_splits = splits
# Gather the tiled inner values.
ragged_tiled_values = array_ops.gather(rt_input.flat_values, inner_value_ids)
# Tile the flat_values for the uniform dimensions (i.e., for `axis=0` plus
# `axis=range(ragged_rank, rank)`).
inner_repeats = array_ops.concat([multiples[:1], multiples[ragged_rank + 1:]],
axis=0)
return array_ops.tile(ragged_tiled_values, inner_repeats)
def _tile_ragged_splits(rt_input, multiples, const_multiples=None):
"""Builds nested_split tensors for a tiled `RaggedTensor`.
Returns a list of split tensors that can be used to construct the
`RaggedTensor` that tiles `rt_input` as specified by `multiples`.
Args:
rt_input: The `RaggedTensor` that is being tiled.
multiples: A 1-D integer `tensor`, indicating how many times each dimension
should be repeated.
const_multiples: Optional constant value for multiples. Used to skip tiling
dimensions where `multiples=1`.
Returns:
A list of 1-D integer `Tensor`s (one for each ragged dimension in
`rt_input`).
#### Example:
```python
>>> rt = tf.ragged.constant([[1, 2], [3]])
>>> _tile_ragged_splits(rt, [3, 2])
[0, 4, 6, 10, 12, 16, 18]
```
"""
ragged_rank = rt_input.ragged_rank
nested_splits = rt_input.nested_row_splits
# projected_splits[src_axis, dst_axis] contains the split points that divide
# the rows from src_axis in the list of dst_axis values. E.g.,
# projected_splits[i, i] = nested_splits[i], and
# projected_splits[i, i+1] = gather(nested_splits[i+1], nested_splits[i]).
projected_splits = [{i: nested_splits[i]} for i in range(ragged_rank)]
for src_axis in range(ragged_rank):
for dst_axis in range(src_axis + 1, ragged_rank - 1):
projected_splits[src_axis][dst_axis] = array_ops.gather(
nested_splits[dst_axis],
projected_splits[src_axis][dst_axis - 1])
# For each ragged dimension: nested_splits[axis] -> result_splits[axis].
result_splits = []
for axis in range(ragged_rank):
# Get the length of each row for the input tensor for this dimension.
input_lengths = nested_splits[axis][1:] - nested_splits[axis][:-1]
# Multiply those lengths by the `multiples` of dimension axis+1, since
# each value will be repeated that number of times.
output_lengths = input_lengths * multiples[axis + 1]
# Repeat ranges of the row lengths as necessary for them to be tiled in
# each ragged dimension `d < axis`. (Start with dimension d=axis-1, and
# work our way up to dimension d=0.)
repeats = 1
for d in range(axis - 1, -1, -1):
if const_multiples is None or const_multiples[d + 1] != 1:
splits = projected_splits[d][axis - 1] * repeats
output_lengths = ragged_util.repeat_ranges(output_lengths, splits,
multiples[d + 1])
repeats *= multiples[d + 1]
# Tile splits for the outermost (uniform) dimension.
output_lengths = array_ops.tile(output_lengths, multiples[:1])
# Convert to splits.
result_splits.append(ragged_util.lengths_to_splits(output_lengths))
return result_splits
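# Hedged arithmetic sketch (plain Python, no TF ops; `_example_tiled_splits` is
# a hypothetical helper): reproduces the `_tile_ragged_splits` docstring
# example for rt=[[1, 2], [3]] and multiples=[3, 2].
def _example_tiled_splits():
  input_lengths = [2, 1]                           # row lengths of rt
  output_lengths = [n * 2 for n in input_lengths]  # multiples[1] == 2 -> [4, 2]
  output_lengths = output_lengths * 3              # multiples[0] == 3 -> [4, 2, 4, 2, 4, 2]
  splits = [0]
  for length in output_lengths:                    # lengths_to_splits: cumulative sum
    splits.append(splits[-1] + length)
  return splits                                    # [0, 4, 6, 10, 12, 16, 18]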
#===============================================================================
# Reshaping
#===============================================================================
def expand_dims(input, axis, name=None): # pylint: disable=redefined-builtin
"""Inserts a dimension with shape 1 into a potentially ragged tensor's shape.
  Given a potentially ragged tensor `input`, this operation inserts a
dimension with size 1 at the dimension `axis` of `input`'s shape.
* If `input` is a `Tensor`, then this is equivalent to
`tf.expand_dims`.
* If `input` is ragged, and `axis=0`, then the new dimension will be
uniform; but the previously outermost dimension will become ragged.
* If `input` is ragged, and `0 < axis < input.ragged_rank`, then the
new dimension will be ragged.
  * If `input` is ragged, and `axis >= input.ragged_rank`, then the new
dimension will be uniform.
The following table gives some examples showing how `ragged.expand_dims`
impacts the shapes of different input tensors. Ragged dimensions are
indicated by enclosing them in parentheses.
input.shape | axis | result.shape
----------------------- | ---- | -----------------------------
`[D1, D2]` | `0` | `[1, D1, D2]`
`[D1, D2]` | `1` | `[D1, 1, D2]`
`[D1, D2]` | `2` | `[D1, D2, 1]`
`[D1, (D2), (D3), D4]` | `0` | `[1, (D1), (D2), (D3), D4]`
`[D1, (D2), (D3), D4]` | `1` | `[D1, (1), (D2), (D3), D4]`
`[D1, (D2), (D3), D4]` | `2` | `[D1, (D2), (1), (D3), D4]`
`[D1, (D2), (D3), D4]` | `3` | `[D1, (D2), (D3), 1, D4]`
`[D1, (D2), (D3), D4]` | `4` | `[D1, (D2), (D3), D4, 1]`
Args:
    input: The potentially ragged tensor that should be expanded with a new
dimension.
axis: An integer constant indicating where the new dimension should be
inserted.
name: A name for the operation (optional).
Returns:
A tensor with the same values as `input`, with an added dimension of
size 1 at `axis`.
#### Examples:
```python
>>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> print(rt.shape)
TensorShape([2, None])
>>> expanded = ragged.expand_dims(rt, axis=0)
>>> print(expanded.shape, expanded)
TensorShape([1, None, None]) [[[1, 2], [3]]]
>>> expanded = ragged.expand_dims(rt, axis=1)
>>> print(expanded.shape, expanded)
TensorShape([2, None, None]) [[[1, 2]], [[3]]]
>>> expanded = ragged.expand_dims(rt, axis=2)
>>> print(expanded.shape, expanded)
TensorShape([2, None, 1]) [[[1], [2]], [[3]]]
```
"""
with ops.name_scope(name, 'RaggedExpandDims', [input]):
input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
input, name='input')
if not ragged_tensor.is_ragged(input):
return array_ops.expand_dims(input, axis)
ndims = None if input.shape.ndims is None else input.shape.ndims + 1
axis = ragged_util.get_positive_axis(axis, ndims)
if axis == 0:
values = input
splits = array_ops.stack([0, input.nrows()])
elif axis == 1:
values = input
splits = math_ops.range(input.nrows() + 1)
else:
values = expand_dims(input.values, axis - 1)
splits = input.row_splits
return ragged_tensor.RaggedTensor.from_row_splits(values, splits,
validate=False)
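# Hedged usage sketch (illustrative only; `_example_expand_dims_axes` is a
# hypothetical helper): the three axis choices from the docstring example above.
def _example_expand_dims_axes():
  rt = ragged_tensor.RaggedTensor.from_row_splits(
      values=[1, 2, 3], row_splits=[0, 2, 3])  # [[1, 2], [3]], shape [2, None]
  outer = expand_dims(rt, axis=0)   # shape [1, None, None]: [[[1, 2], [3]]]
  middle = expand_dims(rt, axis=1)  # shape [2, None, None]: [[[1, 2]], [[3]]]
  inner = expand_dims(rt, axis=2)   # shape [2, None, 1]:    [[[1], [2]], [[3]]]
  return outer, middle, inner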
#===============================================================================
# RaggedTensor Size
#===============================================================================
def size(input, out_type=dtypes.int32, name=None): # pylint: disable=redefined-builtin
"""Returns the size of a potentially ragged tensor.
The size of a ragged tensor is the size of its inner values.
Args:
input: A potentially ragged `Tensor`.
out_type: The numeric output type for the operation.
name: A name for the operation (optional).
Returns:
A Tensor of type `out_type`.
#### Example:
```python
>>> tf.size(tf.ragged.constant([[1, 2], [3]]))
3
```
"""
if ragged_tensor.is_ragged(input):
return array_ops.size(input.flat_values, out_type=out_type, name=name)
else:
return array_ops.size(input, out_type=out_type, name=name)
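# Hedged usage sketch (illustrative only; `_example_ragged_size` is a
# hypothetical helper): `size` counts the innermost (flat) values, so a ragged
# [[1, 2], [3]] has size 3.
def _example_ragged_size():
  rt = ragged_tensor.RaggedTensor.from_row_splits(
      values=[1, 2, 3], row_splits=[0, 2, 3])  # [[1, 2], [3]]
  return size(rt)  # expected: 3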
#===============================================================================
# ragged.rank
#===============================================================================
def rank(input, name=None): # pylint: disable=redefined-builtin
"""Returns the rank of a RaggedTensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, None, None]
t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]])
tf.rank(t) # 3
```
Args:
input: A `RaggedTensor`
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, 'RaggedRank', [input]) as name:
if not ragged_tensor.is_ragged(input):
return array_ops.rank(input, name)
return input.ragged_rank + array_ops.rank(input.flat_values)
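# Hedged usage sketch (illustrative only; `_example_ragged_rank` is a
# hypothetical helper): `rank` is the ragged_rank plus the rank of the flat
# values, so the rank-3 tensor from the docstring example reports 3.
def _example_ragged_rank():
  inner = ragged_tensor.RaggedTensor.from_row_splits(
      values=[1, 2, 2, 3, 3, 3, 4, 4, 4, 4], row_splits=[0, 1, 3, 6, 10])
  rt = ragged_tensor.RaggedTensor.from_row_splits(
      values=inner, row_splits=[0, 2, 4])  # [[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]]
  return rank(rt)  # expected: 3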
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_array_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shapes & broadcasting for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
class RaggedTensorDynamicShape(object):
"""A collection of tensors encoding the shape of a potentially ragged tensor.
Each `RaggedTensorDynamicShape` consists of an ordered list of dimension
sizes. There are two dimension types:
  * "Uniform dimensions" are dimensions where all slices have the same
length. `RaggedTensorDynamicShape` records the size of each uniform
dimension using a single scalar integer.
* "Ragged dimensions" are dimensions whose slices may have different
lengths. `RaggedTensorDynamicShape` records the size of each ragged
dimension using an integer vector containing the slice lengths for all
the slices across that dimension.
Furthermore, there are two ways a dimension might be encoded:
* "Partitioned dimensions" are dimensions that are encoded using a
    `RaggedTensor`'s `nested_row_splits`. The outermost partitioned
dimension must be uniform, and the innermost partitioned dimension must
be ragged.
* "Inner dimensions" are dimensions that are encoded using a
`RaggedTensor`'s `flat_values`. Inner dimensions are always uniform.
The sizes of partitioned dimensions are recorded using `partitioned_dim_sizes`
and `inner_dim_sizes`:
  * `partitioned_dim_sizes` is a list of tensors (one for each partitioned
dimension).
* For uniform dimensions, the tensor is an integer scalar specifying the
size of all slices across that dimension.
* For ragged dimensions, the tensor is an integer vector specifying the
size of each slice across that dimension.
* `inner_dim_sizes` is a single integer vector, where each element
specifies the size of a single inner dimension.
Examples:
Tensor | Ragged | Partitioned Dim Sizes | Inner Dim
: Rank : : Sizes
------------------------------ | ------ | ---------------------- | ----------
`[[1, 2, 3], [4, 5, 6]]` | 0 | | `2, 3`
`[[1, 2], [], [3, 4, 5]]` | 1 | `3, (2, 0, 3)` |
`[[[1, 2], [3, 4]], [[5, 6]]]` | 1 | `2, (2, 1)` | 2
`[[[1, 2], [3]], [[4, 5]]]` | 2 | `2, (2, 1), (2, 1, 2)` |
"""
def __init__(self, partitioned_dim_sizes, inner_dim_sizes,
dim_size_dtype=None):
"""Creates a RaggedTensorDynamicShape.
Args:
partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for
each partitioned dimension. If dimension `d` is uniform, then
`partitioned_dim_sizes[d]` must be an integer scalar, specifying the
size of all slices across dimension `d`. If dimension `d` is ragged,
then `partitioned_dim_sizes[d]` must be an integer vector, specifying
the size of each slice across dimension `d`.
inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the
number of inner dimensions. `inner_dim_sizes[n]` is the size of all
slices across the `n`th inner dimension (which is the
        `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor).
dim_size_dtype: dtype for dimension sizes. If not specified, then it
is chosen based on the dtypes of `partitioned_dim_sizes` and
`inner_dim_sizes`.
"""
assert isinstance(partitioned_dim_sizes, (list, tuple))
with ops.name_scope(None, 'RaggedTensorDynamicShape',
(partitioned_dim_sizes, inner_dim_sizes)):
partitioned_dim_sizes = tuple(
ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i)
for (i, size) in enumerate(partitioned_dim_sizes))
inner_dim_sizes = ops.convert_to_tensor(
inner_dim_sizes, name='inner_dim_sizes')
# Validate shapes.
if partitioned_dim_sizes:
for axis, dimension_size in enumerate(partitioned_dim_sizes):
if dimension_size.shape.ndims is None:
raise ValueError(
'rank of partitioned_dim_sizes[%d] is unknown' % axis)
dimension_size.shape.with_rank_at_most(1)
if partitioned_dim_sizes[0].shape.ndims == 1:
raise ValueError('outermost partitioned dimension must be uniform')
if partitioned_dim_sizes[-1].shape.ndims == 0:
raise ValueError('innermost partitioned dimension must be ragged')
inner_dim_sizes.shape.assert_has_rank(1)
# Convert dimension size tensors to a single dtype.
if dim_size_dtype is None:
dim_size_dtypes = set([p.dtype for p in partitioned_dim_sizes
if p.shape.ndims == 1])
if not dim_size_dtypes:
dim_size_dtype = dtypes.int64
elif len(dim_size_dtypes) == 1:
dim_size_dtype = dim_size_dtypes.pop()
else:
if not ragged_config.auto_cast_partition_dtype():
raise ValueError('partitioned_dim_sizes must have matching dtypes')
dim_size_dtype = dtypes.int64
partitioned_dim_sizes = tuple(math_ops.cast(p, dim_size_dtype)
for p in partitioned_dim_sizes)
inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)
self._partitioned_dim_sizes = partitioned_dim_sizes
self._inner_dim_sizes = inner_dim_sizes
def __repr__(self):
return ('RaggedTensorDynamicShape'
'(partitioned_dim_sizes=%r, inner_dim_sizes=%r)' %
(self._partitioned_dim_sizes, self._inner_dim_sizes))
@staticmethod
def from_dim_sizes(dim_sizes):
"""Constructs a ragged shape from a list of dimension sizes.
This list contains a single tensor for each dimension, where the tensor
is a scalar if the dimension is uniform, or a vector if the dimension is
ragged.
Args:
dim_sizes: List of int32 or int64 scalars or vectors.
Returns:
A RaggedTensorDynamicShape.
"""
with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes',
[dim_sizes]):
dim_sizes = tuple(
ops.convert_to_tensor(size, preferred_dtype=dtypes.int64,
name='dim_sizes') for size in dim_sizes)
# Split the dimensions into partitioned & inner dimensions.
inner_split = 0
for dim, dim_size in enumerate(dim_sizes):
if dim_size.shape.ndims == 1:
inner_split = dim + 1
elif dim_size.shape.ndims != 0:
raise ValueError('Each dim_size must be a scalar or a vector')
return RaggedTensorDynamicShape(dim_sizes[:inner_split],
dim_sizes[inner_split:])
@classmethod
def from_tensor(cls, rt_input, dim_size_dtype=None):
"""Constructs a ragged shape for a potentially ragged tensor."""
with ops.name_scope(None, 'RaggedTensorDynamicShapeFromTensor', [rt_input]):
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)
if not ragged_tensor.is_ragged(rt_input):
return cls([], array_ops.shape(rt_input))
else:
partitioned_dim_sizes = (
(rt_input.nrows(),) + rt_input.nested_row_lengths())
return RaggedTensorDynamicShape(
partitioned_dim_sizes,
array_ops.shape(rt_input.flat_values)[1:],
dim_size_dtype=dim_size_dtype)
def dimension_size(self, axis):
"""Returns the size of slices across the specified dimension."""
if not isinstance(axis, int):
raise TypeError('axis must be an integer')
partitioned_ndims = len(self._partitioned_dim_sizes)
if axis < partitioned_ndims:
return self._partitioned_dim_sizes[axis]
else:
return self._inner_dim_sizes[axis - partitioned_ndims]
def is_ragged(self, axis):
"""Returns true if the indicated dimension is ragged."""
if not isinstance(axis, int):
raise TypeError('axis must be an integer')
rank = self.rank
if axis < 0:
raise ValueError('Negative axis values are not supported')
elif rank is not None and axis >= rank:
raise ValueError('Expected axis=%s < rank=%s' % (axis, rank))
else:
return (axis > 0 and axis < len(self._partitioned_dim_sizes) and
self._partitioned_dim_sizes[axis].shape.ndims == 1)
@property
def rank(self):
"""The number of dimensions in this shape, or None if unknown."""
inner_ndims = tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])
if inner_ndims is None:
return None
else:
return len(self._partitioned_dim_sizes) + inner_ndims
@property
def partitioned_dim_sizes(self):
"""The partitioned dimension sizes for this shape.
Returns:
A `list` of 0-D or 1-D integer `Tensor`.
"""
return self._partitioned_dim_sizes
@property
def inner_dim_sizes(self):
"""The inner dimension sizes for this shape.
Returns:
A 1-D integer `Tensor`.
"""
return self._inner_dim_sizes
@property
def num_partitioned_dimensions(self):
"""The number of partitioned dimensions in this shape."""
return len(self._partitioned_dim_sizes)
@property
def num_inner_dimensions(self):
"""The number of inner dimensions, or `None` if not statically known."""
return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])
@property
def dim_size_dtype(self):
"""DType used by this shape for dimension sizes."""
return self._inner_dim_sizes.dtype
def broadcast_to_rank(self, rank):
"""Adds leading size-1 dimensions to broadcast `self` to the given rank.
E.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)`
is `[1, 1, 3, (D2), 4]`.
Args:
rank: The rank for the returned shape.
Returns:
A RaggedTensorDynamicShape with `rank` dimensions, whose inner dimensions
have the same size as `self` and whose outer dimensions have size `1`.
Raises:
ValueError: If `self.rank` is unknown or greater than `rank`.
"""
if self.rank is None:
raise ValueError('Unable to broadcast: self.rank is unknown')
dims_to_add = rank - self.rank
if dims_to_add < 0:
raise ValueError('Unable to broadcast: rank=%d must be greater than '
'self.rank=%d.' % (rank, self.rank))
elif dims_to_add == 0:
return self
elif self._partitioned_dim_sizes:
partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes
return RaggedTensorDynamicShape(partitioned_dims, self._inner_dim_sizes)
else:
inner_dims = array_ops.concat(
[array_ops.ones([dims_to_add], self.dim_size_dtype),
self.inner_dim_sizes],
axis=0)
return RaggedTensorDynamicShape([], inner_dims)
def broadcast_dimension(self, axis, lengths):
"""Returns a shape that is broadcast-compatible with self & lengths.
    * If dimension[axis] is uniform and lengths is a scalar, then check
      that either lengths==1, dimension[axis]==1, or dimension[axis]==lengths,
      and tile dimension[axis] with lengths repeats if dimension[axis]==1.
    * If dimension[axis] is uniform and lengths is a vector, then check
      that dimension[axis]==1, and raggedly tile dimension[axis] with
      lengths repeats. (We could skip tiling if we statically knew that
      lengths == 1.)
* If dimension[axis] is ragged and lengths is a scalar, then check
that lengths==1.
* If dimension[axis] is ragged and lengths is a vector, then check
that self.dimension_size(axis) == lengths.
Args:
axis: `int`. The dimension to broadcast.
lengths: 0-D or 1-D integer `Tensor`.
Returns:
A `RaggedTensorDynamicShape`.
"""
lengths = ragged_util.convert_to_int_tensor(
lengths, name='lengths', dtype=self.dim_size_dtype)
# Check whether lengths is a scalar (for uniform dimensions) or
# vector (for ragged dimensions).
if lengths.shape.ndims is None:
raise ValueError('lengths must have a known rank.')
elif lengths.shape.ndims > 1:
raise ValueError('lengths must be a scalar or vector')
else:
lengths_is_scalar = (lengths.shape.ndims == 0)
# Verify that the shapes are compatible.
if self.is_ragged(axis):
if lengths_is_scalar:
condition = math_ops.equal(lengths, 1)
else:
condition = math_ops.reduce_all(
math_ops.equal(lengths, self.dimension_size(axis)))
else:
axis_dim_size = self.dimension_size(axis)
if lengths_is_scalar:
condition = (
math_ops.equal(lengths, 1) | math_ops.equal(axis_dim_size, 1)
| math_ops.equal(axis_dim_size, lengths))
else:
condition = math_ops.equal(axis_dim_size, 1)
broadcast_err = [
'Unable to broadcast: dimension size mismatch in dimension', axis,
'lengths=', lengths, 'dim_size=',
self.dimension_size(axis)
]
broadcast_check = control_flow_ops.Assert(
condition, data=broadcast_err, summarize=10)
with ops.control_dependencies([broadcast_check]):
# Partitioned dimensions:
if axis < self.num_partitioned_dimensions:
if self.is_ragged(axis):
# Use an identity op to make sure the check actually gets run.
return RaggedTensorDynamicShape(
self._partitioned_dim_sizes,
array_ops.identity(self.inner_dim_sizes))
else:
return self._broadcast_uniform_partitioned_dimension(axis, lengths)
# Inner dimensions:
else:
if lengths_is_scalar:
return self._broadcast_inner_dimension_to_uniform(axis, lengths)
else:
if axis == 0:
raise ValueError('Unable to broadcast: '
'outermost dimension must be uniform.')
return self._broadcast_inner_dimension_to_ragged(axis, lengths)
def num_slices_in_dimension(self, axis):
"""Returns the total number of slices across the indicated dimension."""
if axis < 0:
return constant_op.constant(1, dtype=self.dim_size_dtype)
elif self.is_ragged(axis):
return math_ops.reduce_sum(self._partitioned_dim_sizes[axis])
else:
return self.dimension_size(axis) * self.num_slices_in_dimension(axis - 1)
def _broadcast_uniform_partitioned_dimension(self, axis, lengths):
"""Broadcasts the partitioned dimension `axis` to match `lengths`."""
axis_dim_size = self.dimension_size(axis)
partitioned_sizes = list(self._partitioned_dim_sizes[:axis])
if lengths.shape.ndims == 0:
lengths = array_ops.where(
math_ops.equal(axis_dim_size, 1), lengths, axis_dim_size)
repeats = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, 1)
splits = array_ops.stack([0, self.num_slices_in_dimension(axis)])
else:
splits = math_ops.range(
array_ops.size(lengths, out_type=self.dim_size_dtype) + 1)
repeats = lengths
partitioned_sizes.append(lengths)
for dim_size in self._partitioned_dim_sizes[axis + 1:]:
if dim_size.shape.ndims == 0:
partitioned_sizes.append(dim_size)
splits *= dim_size
else:
partitioned_sizes.append(
ragged_util.repeat_ranges(dim_size, splits, repeats))
splits = array_ops.gather(
ragged_util.lengths_to_splits(dim_size), splits)
inner_sizes = self._inner_dim_sizes
return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes)
def _broadcast_inner_dimension_to_uniform(self, axis, length):
"""Broadcasts the inner dimension `axis` to match `lengths`."""
dim_size = self.dimension_size(axis)
axis_in_inner_dims = axis - self.num_partitioned_dimensions
partitioned_sizes = self._partitioned_dim_sizes
inner_sizes = array_ops.concat([
self._inner_dim_sizes[:axis_in_inner_dims],
[array_ops.where(math_ops.equal(dim_size, 1), length, dim_size)],
self._inner_dim_sizes[axis_in_inner_dims + 1:]
],
axis=0)
return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes)
def _broadcast_inner_dimension_to_ragged(self, axis, lengths):
axis_in_inner_dims = axis - self.num_partitioned_dimensions
partitioned_sizes = (
self._partitioned_dim_sizes + tuple([
self._inner_dim_sizes[i] for i in range(axis_in_inner_dims)
]) + (lengths,))
inner_sizes = self._inner_dim_sizes[axis_in_inner_dims + 1:]
return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes)
def with_dim_size_dtype(self, dtype):
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError('dtype must be int32 or int64')
if self.dim_size_dtype == dtype:
return self
return RaggedTensorDynamicShape(
[math_ops.cast(p, dtype) for p in self._partitioned_dim_sizes],
math_ops.cast(self._inner_dim_sizes, dtype))
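# Hedged usage sketch (illustrative only; `_example_shape_from_ragged_tensor`
# is a hypothetical helper): building a RaggedTensorDynamicShape for a ragged
# tensor and querying which dimensions are ragged.
def _example_shape_from_ragged_tensor():
  rt = ragged_tensor.RaggedTensor.from_row_splits(
      values=[1, 2, 3, 4, 5], row_splits=[0, 2, 2, 5])  # [[1, 2], [], [3, 4, 5]]
  shape = RaggedTensorDynamicShape.from_tensor(rt)
  # Dimension 0 is uniform (size 3); dimension 1 is ragged with lengths [2, 0, 3].
  return shape.is_ragged(0), shape.is_ragged(1), shape.dimension_size(1)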
def broadcast_dynamic_shape(shape_x, shape_y):
"""Returns the shape formed by broadcasting two shapes to be compatible.
Args:
shape_x: A `RaggedTensorDynamicShape`
shape_y: A `RaggedTensorDynamicShape`
Returns:
A `RaggedTensorDynamicShape`.
Raises:
ValueError: If `shape_x` and `shape_y` are not broadcast-compatible.
"""
if not isinstance(shape_x, RaggedTensorDynamicShape):
raise TypeError('shape_x must be a RaggedTensorDynamicShape')
if not isinstance(shape_y, RaggedTensorDynamicShape):
raise TypeError('shape_y must be a RaggedTensorDynamicShape')
# Broadcast both shapes to have the same rank.
if shape_x.rank is None or shape_y.rank is None:
raise ValueError('Unable to broadcast: unknown rank')
broadcast_rank = max(shape_x.rank, shape_y.rank)
shape_x = shape_x.broadcast_to_rank(broadcast_rank)
shape_y = shape_y.broadcast_to_rank(broadcast_rank)
# Broadcast dimensions one at a time, starting from the outermost dimension.
for axis in range(broadcast_rank):
shape_x = shape_x.broadcast_dimension(axis, shape_y.dimension_size(axis))
shape_y = shape_y.broadcast_dimension(axis, shape_x.dimension_size(axis))
return shape_x
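# Hedged usage sketch (illustrative only; `_example_broadcast_dynamic_shape` is
# a hypothetical helper): broadcasting a rank-1 shape [1] against the ragged
# shape [3, (2, 0, 3)].
def _example_broadcast_dynamic_shape():
  shape_x = RaggedTensorDynamicShape.from_dim_sizes([3, [2, 0, 3]])
  shape_y = RaggedTensorDynamicShape.from_dim_sizes([1])
  # The broadcast shape has rank 2, with a uniform outer dimension of size 3
  # and a ragged inner dimension with lengths [2, 0, 3].
  return broadcast_dynamic_shape(shape_x, shape_y)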
def broadcast_to(rt_input, shape, broadcast_inner_dimensions=True):
"""Broadcasts a potentially ragged tensor to a ragged shape.
Tiles `rt_input` as necessary to match the given shape.
Behavior is undefined if `rt_input` is not broadcast-compatible with `shape`.
Args:
rt_input: The potentially ragged tensor to broadcast.
shape: A `RaggedTensorDynamicShape`
broadcast_inner_dimensions: If false, then inner dimensions will not be
tiled.
Returns:
A potentially ragged tensor whose values are taken from
`rt_input`, and whose shape matches `shape`.
"""
if not isinstance(shape, RaggedTensorDynamicShape):
raise TypeError('shape must be a RaggedTensorDynamicShape')
rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)
# Broadcasting to a uniform shape.
if shape.num_partitioned_dimensions == 0:
return _broadcast_to_uniform_shape(rt_input, shape,
broadcast_inner_dimensions)
else:
return _broadcast_to_ragged_shape(rt_input, shape,
broadcast_inner_dimensions)
def _broadcast_to_uniform_shape(rt_input, shape, broadcast_inner_dimensions):
"""Broadcasts rt_input to the uniform shape `shape`."""
if isinstance(rt_input, ragged_tensor.RaggedTensor):
raise ValueError('Incompatible with shape: ragged rank mismatch')
if broadcast_inner_dimensions:
return array_ops.broadcast_to(rt_input, shape.inner_dim_sizes)
else:
return rt_input
def _broadcast_to_ragged_shape(rt_input, dst_shape, broadcast_inner_dimensions):
"""Broadcasts rt_input to the ragged shape `dst_shape`."""
# Check that rt_input and dst_shape have the same row_splits dtype.
if (isinstance(rt_input, ragged_tensor.RaggedTensor) and
rt_input.row_splits.dtype != dst_shape.dim_size_dtype):
if not ragged_config.auto_cast_partition_dtype():
raise ValueError('rt_input and dst_shape have different row_split '
'dtypes; use RaggedTensor.with_row_splits_dtype() or '
'RaggedTensorDynamicShape.with_dim_size_dtype() to '
'convert to a compatible dtype.')
rt_input = rt_input.with_row_splits_dtype(dtypes.int64)
dst_shape = dst_shape.with_dim_size_dtype(dtypes.int64)
# dst_shape's rank and ragged_rank must be greater than or equal to rt_input's
if rt_input.shape.ndims is None or dst_shape.rank is None:
raise ValueError('Unable to broadcast: unknown rank')
if rt_input.shape.ndims > dst_shape.rank:
raise ValueError('Incompatible with shape: rank mismatch')
if (isinstance(rt_input, ragged_tensor.RaggedTensor) and
rt_input.ragged_rank >= dst_shape.num_partitioned_dimensions):
raise ValueError('Incompatible with shape: ragged rank mismatch')
src_shape = RaggedTensorDynamicShape.from_tensor(rt_input)
src_shape = src_shape.broadcast_to_rank(dst_shape.rank)
# Add dimensions to rt_input so its rank and ragged_rank matches dst_shape.
if dst_shape.rank > rt_input.shape.ndims:
if rt_input.shape.ndims < dst_shape.num_inner_dimensions + 1:
rt_input = array_ops.reshape(
rt_input, array_ops.concat([[-1], dst_shape.inner_dim_sizes], axis=0))
for _ in range(dst_shape.rank - rt_input.shape.ndims):
if ragged_tensor.is_ragged(rt_input):
nrows = rt_input.nrows()
else:
nrows = array_ops.shape(rt_input,
out_type=dst_shape.dim_size_dtype)[0]
rt_input = ragged_tensor.RaggedTensor.from_row_lengths(rt_input, [nrows],
validate=False)
# Add ragged dimensions to match dst_shape.
if ragged_tensor.is_ragged(rt_input):
inner_rank_diff = (
rt_input.flat_values.shape.ndims - 1 - dst_shape.num_inner_dimensions)
if inner_rank_diff > 0:
rt_input = rt_input.with_flat_values(
ragged_tensor.RaggedTensor.from_tensor(
rt_input.flat_values, ragged_rank=inner_rank_diff,
row_splits_dtype=dst_shape.dim_size_dtype))
else:
rt_input = ragged_tensor.RaggedTensor.from_tensor(
rt_input, ragged_rank=dst_shape.num_partitioned_dimensions - 1,
row_splits_dtype=dst_shape.dim_size_dtype)
# Do broadcasting for any dimensions that will remain uniform. We can do
# these all at once, since they're independent of one another.
multiples = [1] * dst_shape.rank
for axis in range(dst_shape.num_partitioned_dimensions):
if not src_shape.is_ragged(axis) and not dst_shape.is_ragged(axis):
src_size = src_shape.dimension_size(axis)
dst_size = dst_shape.dimension_size(axis)
if ((tensor_util.constant_value(src_size) in (1, None)) and
(tensor_util.constant_value(dst_size) != 1)):
multiples[axis] = array_ops.where(
math_ops.equal(src_size, 1), dst_size, 1)
if not all(isinstance(v, int) and v == 1 for v in multiples):
multiples = array_ops.stack(multiples, axis=0)
rt_input = ragged_array_ops.tile(rt_input, multiples)
if broadcast_inner_dimensions:
rt_input = rt_input.with_flat_values(
array_ops.reshape(
rt_input.flat_values,
array_ops.concat([[-1], dst_shape.inner_dim_sizes], axis=0)))
# Do broadcasting for dimensions that become ragged. We must do these from
# outermost to innermost.
for axis in range(dst_shape.num_partitioned_dimensions):
if not src_shape.is_ragged(axis) and dst_shape.is_ragged(axis):
dst_size = dst_shape.dimension_size(axis)
rt_input = _ragged_tile_axis(rt_input, axis, dst_size,
dst_shape.dim_size_dtype)
return rt_input
def _ragged_tile_axis(rt_input, axis, repeats, row_splits_dtype):
"""Tile a dimension of a RaggedTensor to match a ragged shape."""
assert axis > 0 # Outermost dimension may not be ragged.
if not ragged_tensor.is_ragged(rt_input):
rt_input = ragged_tensor.RaggedTensor.from_tensor(
rt_input, ragged_rank=1, row_splits_dtype=row_splits_dtype)
if axis > 1:
return rt_input.with_values(
_ragged_tile_axis(rt_input.values, axis - 1, repeats,
row_splits_dtype))
else:
src_row_splits = rt_input.nested_row_splits
src_row_lengths = rt_input.nested_row_lengths()
splits = src_row_splits[0]
dst_row_lengths = [repeats]
for i in range(1, len(src_row_lengths)):
dst_row_lengths.append(
ragged_util.repeat_ranges(src_row_lengths[i], splits, repeats))
splits = array_ops.gather(src_row_splits[i], splits)
dst_values = ragged_util.repeat_ranges(rt_input.flat_values, splits,
repeats)
return ragged_tensor.RaggedTensor.from_nested_row_lengths(
dst_values, dst_row_lengths, validate=False)
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_tensor_shape.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_range op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
def prod(values):
val = 1
for v in values:
val *= v
return val
# return reduce(lambda x, y: x * y, values, 1)
def mean(values):
return 1.0 * sum(values) / len(values)
def sqrt_n(values):
return 1.0 * sum(values) / math.sqrt(len(values))
@test_util.run_all_in_graph_and_eager_modes
class RaggedSegmentOpsTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
def expected_value(self, data, segment_ids, num_segments, combiner):
"""Find the expected value for a call to ragged_segment_<aggregate>.
Args:
data: The input RaggedTensor, expressed as a nested python list.
segment_ids: The segment ids, as a python list of ints.
num_segments: The number of segments, as a python int.
combiner: The Python function used to combine values.
Returns:
The expected value, as a nested Python list.
"""
self.assertLen(data, len(segment_ids))
# Build an empty (num_segments x ncols) "grouped" matrix
ncols = max(len(row) for row in data)
grouped = [[[] for _ in range(ncols)] for row in range(num_segments)]
# Append values from data[row] to grouped[segment_ids[row]]
for row in range(len(data)):
for col in range(len(data[row])):
grouped[segment_ids[row]][col].append(data[row][col])
# Combine the values.
return [[combiner(values)
for values in grouped_row
if values]
for grouped_row in grouped]
@parameterized.parameters(
(ragged_math_ops.segment_sum, sum, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_sum, sum, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_sum, sum, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_sum, sum, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_prod, prod, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_prod, prod, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_prod, prod, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_prod, prod, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_min, min, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_min, min, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_min, min, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_min, min, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_max, max, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_max, max, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_max, max, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_max, max, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_mean, mean, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_mean, mean, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_mean, mean, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_mean, mean, [0, 0, 0, 10, 10, 10]),
)
def testRaggedSegment_Int(self, segment_op, combiner, segment_ids):
rt_as_list = [[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]]
rt = ragged_factory_ops.constant(rt_as_list)
num_segments = max(segment_ids) + 1
expected = self.expected_value(rt_as_list, segment_ids, num_segments,
combiner)
segmented = segment_op(rt, segment_ids, num_segments)
self.assertRaggedEqual(segmented, expected)
@parameterized.parameters(
(ragged_math_ops.segment_sum, sum, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_sum, sum, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_sum, sum, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_sum, sum, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_prod, prod, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_prod, prod, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_prod, prod, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_prod, prod, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_min, min, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_min, min, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_min, min, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_min, min, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_max, max, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_max, max, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_max, max, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_max, max, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_mean, mean, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_mean, mean, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_mean, mean, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_mean, mean, [0, 0, 0, 10, 10, 10]),
(ragged_math_ops.segment_sqrt_n, sqrt_n, [0, 0, 1, 1, 2, 2]),
(ragged_math_ops.segment_sqrt_n, sqrt_n, [0, 0, 0, 1, 1, 1]),
(ragged_math_ops.segment_sqrt_n, sqrt_n, [5, 4, 3, 2, 1, 0]),
(ragged_math_ops.segment_sqrt_n, sqrt_n, [0, 0, 0, 10, 10, 10]),
)
def testRaggedSegment_Float(self, segment_op, combiner, segment_ids):
rt_as_list = [[0., 1., 2., 3.], [4.], [], [5., 6.], [7.], [8., 9.]]
rt = ragged_factory_ops.constant(rt_as_list)
num_segments = max(segment_ids) + 1
expected = self.expected_value(rt_as_list, segment_ids, num_segments,
combiner)
segmented = segment_op(rt, segment_ids, num_segments)
self.assertRaggedAlmostEqual(segmented, expected, places=5)
def testRaggedRankTwo(self):
rt = ragged_factory_ops.constant([
[[111, 112, 113, 114], [121],], # row 0
[], # row 1
[[], [321, 322], [331]], # row 2
[[411, 412]] # row 3
]) # pyformat: disable
segment_ids1 = [0, 2, 2, 2]
segmented1 = ragged_math_ops.segment_sum(rt, segment_ids1, 3)
expected1 = [[[111, 112, 113, 114], [121]], # row 0
[], # row 1
[[411, 412], [321, 322], [331]] # row 2
] # pyformat: disable
self.assertRaggedEqual(segmented1, expected1)
segment_ids2 = [1, 2, 1, 1]
segmented2 = ragged_math_ops.segment_sum(rt, segment_ids2, 3)
expected2 = [[],
[[111+411, 112+412, 113, 114], [121+321, 322], [331]],
[]] # pyformat: disable
self.assertRaggedEqual(segmented2, expected2)
def testRaggedSegmentIds(self):
rt = ragged_factory_ops.constant([
[[111, 112, 113, 114], [121],], # row 0
[], # row 1
[[], [321, 322], [331]], # row 2
[[411, 412]] # row 3
]) # pyformat: disable
segment_ids = ragged_factory_ops.constant([[1, 2], [], [1, 1, 2], [2]])
segmented = ragged_math_ops.segment_sum(rt, segment_ids, 3)
expected = [[],
[111+321, 112+322, 113, 114],
[121+331+411, 412]] # pyformat: disable
self.assertRaggedEqual(segmented, expected)
def testShapeMismatchError1(self):
dt = constant_op.constant([1, 2, 3, 4, 5, 6])
segment_ids = ragged_factory_ops.constant([[1, 2], []])
self.assertRaisesRegexp(
ValueError, 'segment_ids.shape must be a prefix of data.shape, '
'but segment_ids is ragged and data is not.',
ragged_math_ops.segment_sum, dt, segment_ids, 3)
def testShapeMismatchError2(self):
rt = ragged_factory_ops.constant([
[[111, 112, 113, 114], [121]], # row 0
[], # row 1
[[], [321, 322], [331]], # row 2
[[411, 412]] # row 3
]) # pyformat: disable
segment_ids = ragged_factory_ops.constant([[1, 2], [1], [1, 1, 2], [2]])
# Error is raised at graph-building time if we can detect it then.
self.assertRaisesRegexp(
errors.InvalidArgumentError,
'segment_ids.shape must be a prefix of data.shape.*',
ragged_math_ops.segment_sum, rt, segment_ids, 3)
# Otherwise, error is raised when we run the graph.
segment_ids2 = ragged_tensor.RaggedTensor.from_row_splits(
array_ops.placeholder_with_default(segment_ids.values, None),
array_ops.placeholder_with_default(segment_ids.row_splits, None))
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
'segment_ids.shape must be a prefix of data.shape.*'):
self.evaluate(ragged_math_ops.segment_sum(rt, segment_ids2, 3))
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_segment_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def map_fn(fn,
elems,
dtype=None,
parallel_iterations=None,
back_prop=True,
swap_memory=False,
infer_shape=True,
name=None):
"""map on the list of tensors unpacked from `elems` on dimension 0.
The simplest version of `map_fn` repeatedly applies the callable `fn` to a
sequence of elements from first to last. The elements are made of the
tensors unpacked from `elems`. `dtype` is the data type of the return
value of `fn`. Users must provide `dtype` if it is different from
the data type of `elems`.
Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
of the result tensor is `[values.shape[0]] + fn(values[0]).shape`.
This method also allows multi-arity `elems` and output of `fn`. If `elems`
is a (possibly nested) list or tuple of tensors, then each of these tensors
must have a matching first (unpack) dimension. The signature of `fn` may
match the structure of `elems`. That is, if `elems` is
`(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
`fn = lambda (t1, [t2, t3, [t4, t5]]):`.
Furthermore, `fn` may emit a different structure than its input. For example,
`fn` may look like: `fn = lambda t1: return (t1 + 1, t1 - 1)`. In this case,
the `dtype` parameter is not optional: `dtype` must be a type or (possibly
nested) tuple of types matching the output of `fn`.
To apply a functional operation to the nonzero elements of a SparseTensor
one of the following methods is recommended. First, if the function is
expressible as TensorFlow ops, use
```python
result = SparseTensor(input.indices, fn(input.values), input.dense_shape)
```
If, however, the function is not expressible as a TensorFlow op, then use
```python
result = SparseTensor(
input.indices, map_fn(fn, input.values), input.dense_shape)
```
instead.
When executing eagerly, map_fn does not execute in parallel even if
`parallel_iterations` is set to a value > 1. You can still get the
performance benefits of running a function in parallel by using the
`tf.contrib.eager.defun` decorator,
```python
# Assume the function being used in map_fn is fn.
# To ensure map_fn calls fn in parallel, use the defun decorator.
@tf.contrib.eager.defun
def func(tensor):
return tf.map_fn(fn, tensor)
```
Note that if you use the defun decorator, any non-TensorFlow Python code
that you may have written in your function won't get executed. See
`tf.contrib.eager.defun` for more details. The recommendation would be to
debug without defun but switch to defun to get performance benefits of
running map_fn in parallel.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same (possibly nested) structure as `elems`. Its output must have the
same structure as `dtype` if one is provided, otherwise it must have the
same structure as `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be applied to `fn`.
dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure
of Tensors differing from the structure of `elems`, then `dtype` is not
optional and must have the same structure as the output of `fn`. Use
`RaggedTensorType` to declare an output of type `RaggedTensor`.
parallel_iterations: (optional) The number of iterations allowed to run in
parallel. When graph building, the default value is 10. While executing
eagerly, the default value is set to 1.
back_prop: (optional) True enables support for back propagation.
swap_memory: (optional) True enables GPU-CPU memory swapping.
infer_shape: (optional) False disables tests for consistent output shapes.
name: (optional) Name prefix for the returned tensors.
Returns:
A possibly nested sequence of potentially ragged tensors. Each
tensor packs the results of applying `fn` to tensors unpacked from `elems`
along the first dimension, from first to last.
Raises:
TypeError: if `fn` is not callable or the structure of the output of
`fn` and `dtype` do not match, or if elems is a SparseTensor.
ValueError: if the lengths of the output of `fn` and `dtype` do not match.
#### Examples:
```python
elems = np.array([1, 2, 3, 4, 5, 6])
squares = map_fn(lambda x: x * x, elems)
# squares == [1, 4, 9, 16, 25, 36]
```
```python
elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64)
# alternate == [-1, 2, -3]
```
```python
elems = np.array([1, 2, 3])
alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64))
# alternates[0] == [1, 2, 3]
# alternates[1] == [-1, -2, -3]
```
```python
elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]])
mean = map_fn(tf.reduce_mean, elems)
# mean == [2, 4, 6]
```
```python
elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]], dtype=tf.int64)
out = map_fn(fn=lambda x: x+1, elems,
dtype=ragged.RaggedTensorType(type=tf.int64, ragged_rank=0))
# out = ragged.constant([[2, 3, 4], [5, 6], [7, 8]])
```
"""
if not callable(fn):
raise TypeError("fn must be callable.")
if isinstance(elems, sparse_tensor.SparseTensor):
raise TypeError(
"To perform a map on the values of a sparse tensor use either "
" SparseTensor(input.indices, fn(input.values), input.dense_shape) or "
" SparseTensor(input.indices, map_fn(fn, input.values), "
"input.dense_shape)")
in_graph_mode = not context.executing_eagerly()
# Set the default number of parallel_iterations depending on graph/eager mode.
if in_graph_mode and not parallel_iterations:
parallel_iterations = 10
elif not in_graph_mode and not parallel_iterations:
parallel_iterations = 1
if not in_graph_mode and parallel_iterations > 1:
logging.log_first_n(logging.WARN, "Setting parallel_iterations > 1 has no "
"effect when executing eagerly. Consider calling map_fn"
" with tf.contrib.eager.defun to execute fn in "
"parallel.", 1)
parallel_iterations = 1
input_is_sequence = nest.is_sequence(elems)
input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]
def input_pack(x):
return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]
elems_flat = input_flatten(elems)
elems_flat = ragged_tensor.match_row_splits_dtypes(*elems_flat)
with ops.name_scope(name, "map", elems_flat):
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode:
# Any get_variable calls in fn will cache the first call locally
# and not issue repeated network I/O requests for each iteration.
varscope = vs.get_variable_scope()
varscope_caching_device_was_none = False
if varscope.caching_device is None:
# TODO(ebrevdo): Change to using colocate_with here and in other
# methods.
varscope.set_caching_device(lambda op: op.device)
varscope_caching_device_was_none = True
elems_flat = [
ragged_tensor.convert_to_tensor_or_ragged_tensor(elem, name="elem")
for elem in elems_flat
]
# We can either infer the output, or we can assume that it will be the same
# as the input structure.
dtype = dtype or input_pack([elem.dtype for elem in elems_flat])
# Find the number of iterations, n may be known statically.
if isinstance(elems_flat[0], ragged_tensor.RaggedTensor):
n = elems_flat[0].nrows(out_type=dtypes.int32)
else:
static_shape = elems_flat[0].shape
if static_shape.ndims is not None and static_shape.ndims < 1:
if len(elems_flat) == 1:
raise ValueError(
"elems must be a 1+ dimensional Tensor, not a scalar")
else:
raise ValueError(
"elements in elems must be 1+ dimensional Tensors, not scalars")
n = (tensor_shape.dimension_value(static_shape[0]) or
array_ops.shape(elems_flat[0])[0])
n = math_ops.cast(n, dtype=dtypes.int32)
# Create a flat list of TAs.
# Flatten the dtype structure to a list.
dtype_flat = nest.flatten(dtype)
# decompose to components
dtype_components = [_maybe_decompose_dtype(d) for d in dtype_flat]
dtype_components_flat = nest.flatten(dtype_components)
# Create TensorArrays.
accs_ta = [
tensor_array_ops.TensorArray(
dtype=t, dynamic_size=False, infer_shape=infer_shape, size=n)
for t in dtype_components_flat
]
i = constant_op.constant(0, dtype=dtypes.int32)
def compute(i, tas):
"""The loop body of map_fn.
Args:
i: the loop counter
tas: the flat TensorArray accumulator list
Returns:
(i + 1, tas): the updated counter + updated TensorArrays
Raises:
TypeError: if dtype and packed_fn_values structure do not match
        ValueError: if dtype and packed_fn_values lengths do not match
"""
# Get Tensors or RaggedTensors sliced at i, then pack it back to the
# original structure.
packed_values = input_pack([elem_flat[i] for elem_flat in elems_flat])
packed_fn_values = fn(packed_values)
# Check that the structure of the output matches what was declared or
# inferred.
# nest.assert_same_structure(dtype or elems, packed_fn_values)
# Flatten and decompose to a list of Tensors
flat_fn_values = nest.flatten(packed_fn_values)
      # If we declared that we are expecting a RaggedTensor output but we got a
      # Tensor output, try to convert it to a RaggedTensor.
flat_fn_composite_tensors = list(
_convert_declared(flat_fn_values, dtype_flat))
flat_fn_components = [
_maybe_decompose_tensor(t) for t in flat_fn_composite_tensors
]
flat_fn_tensors = nest.flatten(flat_fn_components)
# Write to TAs.
tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_tensors)]
return (i + 1, tas)
_, r_a = control_flow_ops.while_loop(
lambda i, _: i < n, compute, (i, accs_ta),
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory,
maximum_iterations=n)
# TODO(akshayka): Remove the in_graph_mode check once caching devices are
# supported in Eager
if in_graph_mode and varscope_caching_device_was_none:
varscope.set_caching_device(None)
# Pack back into a list of components
results_as_components = nest.pack_sequence_as(dtype_components, r_a)
# Stack TensorArrays for Tensor outputs, and concat RaggedTensor outputs.
def _stack_or_concat(e):
if isinstance(e, _RaggedTensorComponents):
return _concat_ragged_tensor_components(e)
else:
result = e.stack()
return result
results_flat_components = [
_stack_or_concat(e) for e in results_as_components
]
results_packed = [
_maybe_recompose_tensor(c) for c in results_flat_components
]
results_packed = nest.pack_sequence_as(dtype, results_packed)
return results_packed
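# Hedged usage sketch (illustrative only; `_example_map_fn_row_means` is a
# hypothetical helper): applying a per-row reduction to a ragged tensor with
# the `map_fn` defined above, using modules this file already imports.
def _example_map_fn_row_means():
  rt = ragged_tensor.RaggedTensor.from_row_splits(
      values=[1., 2., 3., 4., 5.], row_splits=[0, 3, 5])  # [[1., 2., 3.], [4., 5.]]
  # Each row is reduced to its mean; the result is the dense Tensor [2.0, 4.5].
  return map_fn(math_ops.reduce_mean, rt, dtype=dtypes.float32)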
class _RaggedTensorComponents(
collections.namedtuple(
"_RaggedTensorComponents",
["flat_values", "nested_row_lengths", "outer_row_length"])):
"""A namedtuple of components which represent a `RaggedTensor`.
_RaggedTensorComponents is a list of components which can be used to create a
`RaggedTensor`. Use this class to represent a `RaggedTensor` in situations
where nest.flatten and nest.pack_sequence_as should decompose ragged tensors
  into their components.
The following are a list of components for a `RaggedTensor`:
flat_values: The flat and inner values of a RaggedTensor. This could be
a `Tensor`, a `TensorArray`, or a data type.
nested_row_lengths: a tuple containing the row lengths of each rank. The
elements of the tuple could be `Tensor`s or `TensorArray`s.
outer_row_length: a `Tensor` or `TensorArray` containing the row length of the
`RaggedTensor`'s outermost dimension.
See `RaggedTensor` for more details of the use of each component.
"""
__slots__ = ()
def _concat_ragged_tensor_components(rt_ta):
flat_values = rt_ta.flat_values.concat()
nested_row_lengths = tuple(
row_lengths_ta.concat() for row_lengths_ta in rt_ta.nested_row_lengths)
outer_row_length = rt_ta.outer_row_length.concat()
return _RaggedTensorComponents(
flat_values=flat_values,
nested_row_lengths=nested_row_lengths,
outer_row_length=outer_row_length)
def _maybe_decompose_tensor(rt):
"""Decompose tensors to their composite tensors."""
if not isinstance(rt, ragged_tensor.RaggedTensor):
return rt
# The three component pieces we need:
# - inner values
flat_values = rt.flat_values
# - row_splits of the RT
splits = rt.nested_row_splits
nested_row_lengths = tuple(split[1:] - split[:-1] for split in splits)
# - outer row length
outer_row_length = array_ops.expand_dims(rt.nrows(), axis=0)
return _RaggedTensorComponents(
flat_values=flat_values,
nested_row_lengths=nested_row_lengths,
outer_row_length=outer_row_length,
)
def _maybe_recompose_tensor(t):
"""Reconstructs a _RaggedTensorComponents into a RaggedTensor."""
if not isinstance(t, _RaggedTensorComponents):
return t
values = t.flat_values
nested_row_lengths = tuple(t.nested_row_lengths)
for nested_row_length in reversed(nested_row_lengths):
values = ragged_tensor.RaggedTensor.from_row_lengths(
values, nested_row_length, validate=False)
return ragged_tensor.RaggedTensor.from_row_lengths(values, t.outer_row_length,
validate=False)
def _maybe_decompose_dtype(d):
"""Decompose dtypes into composite tensors (if necessary)."""
if not isinstance(d, ragged_tensor.RaggedTensorType):
return d
result = _RaggedTensorComponents(
flat_values=d.dtype,
nested_row_lengths=tuple(
d.row_splits_dtype for i in range(d.ragged_rank - 1)),
outer_row_length=d.row_splits_dtype,
)
return result
def _convert_declared(fn_output_flat, output_declared):
"""Convert outputs which are `Tensor`s into `_RaggedTensorComponents`."""
for current, declared in zip(fn_output_flat, output_declared):
if isinstance(declared, ragged_tensor.RaggedTensorType):
yield _convert_declared_ragged(current, declared)
else:
yield current
def _convert_declared_ragged(current, declared):
"""Converts an output with RaggedTensorType into a _RaggedTensorComponents."""
# Check that the ragged ranks match up.
# + 1 to account for the rank of the outermost dimension.
current_ragged_rank = getattr(current, "ragged_rank", 0)
if declared.ragged_rank != current_ragged_rank + 1:
raise ValueError(
"The declared ragged rank (%d) mismatches the result (%d)" %
(declared.ragged_rank, current_ragged_rank + 1))
# Check that dtypes match up.
if declared.dtype != current.dtype:
raise ValueError(
"The declared dtype (%s) mismatches the result (%s)" %
(declared.dtype, current.dtype))
if (isinstance(current, ragged_tensor.RaggedTensor) and
declared.row_splits_dtype != current.row_splits.dtype):
if not ragged_config.auto_cast_partition_dtype():
raise ValueError(
"The declared row_splits dtype (%s) mismatches the result (%s)."
" Use RaggedTensor.with_row_splits_dtype to convert it."
% (declared.row_splits_dtype, current.row_splits.dtype))
current = current.with_row_splits_dtype(declared.row_splits_dtype)
if isinstance(current, ragged_tensor.RaggedTensor):
return current
else:
nrows = array_ops.shape(current, out_type=declared.row_splits_dtype)[0]
row_length = array_ops.expand_dims(nrows, axis=0)
return _RaggedTensorComponents(
flat_values=current,
nested_row_lengths=(),
outer_row_length=row_length)
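# Illustrative note (an addition, not in the original file): when the declared
# output type is RaggedTensorType(ragged_rank=1) but fn returns a plain 1-D
# Tensor of length n, the branch above wraps it with nested_row_lengths=() and
# outer_row_length=[n]; stacking those components later yields a RaggedTensor
# with a single ragged dimension.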
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_map_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RaggedTensor.from_sparse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToSparseOpTest(ragged_test_util.RaggedTensorTestCase):
def testDocStringExample(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]],
values=[1, 2, 3, 4, 5],
dense_shape=[4, 3])
rt = RaggedTensor.from_sparse(st)
self.assertRaggedEqual(rt, [[1, 2, 3], [4], [], [5]])
def testEmpty(self):
st = sparse_tensor.SparseTensor(
indices=array_ops.zeros([0, 2], dtype=dtypes.int64),
values=[],
dense_shape=[4, 3])
rt = RaggedTensor.from_sparse(st)
self.assertRaggedEqual(rt, [[], [], [], []])
def testBadSparseTensorRank(self):
st1 = sparse_tensor.SparseTensor(indices=[[0]], values=[0], dense_shape=[3])
self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
RaggedTensor.from_sparse, st1)
st2 = sparse_tensor.SparseTensor(
indices=[[0, 0, 0]], values=[0], dense_shape=[3, 3, 3])
self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
RaggedTensor.from_sparse, st2)
if not context.executing_eagerly():
st3 = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=[0],
dense_shape=array_ops.placeholder(dtypes.int64))
self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
RaggedTensor.from_sparse, st3)
def testGoodPartialSparseTensorRank(self):
if not context.executing_eagerly():
st1 = sparse_tensor.SparseTensor(
indices=[[0, 0]],
values=[0],
dense_shape=array_ops.placeholder(dtypes.int64))
st2 = sparse_tensor.SparseTensor(
indices=array_ops.placeholder(dtypes.int64),
values=[0],
dense_shape=[4, 3])
# Shouldn't throw ValueError
RaggedTensor.from_sparse(st1)
RaggedTensor.from_sparse(st2)
def testNonRaggedSparseTensor(self):
# "index_suffix" means the value of the innermost dimension of the index
# (i.e., indices[i][-1]).
# See comments in _assert_sparse_indices_are_ragged_right() for more
# details/background.
# index_suffix of first index is not zero.
st1 = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 2], [2, 0]], values=[1, 2, 3], dense_shape=[3, 3])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'.*SparseTensor is not right-ragged'):
self.evaluate(RaggedTensor.from_sparse(st1))
# index_suffix of an index that starts a new row is not zero.
st2 = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [2, 1]], values=[1, 2, 3], dense_shape=[3, 3])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'.*SparseTensor is not right-ragged'):
self.evaluate(RaggedTensor.from_sparse(st2))
# index_suffix of an index that continues a row skips a cell.
st3 = sparse_tensor.SparseTensor(
indices=[[0, 1], [0, 1], [0, 3]], values=[1, 2, 3], dense_shape=[3, 3])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'.*SparseTensor is not right-ragged'):
self.evaluate(RaggedTensor.from_sparse(st3))
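  # Illustrative note (an addition, not from the original test): a
  # SparseTensor is "right-ragged" when, within every row, its column indices
  # start at 0 and increase by 1 with no gaps.  For example,
  #   indices=[[0, 0], [0, 1], [2, 0]] is right-ragged -> rows [[v, v], [], [v]]
  #   indices=[[0, 1], [2, 0]] is not (row 0 starts at column 1).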
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.rank op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedRankOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
# Rank 0
dict(
test_input=1,
expected_rank=0,
),
# Rank 1
dict(
test_input=[1],
expected_rank=1,
),
dict(
test_input=[1, 2, 3, 4],
expected_rank=1,
),
# Rank 2
dict(
test_input=[[1], [2], [3]],
expected_rank=2,
),
# Rank 3
dict(
test_input=[[[1], [2, 3]], [[4], [5, 6, 7]]],
expected_rank=3,
),
# Rank 3, ragged_rank=2
dict(
test_input=[[[1], [2, 3], [10, 20]],
[[4], [5, 6, 7]]],
expected_rank=3,
ragged_rank=2,
),
# Rank 4, ragged_rank=3 with dimensions: {2, (1, 2), (2), (1, 2)}
dict(
test_input=[[[[1], [2]]],
[[[3, 4], [5, 6]], [[7, 8], [9, 10]]]],
expected_rank=4,
),
# Rank 4, ragged_rank=2 with dimensions: {2, (1, 2), (1, 2), 2}
dict(
test_input=[
[[[1, 2]]],
[[[5, 6], [7, 8]],
[[9, 10], [11, 12]]]],
expected_rank=4,
ragged_rank=2,
),
])
def testRaggedRank(self, test_input, expected_rank, ragged_rank=None):
test_input = ragged_factory_ops.constant(
test_input, ragged_rank=ragged_rank)
self.assertAllEqual(ragged_array_ops.rank(
test_input), expected_rank)
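  # Illustrative note (an addition): rank counts every dimension, while
  # ragged_rank counts only the ragged ones.  E.g. the rank-3 case above,
  # [[[1], [2, 3]], [[4], [5, 6, 7]]], has rank 3 but ragged_rank 2 by
  # default, so its flat_values is a 1-D Tensor of length 7.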
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_rank_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python-style indexing and slicing for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
def ragged_tensor_getitem(self, key):
"""Returns the specified piece of this RaggedTensor.
Supports multidimensional indexing and slicing, with one restriction:
indexing into a ragged inner dimension is not allowed. This case is
problematic because the indicated value may exist in some rows but not
others. In such cases, it's not obvious whether we should (1) report an
IndexError; (2) use a default value; or (3) skip that value and return a
tensor with fewer rows than we started with. Following the guiding
principles of Python ("In the face of ambiguity, refuse the temptation to
guess"), we simply disallow this operation.
Any dimensions added by `array_ops.newaxis` will be ragged if the following
dimension is ragged.
Args:
self: The RaggedTensor to slice.
key: Indicates which piece of the RaggedTensor to return, using standard
Python semantics (e.g., negative values index from the end). `key`
may have any of the following types:
* `int` constant
* Scalar integer `Tensor`
* `slice` containing integer constants and/or scalar integer
`Tensor`s
* `Ellipsis`
* `tf.newaxis`
        * `tuple` containing any of the above (for multidimensional indexing)
Returns:
A `Tensor` or `RaggedTensor` object. Values that include at least one
ragged dimension are returned as `RaggedTensor`. Values that include no
ragged dimensions are returned as `Tensor`. See above for examples of
expressions that return `Tensor`s vs `RaggedTensor`s.
Raises:
ValueError: If `key` is out of bounds.
ValueError: If `key` is not supported.
TypeError: If the indices in `key` have an unsupported type.
Examples:
```python
>>> # A 2-D ragged tensor with 1 ragged dimension.
>>> rt = ragged.constant([['a', 'b', 'c'], ['d', 'e'], ['f'], ['g']])
>>> rt[0].eval().tolist() # First row (1-D `Tensor`)
['a', 'b', 'c']
>>> rt[:3].eval().tolist() # First three rows (2-D RaggedTensor)
  [['a', 'b', 'c'], ['d', 'e'], ['f']]
>>> rt[3, 0].eval().tolist() # 1st element of 4th row (scalar)
'g'
>>> # A 3-D ragged tensor with 2 ragged dimensions.
>>> rt = ragged.constant([[[1, 2, 3], [4]],
... [[5], [], [6]],
... [[7]],
... [[8, 9], [10]]])
>>> rt[1].eval().tolist() # Second row (2-D RaggedTensor)
[[5], [], [6]]
>>> rt[3, 0].eval().tolist() # First element of fourth row (1-D Tensor)
[8, 9]
>>> rt[:, 1:3].eval().tolist() # Items 1-3 of each row (3-D RaggedTensor)
[[[4]], [[], [6]], [], [[10]]]
>>> rt[:, -1:].eval().tolist() # Last item of each row (3-D RaggedTensor)
[[[4]], [[6]], [[7]], [[10]]]
```
"""
scope_tensors = [self] + list(_tensors_in_key_list(key))
if isinstance(key, (list, tuple)):
key = list(key)
else:
key = [key]
with ops.name_scope(None, "RaggedGetItem", scope_tensors):
return _ragged_getitem(self, key)
def _ragged_getitem(rt_input, key_list):
"""Helper for indexing and slicing ragged tensors with __getitem__().
Extracts the specified piece of the `rt_input`. See
`RaggedTensor.__getitem__` for examples and restrictions.
Args:
rt_input: The `RaggedTensor` from which a piece should be returned.
key_list: The list of keys specifying which piece to return. Each key
corresponds with a separate dimension.
Returns:
The indicated piece of rt_input.
Raises:
ValueError: If `key_list` is not supported.
TypeError: If any keys in `key_list` have an unsupported type.
"""
if not key_list:
return rt_input
row_key = key_list[0]
inner_keys = key_list[1:]
if row_key is Ellipsis:
expanded_key_list = _expand_ellipsis(key_list, rt_input.shape.ndims)
return _ragged_getitem(rt_input, expanded_key_list)
# Adding a new axis: Get rt_input[inner_keys], and wrap it in a RaggedTensor
# that puts all values in a single row.
if row_key is array_ops.newaxis:
inner_rt = _ragged_getitem(rt_input, inner_keys)
nsplits = array_ops.shape(inner_rt.row_splits,
out_type=inner_rt.row_splits.dtype)[0]
return ragged_tensor.RaggedTensor.from_row_splits(
inner_rt, array_ops.stack([0, nsplits - 1]), validate=False)
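  # Illustrative note (an addition): e.g. for rt = [[1, 2], [3]],
  # rt[array_ops.newaxis] wraps everything in a new outer dimension of size
  # one, giving [[[1, 2], [3]]].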
# Slicing a range of rows: first slice the outer dimension, and then
# call `_ragged_getitem_inner_dimensions` to handle the inner keys.
if isinstance(row_key, slice):
sliced_rt_input = _slice_ragged_row_dimension(rt_input, row_key)
return _ragged_getitem_inner_dimensions(sliced_rt_input, inner_keys)
# Indexing a single row: slice values to get the indicated row, and then
# use a recursive call to __getitem__ to handle the inner keys.
else:
starts = rt_input.row_splits[:-1]
limits = rt_input.row_splits[1:]
if context.executing_eagerly():
      # In Python, __getitem__ should raise IndexError for out-of-bound
      # indices. This lets iteration work correctly, since Python translates
      # IndexError into StopIteration for next()/__next__(). For example:
      #   import tensorflow as tf
      #   r = tf.ragged.constant([[1., 2.], [3., 4., 5.], [6.]])
      #   for elem in r:
      #     print(elem)
      # In graph mode, the exception is only raised when the session runs,
      # so an out-of-bound index cannot be detected ahead of time. In eager
      # mode, however, it can be detected here, so we raise IndexError
      # directly. Below, row_key >= len(starts) is checked; if row_key is not
      # an integer, the resulting TypeError (or ValueError) is ignored, since
      # the key will be validated later anyway.
try:
if int(row_key) >= len(starts):
raise IndexError("Row key {} out of bounds".format(row_key))
except (TypeError, ValueError):
pass
row = rt_input.values[starts[row_key]:limits[row_key]]
return row.__getitem__(inner_keys)
def _slice_ragged_row_dimension(rt_input, row_key):
"""Slice the outer dimension of `rt_input` according to the given `slice`.
Args:
rt_input: The `RaggedTensor` to slice.
row_key: The `slice` object that should be used to slice `rt_input`.
Returns:
A `RaggedTensor` containing the indicated slice of `rt_input`.
"""
if row_key.start is None and row_key.stop is None and row_key.step is None:
return rt_input
# Use row_key to slice the starts & limits.
new_starts = rt_input.row_splits[:-1][row_key]
new_limits = rt_input.row_splits[1:][row_key]
zero_pad = array_ops.zeros([1], rt_input.row_splits.dtype)
# If there's no slice step, then we can just select a single continuous
# span of `ragged.values(rt_input)`.
if row_key.step is None or row_key.step == 1:
# Construct the new splits. If new_starts and new_limits are empty,
# then this reduces to [0]. Otherwise, this reduces to:
# concat([[new_starts[0]], new_limits])
new_splits = array_ops.concat(
[zero_pad[array_ops.size(new_starts):], new_starts[:1], new_limits],
axis=0)
values_start = new_splits[0]
values_limit = new_splits[-1]
return ragged_tensor.RaggedTensor.from_row_splits(
rt_input.values[values_start:values_limit], new_splits - values_start,
validate=False)
# If there is a slice step (aka a strided slice), then use ragged_gather to
# collect the necessary elements of `ragged.values(rt_input)`.
else:
return _build_ragged_tensor_from_value_ranges(new_starts, new_limits, 1,
rt_input.values)
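# Illustrative sketch (an addition, not in the original source): for
# rt = [[1, 2], [3], [4, 5, 6]] (row_splits = [0, 2, 3, 6]) and row_key = 1:2,
#   new_starts = row_splits[:-1][1:2] = [2]
#   new_limits = row_splits[1:][1:2]  = [3]
# so the result keeps values[2:3] = [3], i.e. the slice [[3]].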
def _ragged_getitem_inner_dimensions(rt_input, key_list):
"""Retrieve inner dimensions, keeping outermost dimension unchanged.
Args:
rt_input: The `RaggedTensor` or `Tensor` from which a piece should be
extracted.
key_list: The __getitem__ keys for slicing the inner dimensions.
Returns:
A `RaggedTensor`.
Raises:
ValueError: If key_list is not supported.
"""
if not key_list:
return rt_input
if isinstance(rt_input, ops.Tensor):
return rt_input.__getitem__([slice(None, None, None)] + key_list)
column_key = key_list[0]
if column_key is Ellipsis:
expanded_key_list = _expand_ellipsis(key_list, rt_input.values.shape.ndims)
return _ragged_getitem_inner_dimensions(rt_input, expanded_key_list)
# Adding a new axis to a ragged inner dimension: recursively get the inner
# dimensions of rt_input with key_list[1:], and then wrap the result in a
# RaggedTensor that puts each value in its own row.
if column_key is array_ops.newaxis:
inner_rt = _ragged_getitem_inner_dimensions(rt_input, key_list[1:])
nsplits = array_ops.shape(inner_rt.row_splits,
out_type=inner_rt.row_splits.dtype)[0]
return ragged_tensor.RaggedTensor.from_row_splits(inner_rt,
math_ops.range(nsplits),
validate=False)
# Slicing a range of columns in a ragged inner dimension. We use a
# recursive call to process the values, and then assemble a RaggedTensor
# with those values.
if isinstance(column_key, slice):
if (column_key.start is None and column_key.stop is None and
column_key.step is None):
# Trivial slice: recursively process all values, & splits is unchanged.
return rt_input.with_values(
_ragged_getitem_inner_dimensions(rt_input.values, key_list[1:]))
else:
# Nontrivial slice: use ragged_gather to extract the indicated slice as
# a new RaggedTensor (inner_rt), and then recursively process its values.
# The splits can be taken from inner_rt.row_splits().
inner_rt_starts = rt_input.row_splits[:-1]
inner_rt_limits = rt_input.row_splits[1:]
if column_key.start is not None and column_key.start != 0:
inner_rt_starts = _add_offset_to_ranges(
column_key.start, rt_input.row_splits[:-1], rt_input.row_splits[1:])
if column_key.stop is not None and column_key.stop != 0:
inner_rt_limits = _add_offset_to_ranges(
column_key.stop, rt_input.row_splits[:-1], rt_input.row_splits[1:])
inner_rt = _build_ragged_tensor_from_value_ranges(
inner_rt_starts, inner_rt_limits, column_key.step, rt_input.values)
return inner_rt.with_values(
_ragged_getitem_inner_dimensions(inner_rt.values, key_list[1:]))
# Indexing a single column in a ragged inner dimension: raise an Exception.
# See RaggedTensor.__getitem__.__doc__ for an explanation of why indexing
# into a ragged inner dimension is problematic.
else:
raise ValueError("Cannot index into an inner ragged dimension.")
def _expand_ellipsis(key_list, num_remaining_dims):
"""Expands the ellipsis at the start of `key_list`.
Assumes that the first element of `key_list` is Ellipsis. This will either
remove the Ellipsis (if it corresponds to zero indices) or prepend a new
`slice(None, None, None)` (if it corresponds to more than zero indices).
Args:
key_list: The arguments to `__getitem__()`.
num_remaining_dims: The number of dimensions remaining.
Returns:
    A copy of `key_list` with the ellipsis expanded.
Raises:
    ValueError: If `num_remaining_dims` is None (i.e., the RaggedTensor's
      rank is not statically known).
IndexError: If there are too many elements in `key_list`.
"""
if num_remaining_dims is None:
raise ValueError("Ellipsis not supported for unknown shape RaggedTensors")
num_indices = sum(1 for idx in key_list if idx is not array_ops.newaxis)
if num_indices > num_remaining_dims + 1:
raise IndexError("Too many indices for RaggedTensor")
elif num_indices == num_remaining_dims + 1:
return key_list[1:]
else:
return [slice(None, None, None)] + key_list
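# Illustrative note (an addition): for a RaggedTensor of known rank 3,
# rt[..., :1] behaves like rt[:, :, :1] and rt[0, ..., :1] like rt[0, :, :1];
# the ellipsis expands into however many full slices (possibly zero) are
# needed to cover the remaining dimensions.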
def _tensors_in_key_list(key_list):
"""Generates all Tensors in the given slice spec."""
if isinstance(key_list, ops.Tensor):
yield key_list
if isinstance(key_list, (list, tuple)):
for v in key_list:
for tensor in _tensors_in_key_list(v):
yield tensor
if isinstance(key_list, slice):
for tensor in _tensors_in_key_list(key_list.start):
yield tensor
for tensor in _tensors_in_key_list(key_list.stop):
yield tensor
for tensor in _tensors_in_key_list(key_list.step):
yield tensor
def _build_ragged_tensor_from_value_ranges(starts, limits, step, values):
"""Returns a `RaggedTensor` containing the specified sequences of values.
Returns a RaggedTensor `output` where:
```python
output.shape[0] = starts.shape[0]
output[i] = values[starts[i]:limits[i]:step]
```
Requires that `starts.shape == limits.shape` and
`0 <= starts[i] <= limits[i] <= values.shape[0]`.
Args:
starts: 1D integer Tensor specifying the start indices for the sequences of
values to include.
limits: 1D integer Tensor specifying the limit indices for the sequences of
values to include.
step: Integer value specifying the step size for strided slices.
values: The set of values to select from.
Returns:
A `RaggedTensor`.
Raises:
    TypeError: If `step` is not an integer or `None`.
"""
# Use `ragged_range` to get the index of each value we should include.
if step is None:
step = 1
step = ops.convert_to_tensor(step, name="step")
if step.dtype.is_integer:
step = math_ops.cast(step, starts.dtype)
else:
raise TypeError("slice strides must be integers or None")
value_indices = ragged_math_ops.range(starts, limits, step,
row_splits_dtype=starts.dtype)
# Use `ragged_gather` or `array_ops.gather` to collect the values.
if isinstance(values, ragged_tensor.RaggedTensor):
gathered_values = ragged_gather_ops.gather(
params=values, indices=value_indices.values)
else:
gathered_values = array_ops.gather(
params=values, indices=value_indices.values)
# Assemble the RaggedTensor from splits & values.
return value_indices.with_values(gathered_values)
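# Illustrative sketch (an addition): with starts=[0, 3], limits=[3, 4], step=1
# and values=['a', 'b', 'c', 'd'], ragged_range yields indices [[0, 1, 2], [3]],
# the gather collects ['a', 'b', 'c', 'd'], and with_values reassembles them as
# [['a', 'b', 'c'], ['d']] -- i.e. output[i] = values[starts[i]:limits[i]].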
def _add_offset_to_ranges(offset, starts, limits):
"""Adds an indexing offset to each of the specified ranges.
If offset>=0, then return output[i]=min(starts[i]+offset, limits[i])
If offset<0, then return output[i]=max(limits[i]+offset, starts[i])
Args:
offset: The offset to add. None, or an int, or a scalar Tensor.
starts: 1-D integer tensor containing start indices.
limits: 1-D integer tensor containing limit indices.
Returns:
A 1-D integer tensor.
"""
def map_positive_offset(offset):
return math_ops.minimum(starts + offset, limits)
def map_negative_offset(offset):
return math_ops.maximum(limits + offset, starts)
if isinstance(offset, ops.Tensor):
offset = math_ops.cast(offset, starts.dtype)
return control_flow_ops.cond(offset >= 0,
lambda: map_positive_offset(offset),
lambda: map_negative_offset(offset))
elif isinstance(offset, int):
return (map_positive_offset(offset)
if offset > 0 else map_negative_offset(offset))
else:
raise TypeError("slice offsets must be integers or None")
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_getitem.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
class RaggedTensorTestCase(test_util.TensorFlowTestCase):
"""Base class for RaggedTensor test cases."""
def _GetPyList(self, a):
"""Converts a to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def assertRaggedEqual(self, a, b):
"""Asserts that two potentially ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank)
def assertRaggedAlmostEqual(self, a, b, places=7):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertNestedListAlmostEqual(a_list, b_list, places, context='value')
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank)
def assertNestedListAlmostEqual(self, a, b, places=7, context='value'):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), 'Length differs for %s' % context)
for i in range(len(a)):
self.assertNestedListAlmostEqual(a[i], b[i], places,
'%s[%s]' % (context, i))
else:
self.assertAlmostEqual(
a, b, places,
'%s != %s within %s places at %s' % (a, b, places, context))
def eval_to_list(self, tensor):
value = self.evaluate(tensor)
if ragged_tensor.is_ragged(value):
return value.to_list()
elif isinstance(value, np.ndarray):
return value.tolist()
else:
return value
def _eval_tensor(self, tensor):
if ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
else:
return test_util.TensorFlowTestCase._eval_tensor(self, tensor)
@staticmethod
def _normalize_pylist(item):
"""Convert all (possibly nested) np.arrays contained in item to list."""
# convert np.arrays in current level to list
if np.ndim(item) == 0:
return item
level = (x.tolist() if isinstance(x, np.ndarray) else x for x in item)
_normalize = RaggedTensorTestCase._normalize_pylist
return [_normalize(el) if np.ndim(el) != 0 else el for el in level]
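# Illustrative usage sketch (an addition, not part of the original file),
# assuming the usual ragged test imports (e.g. ragged_factory_ops):
#   class MyRaggedTest(RaggedTensorTestCase):
#     def testExample(self):
#       rt = ragged_factory_ops.constant([[1, 2], [3]])
#       self.assertRaggedEqual(rt, [[1, 2], [3]])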
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_test_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.tile."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTileOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
#=========================================================================
# Docstring Example
#=========================================================================
dict(
descr='docstring example: ragged_rank=1, repeat axes 0 and 1',
rt_input=[[1, 2], [3]],
multiples=[3, 2],
expected=[
[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]],
),
#=========================================================================
# rank=3, ragged_rank=2
#=========================================================================
dict(
descr='rank=3, ragged_rank=2, repeat axis 0',
rt_input=[[[1, 2], [3]], [], [[4]]],
multiples=[2, 1, 1],
expected=[[[1, 2], [3]], [], [[4]],
[[1, 2], [3]], [], [[4]]]),
dict(
descr='rank=3, ragged_rank=2, repeat axis 1',
rt_input=[[[1, 2], [3]], [], [[4]]],
multiples=[1, 2, 1],
expected=[[[1, 2], [3], [1, 2], [3]], [], [[4], [4]]]),
dict(
descr='rank=3, ragged_rank=2, repeat axis 2',
rt_input=[[[1, 2], [3]], [], [[4]]],
multiples=[1, 1, 2],
expected=[[[1, 2, 1, 2], [3, 3]], [], [[4, 4]]]),
dict(
descr='rank=3, ragged_rank=2, repeat axes 0 and 1',
rt_input=[[[1, 2], [3]], [], [[4]]],
multiples=[2, 2, 1],
expected=[[[1, 2], [3], [1, 2], [3]], [], [[4], [4]],
[[1, 2], [3], [1, 2], [3]], [], [[4], [4]]]),
dict(
descr='rank=3, ragged_rank=2, repeat axes 0 and 2',
rt_input=[[[1, 2], [3]], [], [[4]]],
multiples=[2, 1, 2],
expected=[[[1, 2, 1, 2], [3, 3]], [], [[4, 4]],
[[1, 2, 1, 2], [3, 3]], [], [[4, 4]]]),
dict(
descr='rank=3, ragged_rank=2, repeat axes 1 and 2',
rt_input=[[[1, 2], [3]], [], [[4]]],
multiples=[1, 2, 2],
expected=[[[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]],
[], [[4, 4], [4, 4]]]),
dict(
descr='rank=3, ragged_rank=2, repeat all axes',
rt_input=[[['a', 'b'], ['c']], [], [['d']]],
multiples=[4, 3, 2],
expected=[[[b'a', b'b']*2, [b'c']*2]*3, []*3, [[b'd']*2]*3]*4),
#=========================================================================
# rank=3, ragged_rank=1
#=========================================================================
dict(
descr='rank=3, ragged_rank=1, repeat axis 0',
ragged_rank=1,
rt_input=[[[1, 2], [3, 4]], [], [[5, 6]]],
multiples=[2, 1, 1],
expected=[[[1, 2], [3, 4]], [], [[5, 6]],
[[1, 2], [3, 4]], [], [[5, 6]]]),
dict(
descr='rank=3, ragged_rank=1, repeat axis 1',
ragged_rank=1,
rt_input=[[[1, 2], [3, 4]], [], [[5, 6]]],
multiples=[1, 2, 1],
expected=[[[1, 2], [3, 4], [1, 2], [3, 4]], [], [[5, 6], [5, 6]]]),
dict(
descr='rank=3, ragged_rank=1, repeat axis 2',
ragged_rank=1,
rt_input=[[[1, 2], [3, 4]], [], [[5, 6]]],
multiples=[1, 1, 2],
expected=[[[1, 2, 1, 2], [3, 4, 3, 4]], [], [[5, 6, 5, 6]]]),
#=========================================================================
# rank=4, ragged_rank=3
#=========================================================================
dict(
descr='rank=4, ragged_rank=3, repeat axis 0',
rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]],
multiples=[2, 1, 1, 1],
expected=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]],
[[[1], [2]], [[3]]], [[]], [[[4, 5]]]]),
dict(
descr='rank=4, ragged_rank=3, repeat axis 1',
rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]],
multiples=[1, 2, 1, 1],
expected=[[[[1], [2]], [[3]], [[1], [2]], [[3]]],
[[], []],
[[[4, 5]], [[4, 5]]]]),
dict(
descr='rank=4, ragged_rank=3, repeat axis 2',
rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]],
multiples=[1, 1, 2, 1],
expected=[[[[1], [2], [1], [2]], [[3], [3]]],
[[]],
[[[4, 5], [4, 5]]]]),
dict(
descr='rank=4, ragged_rank=3, repeat axis 3',
rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]],
multiples=[1, 1, 1, 2],
expected=[[[[1, 1], [2, 2]], [[3, 3]]], [[]], [[[4, 5, 4, 5]]]]),
dict(
descr='rank=4, ragged_rank=3, repeat all axes',
rt_input=[[[['a'], ['b']], [['c']]], [[]], [[['d', 'e']]]],
multiples=[5, 4, 3, 2],
expected=[[[[b'a']*2, [b'b']*2]*3, [[b'c']*2]*3]*4,
[[]*3]*4,
[[[b'd', b'e']*2]*3]*4]*5),
dict(
descr='rank=5, ragged_rank=4, repeat all axes',
rt_input=[[[[['a']]]]],
multiples=[6, 5, 4, 3, 2],
expected=[[[[[b'a']*2]*3]*4]*5]*6),
#=========================================================================
# multiple=0
#=========================================================================
dict(
descr='rank=4, ragged_rank=3, repeat axis 0 (multiple=0)',
rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]],
multiples=[0, 1, 1, 1],
expected=[]),
dict(
descr='rank=4, ragged_rank=3, repeat axis 1 (multiple=0)',
rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]],
multiples=[1, 0, 1, 1],
expected=[[], [], []]),
dict(
descr='rank=4, ragged_rank=3, repeat axis 2 (multiple=0)',
rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]],
multiples=[1, 1, 0, 1],
expected=[[[], []], [[]], [[]]]),
dict(
descr='rank=4, ragged_rank=3, repeat axis 3 (multiple=0)',
rt_input=[[[[1], [2]], [[3]]], [[]], [[[4, 5]]]],
multiples=[1, 1, 1, 0],
expected=[[[[], []], [[]]], [[]], [[[]]]]),
#=========================================================================
# multiple=1
#=========================================================================
dict(
descr='rank=4, multiples=1 (no repeats)',
rt_input=[[[[1], [2]], [[3], [4]]], [[[5], [6]]]],
multiples=[1, 1, 1, 1],
expected=[[[[1], [2]], [[3], [4]]],
[[[5], [6]]]]),
]) # pyformat: disable
def testRaggedTile(self,
descr,
rt_input,
multiples,
expected,
ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank)
expected_shape = [
None if dim is None else dim * multiple
for (dim, multiple) in zip(rt.shape.as_list(), multiples)
]
# Test with both const & non-const multiples: ragged_tile has a few code
# paths that optimize the case where multiples[d] is known to be 1.
const_multiples = constant_op.constant(multiples, dtypes.int64)
non_const_multiples = array_ops.placeholder_with_default(
const_multiples, shape=[len(multiples)])
for multiples_tensor in (const_multiples, non_const_multiples):
tiled = ragged_array_ops.tile(rt, multiples_tensor)
self.assertEqual(tiled.ragged_rank, rt.ragged_rank)
self.assertEqual(tiled.shape.ndims, rt.shape.ndims)
if multiples_tensor is const_multiples:
self.assertEqual(tiled.shape.as_list(), expected_shape)
self.assertRaggedEqual(tiled, expected)
def testRaggedTileWithTensorInput(self):
# When the input is a `Tensor`, ragged_tile just delegates to tf.tile.
dt = constant_op.constant([[1, 2], [3, 4]])
tiled = ragged_array_ops.tile(dt, [3, 2])
expected = [[1, 2, 1, 2], [3, 4, 3, 4],
[1, 2, 1, 2], [3, 4, 3, 4],
[1, 2, 1, 2], [3, 4, 3, 4]] # pyformat: disable
self.assertRaggedEqual(tiled, expected)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_tile_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to convert between RaggedTensors and other tensor types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.ragged import ragged_tensor
def from_tensor(tensor, lengths=None, padding=None, ragged_rank=1,
row_splits_dtype=dtypes.int64, name=None):
if ragged_tensor.is_ragged(tensor):
return tensor
else:
return ragged_tensor.RaggedTensor.from_tensor(
tensor,
lengths=lengths,
padding=padding,
ragged_rank=ragged_rank,
row_splits_dtype=row_splits_dtype,
name=name)
def to_tensor(rt_input, default_value=None, name=None):
if ragged_tensor.is_ragged(rt_input):
return rt_input.to_tensor(default_value, name)
else:
return rt_input
def to_sparse(rt_input, name=None):
return rt_input.to_sparse(name)
def from_sparse(st_input, name=None):
return ragged_tensor.RaggedTensor.from_sparse(st_input, name)
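# Illustrative sketch (an addition): these helpers simply delegate to the
# corresponding RaggedTensor methods, so e.g.
#   st = sparse_tensor.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
#                                   values=[1, 2, 3], dense_shape=[2, 2])
#   from_sparse(st)  would be expected to produce  [[1, 2], [3]]
# (sparse_tensor here is assumed to come from tensorflow.python.framework).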
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_conversion_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.platform import googletest
# Example 3d tensor for test cases. Has shape [4, 2, 3].
TENSOR_3D = [[[('%d%d%d' % (i, j, k)).encode('utf-8')
for k in range(3)]
for j in range(2)]
for i in range(4)]
# Example 4d tensor for test cases. Has shape [4, 2, 3, 5].
TENSOR_4D = [[[[('%d%d%d%d' % (i, j, k, l)).encode('utf-8')
for l in range(5)]
for k in range(3)]
for j in range(2)]
for i in range(4)]
@test_util.run_all_in_graph_and_eager_modes
class RaggedUtilTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
# Docstring examples
dict(
data=['a', 'b', 'c'],
repeats=[3, 0, 2],
axis=0,
expected=[b'a', b'a', b'a', b'c', b'c']),
dict(
data=[[1, 2], [3, 4]],
repeats=[2, 3],
axis=0,
expected=[[1, 2], [1, 2], [3, 4], [3, 4], [3, 4]]),
dict(
data=[[1, 2], [3, 4]],
repeats=[2, 3],
axis=1,
expected=[[1, 1, 2, 2, 2], [3, 3, 4, 4, 4]]),
# Scalar repeats value
dict(
data=['a', 'b', 'c'],
repeats=2,
axis=0,
expected=[b'a', b'a', b'b', b'b', b'c', b'c']),
dict(
data=[[1, 2], [3, 4]],
repeats=2,
axis=0,
expected=[[1, 2], [1, 2], [3, 4], [3, 4]]),
dict(
data=[[1, 2], [3, 4]],
repeats=2,
axis=1,
expected=[[1, 1, 2, 2], [3, 3, 4, 4]]),
# data & repeats are broadcast to have at least one dimension,
# so these are all equivalent:
dict(data=3, repeats=4, axis=0, expected=[3, 3, 3, 3]),
dict(data=[3], repeats=4, axis=0, expected=[3, 3, 3, 3]),
dict(data=3, repeats=[4], axis=0, expected=[3, 3, 3, 3]),
dict(data=[3], repeats=[4], axis=0, expected=[3, 3, 3, 3]),
# Empty tensor
dict(data=[], repeats=[], axis=0, expected=[]),
])
def testRepeat(self, data, repeats, expected, axis=None):
result = ragged_util.repeat(data, repeats, axis)
self.assertAllEqual(result, expected)
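  # Illustrative note (an addition): ragged_util.repeat is intended to mirror
  # np.repeat, e.g. np.repeat(['a', 'b', 'c'], [3, 0, 2], axis=0) gives
  # ['a', 'a', 'a', 'c', 'c'], matching the first docstring case above.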
@parameterized.parameters([
dict(mode=mode, **args)
for mode in ['constant', 'dynamic', 'unknown_shape']
for args in [
# data & repeats are broadcast to have at least one dimension,
# so these are all equivalent:
dict(data=3, repeats=4, axis=0),
dict(data=[3], repeats=4, axis=0),
dict(data=3, repeats=[4], axis=0),
dict(data=[3], repeats=[4], axis=0),
# 1-dimensional data tensor.
dict(data=[], repeats=5, axis=0),
dict(data=[1, 2, 3], repeats=5, axis=0),
dict(data=[1, 2, 3], repeats=[3, 0, 2], axis=0),
dict(data=[1, 2, 3], repeats=[3, 0, 2], axis=-1),
dict(data=[b'a', b'b', b'c'], repeats=[3, 0, 2], axis=0),
# 2-dimensional data tensor.
dict(data=[[1, 2, 3], [4, 5, 6]], repeats=3, axis=0),
dict(data=[[1, 2, 3], [4, 5, 6]], repeats=3, axis=1),
dict(data=[[1, 2, 3], [4, 5, 6]], repeats=[3, 5], axis=0),
dict(data=[[1, 2, 3], [4, 5, 6]], repeats=[3, 5, 7], axis=1),
# 3-dimensional data tensor: shape=[4, 2, 3].
dict(data=TENSOR_3D, repeats=2, axis=0),
dict(data=TENSOR_3D, repeats=2, axis=1),
dict(data=TENSOR_3D, repeats=2, axis=2),
dict(data=TENSOR_3D, repeats=[2, 0, 4, 1], axis=0),
dict(data=TENSOR_3D, repeats=[3, 2], axis=1),
dict(data=TENSOR_3D, repeats=[1, 3, 1], axis=2),
# 4-dimensional data tensor: shape=[4, 2, 3, 5].
dict(data=TENSOR_4D, repeats=2, axis=0),
dict(data=TENSOR_4D, repeats=2, axis=1),
dict(data=TENSOR_4D, repeats=2, axis=2),
dict(data=TENSOR_4D, repeats=2, axis=3),
dict(data=TENSOR_4D, repeats=[2, 0, 4, 1], axis=0),
dict(data=TENSOR_4D, repeats=[3, 2], axis=1),
dict(data=TENSOR_4D, repeats=[1, 3, 1], axis=2),
dict(data=TENSOR_4D, repeats=[1, 3, 0, 0, 2], axis=3),
]
])
def testValuesMatchesNumpy(self, mode, data, repeats, axis):
# Exception: we can't handle negative axis if data.ndims is unknown.
if axis < 0 and mode == 'unknown_shape':
return
expected = np.repeat(data, repeats, axis)
if mode == 'constant':
data = constant_op.constant(data)
repeats = constant_op.constant(repeats)
elif mode == 'dynamic':
data = constant_op.constant(data)
repeats = constant_op.constant(repeats)
data = array_ops.placeholder_with_default(data, data.shape)
repeats = array_ops.placeholder_with_default(repeats, repeats.shape)
elif mode == 'unknown_shape':
data = array_ops.placeholder_with_default(data, None)
repeats = array_ops.placeholder_with_default(repeats, None)
result = ragged_util.repeat(data, repeats, axis)
self.assertAllEqual(result, expected)
@parameterized.parameters([
dict(
descr='axis >= rank(data)',
mode='dynamic',
data=[1, 2, 3],
repeats=[3, 0, 2],
axis=1,
error='axis=1 out of bounds: expected -1<=axis<1'),
dict(
descr='axis < -rank(data)',
mode='dynamic',
data=[1, 2, 3],
repeats=[3, 0, 2],
axis=-2,
error='axis=-2 out of bounds: expected -1<=axis<1'),
dict(
descr='len(repeats) != data.shape[axis]',
mode='dynamic',
data=[[1, 2, 3], [4, 5, 6]],
repeats=[2, 3],
axis=1,
error='Dimensions 3 and 2 are not compatible'),
dict(
descr='rank(repeats) > 1',
mode='dynamic',
data=[[1, 2, 3], [4, 5, 6]],
repeats=[[3], [5]],
axis=1,
error=r'Shape \(2, 1\) must have rank at most 1'),
dict(
descr='non-integer axis',
mode='constant',
data=[1, 2, 3],
repeats=2,
axis='foo',
exception=TypeError,
error='axis must be an int'),
])
def testError(self,
descr,
mode,
data,
repeats,
axis,
exception=ValueError,
error=None):
# Make sure that this is also an error case for numpy.
with self.assertRaises(exception):
np.repeat(data, repeats, axis)
if mode == 'constant':
data = constant_op.constant(data)
repeats = constant_op.constant(repeats)
elif mode == 'dynamic':
data = constant_op.constant(data)
repeats = constant_op.constant(repeats)
data = array_ops.placeholder_with_default(data, data.shape)
repeats = array_ops.placeholder_with_default(repeats, repeats.shape)
elif mode == 'unknown_shape':
data = array_ops.placeholder_with_default(data, None)
repeats = array_ops.placeholder_with_default(repeats, None)
with self.assertRaisesRegexp(exception, error):
ragged_util.repeat(data, repeats, axis)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_util_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Value for RaggedTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["ragged.RaggedTensorValue"])
class RaggedTensorValue(object):
"""Represents the value of a `RaggedTensor`.
Warning: `RaggedTensorValue` should only be used in graph mode; in
eager mode, the `tf.RaggedTensor` class contains its value directly.
See `tf.RaggedTensor` for a description of ragged tensors.
"""
def __init__(self, values, row_splits):
"""Creates a `RaggedTensorValue`.
Args:
values: A numpy array of any type and shape; or a RaggedTensorValue.
row_splits: A 1-D int32 or int64 numpy array.
"""
if not (isinstance(row_splits, (np.ndarray, np.generic)) and
row_splits.dtype in (np.int64, np.int32) and row_splits.ndim == 1):
raise TypeError("row_splits must be a 1D int32 or int64 numpy array")
if not isinstance(values, (np.ndarray, np.generic, RaggedTensorValue)):
raise TypeError("values must be a numpy array or a RaggedTensorValue")
if (isinstance(values, RaggedTensorValue) and
row_splits.dtype != values.row_splits.dtype):
raise ValueError("row_splits and values.row_splits must have "
"the same dtype")
self._values = values
self._row_splits = row_splits
row_splits = property(
lambda self: self._row_splits,
doc="""The split indices for the ragged tensor value.""")
values = property(
lambda self: self._values,
doc="""The concatenated values for all rows in this tensor.""")
dtype = property(
lambda self: self._values.dtype,
doc="""The numpy dtype of values in this tensor.""")
@property
def flat_values(self):
"""The innermost `values` array for this ragged tensor value."""
rt_values = self.values
while isinstance(rt_values, RaggedTensorValue):
rt_values = rt_values.values
return rt_values
@property
def nested_row_splits(self):
"""The row_splits for all ragged dimensions in this ragged tensor value."""
rt_nested_splits = [self.row_splits]
rt_values = self.values
while isinstance(rt_values, RaggedTensorValue):
rt_nested_splits.append(rt_values.row_splits)
rt_values = rt_values.values
return tuple(rt_nested_splits)
@property
def ragged_rank(self):
"""The number of ragged dimensions in this ragged tensor value."""
values_is_ragged = isinstance(self._values, RaggedTensorValue)
return self._values.ragged_rank + 1 if values_is_ragged else 1
@property
def shape(self):
"""A tuple indicating the shape of this RaggedTensorValue."""
return (self._row_splits.shape[0] - 1,) + (None,) + self._values.shape[1:]
def __str__(self):
return "<tf.RaggedTensorValue %s>" % self.to_list()
def __repr__(self):
return "tf.RaggedTensorValue(values=%r, row_splits=%r)" % (self._values,
self._row_splits)
def to_list(self):
"""Returns this ragged tensor value as a nested Python list."""
if isinstance(self._values, RaggedTensorValue):
values_as_list = self._values.to_list()
else:
values_as_list = self._values.tolist()
return [
values_as_list[self._row_splits[i]:self._row_splits[i + 1]]
for i in range(len(self._row_splits) - 1)
]
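# Illustrative usage sketch (an addition, not part of the original file):
#   rtv = RaggedTensorValue(values=np.array([1, 2, 3, 4]),
#                           row_splits=np.array([0, 2, 2, 4], dtype=np.int64))
#   rtv.to_list()    ->  [[1, 2], [], [3, 4]]
#   rtv.shape        ->  (3, None)
#   rtv.ragged_rank  ->  1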
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_tensor_value.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_placeholder op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedPlaceholderOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
# dtype, ragged_rank, value_shape, name -> expected
(dtypes.int32, 0, [5], None,
'Tensor("Placeholder:0", shape=(5,), dtype=int32)'),
(dtypes.int32, 1, [], 'ph',
'tf.RaggedTensor('
'values=Tensor("ph/flat_values:0", shape=(None,), dtype=int32), '
'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'),
(dtypes.string, 1, [5], 'ph',
'tf.RaggedTensor('
'values=Tensor("ph/flat_values:0", shape=(None, 5), dtype=string), '
'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'),
(dtypes.float32, 2, [], 'ph',
'tf.RaggedTensor(values=tf.RaggedTensor('
'values=Tensor("ph/flat_values:0", shape=(None,), dtype=float32), '
'row_splits=Tensor("ph/row_splits_1:0", shape=(None,), dtype=int64)), '
'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'),
(dtypes.int32, 2, [3, 5], 'ph',
'tf.RaggedTensor(values=tf.RaggedTensor('
'values=Tensor("ph/flat_values:0", shape=(None, 3, 5), dtype=int32), '
'row_splits=Tensor("ph/row_splits_1:0", shape=(None,), dtype=int64)), '
'row_splits=Tensor("ph/row_splits_0:0", shape=(None,), dtype=int64))'),
])
def testRaggedPlaceholder(self, dtype, ragged_rank, value_shape, name,
expected):
if not context.executing_eagerly():
placeholder = ragged_factory_ops.placeholder(
dtype, ragged_rank, value_shape, name)
result = str(placeholder).replace('?', 'None')
self.assertEqual(result, expected)
def testRaggedPlaceholderRaisesExceptionInEagerMode(self):
if context.executing_eagerly():
with self.assertRaises(RuntimeError):
ragged_factory_ops.placeholder(dtypes.int32, 1, [])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_placeholder_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.bounding_shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorBoundingShapeOp(ragged_test_util.RaggedTensorTestCase):
def testDocStringExample(self):
# This is the example from ragged.bounding_shape.__doc__.
rt = ragged_factory_ops.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9],
[10]])
self.assertRaggedEqual(rt.bounding_shape(), [5, 4])
def test2DRaggedTensorWithOneRaggedDimension(self):
values = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
rt1 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 2, 5, 6, 6, 7])
rt2 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 7])
rt3 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 0, 7, 7])
self.assertRaggedEqual(rt1.bounding_shape(), [5, 3])
self.assertRaggedEqual(rt2.bounding_shape(), [1, 7])
self.assertRaggedEqual(rt3.bounding_shape(), [3, 7])
def test3DRaggedTensorWithOneRaggedDimension(self):
values = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]
rt1 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 2, 5, 6, 6, 7])
rt2 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 7])
rt3 = ragged_tensor.RaggedTensor.from_row_splits(values, [0, 0, 7, 7])
self.assertRaggedEqual(rt1.bounding_shape(), [5, 3, 2])
self.assertRaggedEqual(rt2.bounding_shape(), [1, 7, 2])
self.assertRaggedEqual(rt3.bounding_shape(), [3, 7, 2])
def testExplicitAxisOptimizations(self):
rt = ragged_tensor.RaggedTensor.from_row_splits(b'a b c d e f g'.split(),
[0, 2, 5, 6, 6, 7])
self.assertRaggedEqual(rt.bounding_shape(0), 5)
self.assertRaggedEqual(rt.bounding_shape(1), 3)
self.assertRaggedEqual(rt.bounding_shape([1, 0]), [3, 5])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_tensor_bounding_shape_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedGatherOpTest(ragged_test_util.RaggedTensorTestCase):
def testDocStringExamples(self):
params = constant_op.constant(['a', 'b', 'c', 'd', 'e'])
indices = constant_op.constant([3, 1, 2, 1, 0])
ragged_params = ragged_factory_ops.constant([['a', 'b', 'c'], ['d'], [],
['e']])
ragged_indices = ragged_factory_ops.constant([[3, 1, 2], [1], [], [0]])
self.assertRaggedEqual(
ragged_gather_ops.gather(params, ragged_indices),
[[b'd', b'b', b'c'], [b'b'], [], [b'a']])
self.assertRaggedEqual(
ragged_gather_ops.gather(ragged_params, indices),
[[b'e'], [b'd'], [], [b'd'], [b'a', b'b', b'c']])
self.assertRaggedEqual(
ragged_gather_ops.gather(ragged_params, ragged_indices),
[[[b'e'], [b'd'], []], [[b'd']], [], [[b'a', b'b', b'c']]])
def testTensorParamsAndTensorIndices(self):
params = ['a', 'b', 'c', 'd', 'e']
indices = [2, 0, 2, 1]
self.assertRaggedEqual(
ragged_gather_ops.gather(params, indices), [b'c', b'a', b'c', b'b'])
self.assertIsInstance(ragged_gather_ops.gather(params, indices), ops.Tensor)
def testRaggedParamsAndTensorIndices(self):
params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'],
[], ['g']])
indices = [2, 0, 2, 1]
self.assertRaggedEqual(
ragged_gather_ops.gather(params, indices),
[[b'f'], [b'a', b'b'], [b'f'], [b'c', b'd', b'e']])
def testTensorParamsAndRaggedIndices(self):
params = ['a', 'b', 'c', 'd', 'e']
indices = ragged_factory_ops.constant([[2, 1], [1, 2, 0], [3]])
self.assertRaggedEqual(
ragged_gather_ops.gather(params, indices),
[[b'c', b'b'], [b'b', b'c', b'a'], [b'd']])
def testRaggedParamsAndRaggedIndices(self):
params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'],
[], ['g']])
indices = ragged_factory_ops.constant([[2, 1], [1, 2, 0], [3]])
self.assertRaggedEqual(
ragged_gather_ops.gather(params, indices),
[[[b'f'], [b'c', b'd', b'e']], # [[p[2], p[1] ],
[[b'c', b'd', b'e'], [b'f'], [b'a', b'b']], # [p[1], p[2], p[0]],
[[]]] # [p[3] ]]
) # pyformat: disable
def testRaggedParamsAndScalarIndices(self):
params = ragged_factory_ops.constant([['a', 'b'], ['c', 'd', 'e'], ['f'],
[], ['g']])
indices = 1
self.assertRaggedEqual(
ragged_gather_ops.gather(params, indices), [b'c', b'd', b'e'])
def test3DRaggedParamsAnd2DTensorIndices(self):
params = ragged_factory_ops.constant([[['a', 'b'], []],
[['c', 'd'], ['e'], ['f']], [['g']]])
indices = [[1, 2], [0, 1], [2, 2]]
self.assertRaggedEqual(
ragged_gather_ops.gather(params, indices),
[[[[b'c', b'd'], [b'e'], [b'f']], [[b'g']]], # [[p1, p2],
[[[b'a', b'b'], []], [[b'c', b'd'], [b'e'], [b'f']]], # [p0, p1],
[[[b'g']], [[b'g']]]] # [p2, p2]]
) # pyformat: disable
def testTensorParamsAnd4DRaggedIndices(self):
indices = ragged_factory_ops.constant(
[[[[3, 4], [0, 6]], []], [[[2, 1], [1, 0]], [[2, 5]], [[2, 3]]],
[[[1, 0]]]], # pyformat: disable
ragged_rank=2,
inner_shape=(2,))
params = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
self.assertRaggedEqual(
ragged_gather_ops.gather(params, indices),
[[[[b'd', b'e'], [b'a', b'g']], []],
[[[b'c', b'b'], [b'b', b'a']], [[b'c', b'f']], [[b'c', b'd']]],
[[[b'b', b'a']]]]) # pyformat: disable
def testOutOfBoundsError(self):
tensor_params = ['a', 'b', 'c']
tensor_indices = [0, 1, 2]
ragged_params = ragged_factory_ops.constant([['a', 'b'], ['c']])
ragged_indices = ragged_factory_ops.constant([[0, 3]])
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'indices\[1\] = 3 is not in \[0, 3\)'):
self.evaluate(ragged_gather_ops.gather(tensor_params, ragged_indices))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'indices\[2\] = 2 is not in \[0, 2\)'):
self.evaluate(ragged_gather_ops.gather(ragged_params, tensor_indices))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r'indices\[1\] = 3 is not in \[0, 2\)'):
self.evaluate(ragged_gather_ops.gather(ragged_params, ragged_indices))
def testUnknownIndicesRankError(self):
if context.executing_eagerly():
return
params = ragged_factory_ops.constant([], ragged_rank=1)
indices = constant_op.constant([0], dtype=dtypes.int64)
indices = array_ops.placeholder_with_default(indices, None)
self.assertRaisesRegexp(ValueError,
r'indices\.shape\.ndims must be known statically',
ragged_gather_ops.gather, params, indices)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_gather_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.size."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedSizeOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
{'size': 1, 'test_input': 1},
{'size': 0, 'test_input': []},
{'size': 0, 'test_input': [], 'ragged_rank': 1},
{'size': 3, 'test_input': [1, 1, 1]},
{'size': 3, 'test_input': [[1, 1], [1]]},
{'size': 5, 'test_input': [[[1, 1, 1], [1]], [[1]]]},
{'size': 6, 'test_input': [[[1, 1], [1, 1]], [[1, 1]]], 'ragged_rank': 1},
])
def testRaggedSize(self, test_input, size, ragged_rank=None):
input_rt = ragged_factory_ops.constant(test_input, ragged_rank=ragged_rank)
self.assertAllEqual(ragged_array_ops.size(input_rt), size)
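# Rough summary of the cases above (assuming ragged_array_ops.size counts
# scalar elements): the expected `size` is the total number of scalars,
# e.g. [[1, 1], [1]] holds three and [[[1, 1, 1], [1]], [[1]]] holds five.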
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_size_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_array_ops.concat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedConcatOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
def _rt_inputs_to_tensors(self, rt_inputs, ragged_ranks=None):
if ragged_ranks is None:
ragged_ranks = [None] * len(rt_inputs)
return [ # pylint: disable=g-long-ternary
ragged_factory_ops.constant(rt_input, ragged_rank=rrank)
if rrank != 0 else constant_op.constant(rt_input)
for (rt_input, rrank) in zip(rt_inputs, ragged_ranks)
]
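  # Rough sketch of what the helper above returns (read directly from its
  # body): an entry becomes a dense Tensor only when its ragged_rank is
  # explicitly 0; otherwise it becomes a RaggedTensor, e.g.
  #   self._rt_inputs_to_tensors(([[1, 2]], [[3, 4]]), ragged_ranks=[0, None])
  #   => [constant_op.constant([[1, 2]]),
  #       ragged_factory_ops.constant([[3, 4]])]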
@parameterized.parameters(
dict(
descr='Two rank-2 inputs with empty value axis=1',
rt_inputs=([[]], [[]]),
axis=1,
expected=[[]]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=0',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None)
[['b00'], ['b10']]), # shape=(2, None)
axis=0,
expected=[[b'a00', b'a01'], [], [b'a20', b'a21'], [b'b00'],
[b'b10']]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=1',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None)
axis=1,
expected=[
[b'a00', b'a01', b'b00'],
[b'b10', b'b11', b'b12'],
[b'a20', b'a21', b'a22', b'b20']]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=-2',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21']], # shape=(3, None)
[['b00'], ['b10']]), # shape=(2, None)
axis=-2,
expected=[[b'a00', b'a01'], [], [b'a20', b'a21'], [b'b00'],
[b'b10']]),
dict(
descr='Two rank-2 inputs (ragged_rank=1), axis=-1',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00'], ['b10', 'b11', 'b12'], ['b20']]), # shape=(3, None)
axis=-1,
expected=[
[b'a00', b'a01', b'b00'],
[b'b10', b'b11', b'b12'],
[b'a20', b'a21', b'a22', b'b20']],
expected_shape=[3, None]),
dict(
descr='Three rank-2 inputs (ragged_rank=1), axis=0',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00'], ['b10']], # shape=(2, None)
[['c00'], ['c10', 'c11'], ['c21']]), # shape=(3, None)
axis=0,
expected=[[b'a00', b'a01'], [], [b'a20', b'a21', b'a22'], [b'b00'],
[b'b10'], [b'c00'], [b'c10', b'c11'], [b'c21']]),
dict(
descr='Three rank-2 inputs (ragged_rank=1), axis=1',
rt_inputs=(
[['a00', 'a01'], [], ['a20', 'a21', 'a22']], # shape=(3, None)
[['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None)
[[], ['c10', 'c11'], ['c20', 'c21']]), # shape=(3, None)
axis=1,
expected=[
[b'a00', b'a01', b'b00'],
[b'b10', b'b11', b'b12', b'c10', b'c11'],
[b'a20', b'a21', b'a22', b'b20', b'c20', b'c21']]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=0',
rt_inputs=(
[[['a000', 'a001'], ['a010']],
[['a100', 'a101', 'a102'], ['a110', 'a111']]],
[[['b000']], [['b100', 'b101'], ['b110']]],
[[], [['c100', 'c101', 'c102', 'c103']], [[], ['c210', 'c211']]]),
axis=0,
expected=[
[[b'a000', b'a001'], [b'a010']],
[[b'a100', b'a101', b'a102'], [b'a110', b'a111']],
[[b'b000']],
[[b'b100', b'b101'], [b'b110']],
[],
[[b'c100', b'c101', b'c102', b'c103']],
[[], [b'c210', b'c211']]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=1',
rt_inputs=(
[[['a000', 'a001'], ['a010']],
[['a100', 'a101', 'a102'], ['a110', 'a111']]],
[[['b000']], [['b100', 'b101'], ['b110']]],
[[], [[], ['c110', 'c111']]]),
axis=1,
expected=[
[[b'a000', b'a001'], [b'a010'], [b'b000']],
[[b'a100', b'a101', b'a102'], [b'a110', b'a111'],
[b'b100', b'b101'], [b'b110'], [], [b'c110', b'c111']]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=2',
rt_inputs=(
[[['a000', 'a001'], ['a010']],
[['a100', 'a101', 'a102'], ['a110', 'a111']]],
[[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],
[[['c000'], ['c010']], [[], ['c110', 'c111']]]),
axis=2,
expected=[
[[b'a000', b'a001', b'c000'],
[b'a010', b'b010', b'b011', b'c010']],
[[b'a100', b'a101', b'a102', b'b100', b'b101'],
[b'a110', b'a111', b'b110', b'c110', b'c111']]]),
dict(
descr='Three rank-3 inputs (ragged_rank=2), axis=-1',
rt_inputs=(
[[['a000', 'a001'], ['a010']],
[['a100', 'a101', 'a102'], ['a110', 'a111']]],
[[[], ['b010', 'b011']], [['b100', 'b101'], ['b110']]],
[[['c000'], ['c010']], [[], ['c110', 'c111']]]),
axis=-1,
expected=[
[[b'a000', b'a001', b'c000'],
[b'a010', b'b010', b'b011', b'c010']],
[[b'a100', b'a101', b'a102', b'b100', b'b101'],
[b'a110', b'a111', b'b110', b'c110', b'c111']]]),
dict(
descr='ragged_concat([uniform, ragged, uniform], axis=1)',
ragged_ranks=[0, 1, 0],
rt_inputs=(
[['0('], ['1('], ['2(']], # shape=(3, 1)
[['b00'], ['b10', 'b11', 'b12'], ['b20']], # shape=(3, None)
[[')0'], [')1'], [')2']]), # shape=(3, 1)
axis=1,
expected=[
[b'0(', b'b00', b')0'],
[b'1(', b'b10', b'b11', b'b12', b')1'],
[b'2(', b'b20', b')2']]),
dict(
descr='ragged_concat([uniform, uniform], axis=0)',
ragged_ranks=[0, 0],
rt_inputs=(
[['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2)
[['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3)
axis=0,
expected=[
[b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21'],
[b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']],
expected_ragged_rank=1),
dict(
descr='ragged_concat([uniform, ragged], axis=0)',
ragged_ranks=[0, 1],
rt_inputs=(
[['a00', 'a01'], ['a10', 'a11'], ['a20', 'a21']], # shape=(3, 2)
[['b00', 'b01', 'b02'], ['b10', 'b11', 'b12']]), # shape=(2, 3)
axis=0,
expected=[
[b'a00', b'a01'], [b'a10', b'a11'], [b'a20', b'a21'],
[b'b00', b'b01', b'b02'], [b'b10', b'b11', b'b12']]),
dict(
descr='ragged_concat([uniform, ragged], axis=0) with rank-3 inputs',
ragged_ranks=[0, 2],
rt_inputs=(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]], # shape = (2, 2, 2)
[[[8], [8, 8]]]), # shape = (2, None, None)
axis=0,
expected=[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], [8, 8]]]),
dict(
descr='Two rank-3 inputs with ragged_rank=1, axis=-1',
ragged_ranks=[1, 1],
rt_inputs=(
[[[0, 1], [2, 3], [4, 5]], [], [[6, 7], [8, 9]]],
[[[9, 8], [7, 6], [5, 4]], [], [[3, 2], [1, 0]]]),
axis=-1,
expected=[
[[0, 1, 9, 8], [2, 3, 7, 6], [4, 5, 5, 4]], [],
[[6, 7, 3, 2], [8, 9, 1, 0]]],
expected_ragged_rank=1),
dict(
descr='ragged_concat([vector, vector], axis=0)',
ragged_ranks=[0, 0],
rt_inputs=([1, 2, 3], [4, 5, 6]),
axis=0,
expected=[1, 2, 3, 4, 5, 6]),
dict(
        descr='One input (so ragged_concat is a noop)',
rt_inputs=([['a00', 'a01'], [], ['a20', 'a21']],),
axis=0,
expected=[[b'a00', b'a01'], [], [b'a20', b'a21']]),
) # pyformat: disable
def testRaggedConcat(self,
descr,
rt_inputs,
axis,
expected,
ragged_ranks=None,
expected_ragged_rank=None,
expected_shape=None):
rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks)
concatenated = ragged_concat_ops.concat(rt_inputs, axis)
if expected_ragged_rank is not None:
self.assertEqual(concatenated.ragged_rank, expected_ragged_rank)
if expected_shape is not None:
self.assertEqual(concatenated.shape.as_list(), expected_shape)
self.assertRaggedEqual(concatenated, expected)
@parameterized.parameters(
dict(
rt_inputs=(),
axis=0,
error=ValueError,
message=r'rt_inputs may not be empty\.'),
dict(
rt_inputs=([[1, 2]], [[3, 4]]),
axis=r'foo',
error=TypeError,
message='axis must be an int'),
dict(
rt_inputs=([[1, 2]], [[3, 4]]),
axis=-3,
error=ValueError,
message='axis=-3 out of bounds: expected -2<=axis<2'),
dict(
rt_inputs=([[1, 2]], [[3, 4]]),
axis=2,
error=ValueError,
message='axis=2 out of bounds: expected -2<=axis<2'),
dict(
ragged_ranks=(0, 0),
rt_inputs=([[1, 2]], [[3, 4], [5, 6]]),
axis=1,
error=(ValueError, errors.InvalidArgumentError)),
)
def testStaticError(self,
rt_inputs,
axis,
error,
message=None,
ragged_ranks=None):
rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks)
self.assertRaisesRegexp(error, message, ragged_concat_ops.concat, rt_inputs,
axis)
@parameterized.parameters([
dict(
ragged_ranks=(1, 1),
rt_inputs=([[1, 2]], [[3, 4], [5, 6]]),
axis=1,
error=errors.InvalidArgumentError,
message='Input tensors have incompatible shapes'),
])
def testRuntimeError(self, rt_inputs, axis, error, message,
ragged_ranks=None):
if context.executing_eagerly():
return
rt_inputs = [
array_ops.placeholder_with_default(rt, shape=None) for rt in rt_inputs
]
concatenated = ragged_concat_ops.concat(rt_inputs, axis)
with self.assertRaisesRegexp(error, message):
self.evaluate(concatenated)
def testNegativeAxisWithUnknownRankError(self):
if context.executing_eagerly():
return
rt_inputs = [
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.int64)
]
self.assertRaisesRegexp(
ValueError, r'axis may only be negative if ndims is statically known.',
ragged_concat_ops.concat, rt_inputs, -1)
def testSingleTensorInput(self):
"""Tests ragged_concat with a single tensor input.
Usually, we pass a list of values in for rt_inputs. However, you can
also pass in a single value (as with tf.concat), in which case it simply
returns that tensor. This test exercises that path.
"""
rt_inputs = ragged_factory_ops.constant([[1, 2], [3, 4]])
concatenated = ragged_concat_ops.concat(rt_inputs, 0)
self.assertRaggedEqual(concatenated, [[1, 2], [3, 4]])
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_concat_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for third_party.tensorflow.python.ops.ragged_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
class _SliceBuilder(object):
"""Helper to construct arguments for __getitem__.
  Usage: _SliceBuilder()[<expr>] returns the slice_spec that Python generates
  for <expr>.
"""
def __getitem__(self, slice_spec):
return slice_spec
SLICE_BUILDER = _SliceBuilder()
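# Illustrative note (behavior follows directly from _SliceBuilder.__getitem__):
# indexing the builder simply echoes back the slice spec Python constructs,
# e.g.
#   SLICE_BUILDER[1:3]        => slice(1, 3, None)
#   SLICE_BUILDER[2, ..., 0]  => (2, Ellipsis, 0)
# so tests can pass arbitrary __getitem__ arguments around as plain values.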
def _make_tensor_slice_spec(slice_spec, use_constant=True):
"""Wraps all integers in an extended slice spec w/ a tensor.
This function is used to help test slicing when the slice spec contains
tensors, rather than integers.
Args:
slice_spec: The extended slice spec.
use_constant: If true, then wrap each integer with a tf.constant. If false,
then wrap each integer with a tf.placeholder.
Returns:
A copy of slice_spec, but with each integer i replaced with tf.constant(i).
"""
def make_piece_scalar(piece):
if isinstance(piece, int):
scalar = constant_op.constant(piece)
if use_constant:
return scalar
else:
return array_ops.placeholder_with_default(scalar, [])
elif isinstance(piece, slice):
return slice(
make_piece_scalar(piece.start), make_piece_scalar(piece.stop),
make_piece_scalar(piece.step))
else:
return piece
if isinstance(slice_spec, tuple):
return tuple(make_piece_scalar(piece) for piece in slice_spec)
else:
return make_piece_scalar(slice_spec)
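# Hedged usage sketch (assumed from the function body above):
#   _make_tensor_slice_spec(SLICE_BUILDER[1:3, 0], use_constant=True)
# returns (slice(<Tensor 1>, <Tensor 3>, None), <Tensor 0>), i.e. every plain
# int is replaced by a tf.constant; with use_constant=False each int is wrapped
# in placeholder_with_default instead, so its value is unknown at graph
# construction time.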
# Example 2D ragged tensor value with one ragged dimension and with scalar
# values, expressed as nested python lists and as splits+values.
EXAMPLE_RAGGED_TENSOR_2D = [[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [],
[b'g']]
EXAMPLE_RAGGED_TENSOR_2D_SPLITS = [0, 2, 5, 6, 6, 7]
EXAMPLE_RAGGED_TENSOR_2D_VALUES = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
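# Minimal sketch of how the splits partition the values into rows (a reading of
# the row_splits convention, given for orientation):
#   [EXAMPLE_RAGGED_TENSOR_2D_VALUES[start:limit]
#    for start, limit in zip(EXAMPLE_RAGGED_TENSOR_2D_SPLITS[:-1],
#                            EXAMPLE_RAGGED_TENSOR_2D_SPLITS[1:])]
#   => [['a', 'b'], ['c', 'd', 'e'], ['f'], [], ['g']]
# which matches EXAMPLE_RAGGED_TENSOR_2D up to bytes vs. str encoding.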
# Example 4D ragged tensor value, with two ragged dimensions and with values
# whose shape is [2], expressed as nested python lists and as splits+values.
EXAMPLE_RAGGED_TENSOR_4D = [
[ # rt[0]
[[1, 2], [3, 4], [5, 6]], # rt[0][0]
[[7, 8], [9, 10], [11, 12]]], # rt[0][1]
[], # rt[1]
[ # rt[2]
[[13, 14], [15, 16], [17, 18]]], # rt[2][0]
[ # rt[3]
[[19, 20]]] # rt[3][0]
] # pyformat: disable
EXAMPLE_RAGGED_TENSOR_4D_SPLITS1 = [0, 2, 2, 3, 4]
EXAMPLE_RAGGED_TENSOR_4D_SPLITS2 = [0, 3, 6, 9, 10]
EXAMPLE_RAGGED_TENSOR_4D_VALUES = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18],
[19, 20]]
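# Minimal sketch of how the two splits vectors rebuild the nested structure
# (an orientation note; see testRaggedTensorGetItemWithRaggedRank2 below):
# SPLITS2 = [0, 3, 6, 9, 10] groups the ten [2]-shaped values into inner rows
# of lengths [3, 3, 3, 1], and SPLITS1 = [0, 2, 2, 3, 4] groups those inner
# rows into outer rows of lengths [2, 0, 1, 1], so
#   RaggedTensor.from_nested_row_splits(
#       EXAMPLE_RAGGED_TENSOR_4D_VALUES,
#       [EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
# evaluates to EXAMPLE_RAGGED_TENSOR_4D.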
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
  longMessage = True  # Property in unittest.TestCase. pylint: disable=invalid-name
#=============================================================================
# RaggedTensor class docstring examples
#=============================================================================
def testClassDocStringExamples(self):
# From section: "Component Tensors"
rt = RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
self.assertRaggedEqual(rt, [[3, 1, 4, 1], [], [5, 9, 2], [6], []])
del rt
# From section: "Alternative Row-Partitioning Schemes"
values = [3, 1, 4, 1, 5, 9, 2, 6]
rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
rt3 = RaggedTensor.from_value_rowids(
values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
for rt in (rt1, rt2, rt3, rt4, rt5):
self.assertRaggedEqual(rt, [[3, 1, 4, 1], [], [5, 9, 2], [6], []])
del rt1, rt2, rt3, rt4, rt5
# From section: "Multiple Ragged Dimensions"
inner_rt = RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
outer_rt = RaggedTensor.from_row_splits(
values=inner_rt, row_splits=[0, 3, 3, 5])
self.assertEqual(outer_rt.ragged_rank, 2)
self.assertEqual(
self.eval_to_list(outer_rt),
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
del inner_rt, outer_rt
# From section: "Multiple Ragged Dimensions"
rt = RaggedTensor.from_nested_row_splits(
flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8]))
self.assertEqual(
self.eval_to_list(rt), [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
del rt
# From section: "Uniform Inner Dimensions"
rt = RaggedTensor.from_row_splits(
values=array_ops.ones([5, 3]), row_splits=[0, 2, 5])
self.assertEqual(
self.eval_to_list(rt),
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]])
self.assertEqual(rt.shape.as_list(), [2, None, 3])
del rt
#=============================================================================
# RaggedTensorValue Constructor
#=============================================================================
def testRaggedTensorValueConstruction(self):
values = np.array(b'a b c d e f g'.split())
splits = np.array([0, 2, 5, 6, 6, 7], dtype=np.int64)
splits2 = np.array([0, 3, 5], dtype=np.int64)
# Test construction of a RaggedTensorValue with ragged_rank=1.
rt_value = ragged_tensor_value.RaggedTensorValue(values, splits)
self.assertEqual(rt_value.row_splits.dtype, np.int64)
self.assertEqual(rt_value.shape, (5, None))
self.assertLen(rt_value.nested_row_splits, 1)
self.assertAllEqual(splits, rt_value.row_splits)
self.assertAllEqual(values, rt_value.values)
self.assertAllEqual(splits, rt_value.nested_row_splits[0])
self.assertAllEqual(values, rt_value.flat_values)
# Test construction of a RaggedTensorValue with ragged_rank=2.
rt_value = ragged_tensor_value.RaggedTensorValue(
values=ragged_tensor_value.RaggedTensorValue(values, splits),
row_splits=splits2)
self.assertEqual(rt_value.row_splits.dtype, np.int64)
self.assertEqual(rt_value.shape, (2, None, None))
self.assertLen(rt_value.nested_row_splits, 2)
self.assertAllEqual(splits2, rt_value.row_splits)
self.assertAllEqual(splits, rt_value.values.row_splits)
self.assertAllEqual(splits2, rt_value.nested_row_splits[0])
self.assertAllEqual(splits, rt_value.nested_row_splits[1])
self.assertAllEqual(values, rt_value.values.values)
self.assertAllEqual(values, rt_value.flat_values)
#=============================================================================
# RaggedTensor Constructor (private)
#=============================================================================
def testRaggedTensorConstruction(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
rt = RaggedTensor(values=values, row_splits=row_splits, internal=True)
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testRaggedTensorConstructionErrors(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
with self.assertRaisesRegexp(ValueError,
'RaggedTensor constructor is private'):
RaggedTensor(values=values, row_splits=row_splits)
with self.assertRaisesRegexp(TypeError,
'values must be a Tensor or RaggedTensor'):
RaggedTensor(values=range(7), row_splits=row_splits, internal=True)
with self.assertRaisesRegexp(TypeError,
'Row-partitioning argument must be a Tensor'):
RaggedTensor(values=values, row_splits=[0, 2, 2, 5, 6, 7], internal=True)
with self.assertRaisesRegexp(ValueError,
r'Shape \(6, 1\) must have rank 1'):
RaggedTensor(
values=values,
row_splits=array_ops.expand_dims(row_splits, 1),
internal=True)
with self.assertRaisesRegexp(TypeError,
'Cached value must be a Tensor or None.'):
RaggedTensor(
values=values,
row_splits=row_splits,
cached_row_lengths=[2, 3, 4],
internal=True)
#=============================================================================
# RaggedTensor Factory Ops
#=============================================================================
def testFromValueRowIdsWithDerivedNRows(self):
# nrows is known at graph creation time.
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
rt = RaggedTensor.from_value_rowids(values, value_rowids, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertEqual(self.eval_to_list(rt_nrows), 5)
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithDerivedNRowsDynamic(self):
# nrows is not known at graph creation time.
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
value_rowids = array_ops.placeholder_with_default(value_rowids, shape=None)
rt = RaggedTensor.from_value_rowids(values, value_rowids, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
if context.executing_eagerly():
self.assertEqual(rt.shape.as_list(), [5, None])
else:
self.assertEqual(rt.shape.as_list(), [None, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertEqual(self.eval_to_list(rt_nrows), 5)
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithExplicitNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(7, dtypes.int64)
rt = RaggedTensor.from_value_rowids(values, value_rowids, nrows,
validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [7, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertIs(rt_nrows, nrows) # cached_nrows
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g'], [], []])
def testFromValueRowIdsWithExplicitNRowsEqualToDefault(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(5, dtypes.int64)
rt = RaggedTensor.from_value_rowids(values, value_rowids, nrows,
validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertIs(rt_nrows, nrows) # cached_nrows
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertAllEqual(rt_nrows, nrows)
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithEmptyValues(self):
rt = RaggedTensor.from_value_rowids([], [])
rt_nrows = rt.nrows()
self.assertEqual(rt.dtype, dtypes.float32)
self.assertEqual(rt.shape.as_list(), [0, None])
self.assertEqual(rt.ragged_rank, 1)
self.assertEqual(rt.values.shape.as_list(), [0])
self.assertEqual(rt.value_rowids().shape.as_list(), [0])
self.assertEqual(self.eval_to_list(rt_nrows), 0)
self.assertEqual(self.eval_to_list(rt), [])
def testFromRowSplits(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
rt = RaggedTensor.from_row_splits(values, row_splits, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_row_splits = rt.row_splits
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_row_splits, row_splits)
self.assertEqual(self.eval_to_list(rt_nrows), 5)
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowSplitsWithEmptySplits(self):
err_msg = 'row_splits tensor may not be empty'
with self.assertRaisesRegexp(ValueError, err_msg):
RaggedTensor.from_row_splits([], [])
def testFromRowStarts(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_starts = constant_op.constant([0, 2, 2, 5, 6], dtypes.int64)
rt = RaggedTensor.from_row_starts(values, row_starts, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_row_starts = rt.row_starts()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertEqual(self.eval_to_list(rt_nrows), 5)
self.assertAllEqual(rt_row_starts, row_starts)
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowLimits(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_limits = constant_op.constant([2, 2, 5, 6, 7], dtypes.int64)
rt = RaggedTensor.from_row_limits(values, row_limits, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_row_limits = rt.row_limits()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertEqual(self.eval_to_list(rt_nrows), 5)
self.assertAllEqual(rt_row_limits, row_limits)
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowLengths(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_lengths = constant_op.constant([2, 0, 3, 1, 1], dtypes.int64)
rt = RaggedTensor.from_row_lengths(values, row_lengths, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_row_lengths = rt.row_lengths()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
    self.assertIs(rt_row_lengths, row_lengths)  # cached_row_lengths
self.assertEqual(self.eval_to_list(rt_nrows), 5)
self.assertAllEqual(rt_row_lengths, row_lengths)
self.assertEqual(
self.eval_to_list(rt),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromNestedValueRowIdsWithDerivedNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_value_rowids = [
constant_op.constant([0, 0, 1, 3, 3], dtypes.int64),
constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
]
rt = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [4, None, None])
self.assertEqual(rt.ragged_rank, 2)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_values_values = rt_values.values
rt_values_value_rowids = rt_values.value_rowids()
self.assertIs(rt_values_values, values)
self.assertAllEqual(rt_value_rowids, nested_value_rowids[0])
self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1])
self.assertEqual(
self.eval_to_list(rt),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
def testFromNestedValueRowIdsWithExplicitNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_value_rowids = [
constant_op.constant([0, 0, 1, 3, 3, 3], dtypes.int64),
constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
]
nrows = [
constant_op.constant(6, dtypes.int64),
constant_op.constant(6, dtypes.int64)
]
rt = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids,
nrows)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [6, None, None])
self.assertEqual(rt.ragged_rank, 2)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
rt_values_values = rt_values.values
rt_values_value_rowids = rt_values.value_rowids()
rt_values_nrows = rt_values.nrows()
self.assertIs(rt_values_values, values)
self.assertAllEqual(rt_value_rowids, nested_value_rowids[0])
self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1])
self.assertAllEqual(rt_nrows, nrows[0])
self.assertAllEqual(rt_values_nrows, nrows[1])
self.assertEqual(
self.eval_to_list(rt), [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [],
[[b'f'], [b'g'], []], [], []])
def testFromNestedValueRowIdsWithExplicitNRowsMismatch(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_value_rowids = [
constant_op.constant([0, 0, 1, 3, 3, 3], dtypes.int64),
constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
]
nrows = [constant_op.constant(6, dtypes.int64)]
with self.assertRaisesRegexp(
ValueError, 'nested_nrows must have the same '
'length as nested_value_rowids'):
RaggedTensor.from_nested_value_rowids(values, nested_value_rowids, nrows)
def testFromNestedValueRowIdsWithNonListInput(self):
with self.assertRaisesRegexp(
TypeError, 'nested_value_rowids must be a list of Tensors'):
RaggedTensor.from_nested_value_rowids(
[1, 2, 3], constant_op.constant([[0, 1, 2], [0, 1, 2]], dtypes.int64))
with self.assertRaisesRegexp(TypeError,
'nested_nrows must be a list of Tensors'):
RaggedTensor.from_nested_value_rowids([1, 2, 3], [[0, 1, 2], [0, 1, 2]],
constant_op.constant([3, 3]))
def testFromNestedRowSplits(self):
flat_values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_row_splits = [
constant_op.constant([0, 2, 3, 3, 5], dtypes.int64),
constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
]
rt = RaggedTensor.from_nested_row_splits(flat_values, nested_row_splits,
validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [4, None, None])
self.assertEqual(rt.ragged_rank, 2)
rt_values = rt.values
rt_row_splits = rt.row_splits
rt_values_values = rt_values.values
rt_values_row_splits = rt_values.row_splits
self.assertIs(rt_values_values, flat_values)
self.assertIs(rt_row_splits, nested_row_splits[0])
self.assertIs(rt_values_row_splits, nested_row_splits[1])
self.assertEqual(
self.eval_to_list(rt),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
def testFromNestedRowSplitsWithNonListInput(self):
with self.assertRaisesRegexp(TypeError,
'nested_row_splits must be a list of Tensors'):
RaggedTensor.from_nested_row_splits(
[1, 2], constant_op.constant([[0, 1, 2], [0, 1, 2]], dtypes.int64))
def testFromValueRowIdsWithBadNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(5, dtypes.int64)
with self.assertRaisesRegexp(ValueError, r'Expected nrows >= 0; got -2'):
RaggedTensor.from_value_rowids(
values=values,
value_rowids=array_ops.placeholder_with_default(value_rowids, None),
nrows=-2)
with self.assertRaisesRegexp(
ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=2, '
r'value_rowids\[-1\]=4'):
RaggedTensor.from_value_rowids(
values=values, value_rowids=value_rowids, nrows=2)
with self.assertRaisesRegexp(
ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=4, '
r'value_rowids\[-1\]=4'):
RaggedTensor.from_value_rowids(
values=values, value_rowids=value_rowids, nrows=4)
with self.assertRaisesRegexp(ValueError,
r'Shape \(7, 1\) must have rank 1'):
RaggedTensor.from_value_rowids(
values=values,
value_rowids=array_ops.expand_dims(value_rowids, 1),
nrows=nrows)
with self.assertRaisesRegexp(ValueError, r'Shape \(1,\) must have rank 0'):
RaggedTensor.from_value_rowids(
values=values,
value_rowids=value_rowids,
nrows=array_ops.expand_dims(nrows, 0))
def testGraphMismatch(self):
if not context.executing_eagerly():
with ops.Graph().as_default():
values = constant_op.constant([1, 2, 3], dtypes.int64)
with ops.Graph().as_default():
splits = constant_op.constant([0, 2, 3], dtypes.int64)
self.assertRaisesRegexp(ValueError,
'.* must be from the same graph as .*',
RaggedTensor.from_row_splits, values, splits)
#=============================================================================
# Ragged Value & Row-Partitioning Tensor Accessors
#=============================================================================
def testRaggedTensorAccessors_2d(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
rt1 = RaggedTensor.from_row_splits(values, row_splits)
rt2 = RaggedTensor.from_value_rowids(values, value_rowids)
for rt in [rt1, rt2]:
self.assertRaggedEqual(
rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertAllEqual(rt.values, [b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertEqual(rt.values.shape.dims[0].value, 7)
self.assertAllEqual(rt.value_rowids(), [0, 0, 2, 2, 2, 3, 4])
self.assertAllEqual(rt.nrows(), 5)
self.assertAllEqual(rt.row_splits, [0, 2, 2, 5, 6, 7])
self.assertAllEqual(rt.row_starts(), [0, 2, 2, 5, 6])
self.assertAllEqual(rt.row_limits(), [2, 2, 5, 6, 7])
self.assertAllEqual(rt.row_lengths(), [2, 0, 3, 1, 1])
self.assertAllEqual(rt.flat_values,
[b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertLen(rt.nested_row_splits, 1)
self.assertAllEqual(rt.nested_row_splits[0], [0, 2, 2, 5, 6, 7])
def testRaggedTensorAccessors_3d_with_ragged_rank_1(self):
values = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
rt1 = RaggedTensor.from_row_splits(values, row_splits)
rt2 = RaggedTensor.from_value_rowids(values, value_rowids)
for rt in [rt1, rt2]:
self.assertEqual(
self.eval_to_list(rt),
[[[0, 1], [2, 3]], [], [[4, 5], [6, 7], [8, 9]], [[10, 11]],
[[12, 13]]])
self.assertEqual(
self.eval_to_list(rt.values),
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]])
self.assertEqual(rt.values.shape.dims[0].value, 7)
self.assertEqual(
self.eval_to_list(rt.value_rowids()), [0, 0, 2, 2, 2, 3, 4])
self.assertEqual(self.eval_to_list(rt.nrows()), 5)
self.assertEqual(self.eval_to_list(rt.row_splits), [0, 2, 2, 5, 6, 7])
self.assertEqual(self.eval_to_list(rt.row_starts()), [0, 2, 2, 5, 6])
self.assertEqual(self.eval_to_list(rt.row_limits()), [2, 2, 5, 6, 7])
self.assertEqual(self.eval_to_list(rt.row_lengths()), [2, 0, 3, 1, 1])
self.assertEqual(
self.eval_to_list(rt.flat_values),
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]])
self.assertEqual([self.eval_to_list(s) for s in rt.nested_row_splits],
[[0, 2, 2, 5, 6, 7]])
def testRaggedTensorAccessors_3d_with_ragged_rank_2(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_row_splits = [
constant_op.constant([0, 2, 3, 3, 5], dtypes.int64),
constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
]
nested_value_rowids = [
constant_op.constant([0, 0, 1, 3, 3], dtypes.int64),
constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
]
rt1 = RaggedTensor.from_nested_row_splits(values, nested_row_splits)
rt2 = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids)
for rt in [rt1, rt2]:
self.assertEqual(
self.eval_to_list(rt),
[[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
self.assertEqual(
self.eval_to_list(rt.values),
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
self.assertEqual(rt.values.shape.dims[0].value, 5)
self.assertEqual(self.eval_to_list(rt.value_rowids()), [0, 0, 1, 3, 3])
self.assertEqual(self.eval_to_list(rt.nrows()), 4)
self.assertEqual(self.eval_to_list(rt.row_splits), [0, 2, 3, 3, 5])
self.assertEqual(self.eval_to_list(rt.row_starts()), [0, 2, 3, 3])
self.assertEqual(self.eval_to_list(rt.row_limits()), [2, 3, 3, 5])
self.assertEqual(self.eval_to_list(rt.row_lengths()), [2, 1, 0, 2])
self.assertEqual(
self.eval_to_list(rt.flat_values),
[b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
self.assertEqual([self.eval_to_list(s) for s in rt.nested_row_splits],
[[0, 2, 3, 3, 5], [0, 2, 2, 5, 6, 7]])
#=============================================================================
# RaggedTensor.shape
#=============================================================================
def testShape(self):
"""Tests for RaggedTensor.shape."""
rt1 = RaggedTensor.from_row_splits(b'a b c d e f g'.split(),
[0, 2, 5, 6, 6, 7])
self.assertEqual(rt1.shape.as_list(), [5, None])
rt2 = RaggedTensor.from_row_splits(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]],
[0, 2, 5, 6, 6, 7])
self.assertEqual(rt2.shape.as_list(), [5, None, 2])
rt3 = RaggedTensor.from_row_splits(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], [0, 2, 2, 3])
self.assertEqual(rt3.shape.as_list(), [3, None, 2, 2])
rt4 = RaggedTensor.from_row_splits(rt3, [0, 1, 3, 3])
self.assertEqual(rt4.shape.as_list(), [3, None, None, 2, 2])
if not context.executing_eagerly():
rt5 = RaggedTensor.from_row_splits(
array_ops.placeholder(dtype=dtypes.string), [0, 2, 3, 5])
self.assertEqual(rt5.shape.ndims, None)
rt6 = RaggedTensor.from_row_splits(
[1, 2, 3], array_ops.placeholder(dtype=dtypes.int64))
self.assertEqual(rt6.shape.as_list(), [None, None])
#=============================================================================
# RaggedTensor.__getitem__
#=============================================================================
def _TestGetItem(self, rt, slice_spec, expected):
"""Helper function for testing RaggedTensor.__getitem__.
    Checks that calling `rt.__getitem__(slice_spec)` returns the expected value.
Checks three different configurations for each slice spec:
* Call __getitem__ with the slice spec as-is (with int values)
* Call __getitem__ with int values in the slice spec wrapped in
`tf.constant()`.
* Call __getitem__ with int values in the slice spec wrapped in
`tf.compat.v1.placeholder()` (so value is not known at graph
construction time).
Args:
rt: The RaggedTensor to test.
slice_spec: The slice spec.
expected: The expected value of rt.__getitem__(slice_spec), as a python
list; or an exception class.
"""
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)
value1 = self.eval_to_list(rt.__getitem__(slice_spec))
value2 = self.eval_to_list(rt.__getitem__(tensor_slice_spec1))
value3 = self.eval_to_list(rt.__getitem__(tensor_slice_spec2))
self.assertEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))
self.assertEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))
self.assertEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
def _TestGetItemException(self, rt, slice_spec, expected, message):
"""Helper function for testing RaggedTensor.__getitem__ exceptions."""
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
self.assertRaisesRegexp(expected, message, rt.__getitem__, slice_spec)
self.assertRaisesRegexp(expected, message, rt.__getitem__,
tensor_slice_spec1)
@parameterized.parameters(
# Tests for rt[i]
(SLICE_BUILDER[-5], EXAMPLE_RAGGED_TENSOR_2D[-5]),
(SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
(SLICE_BUILDER[-1], EXAMPLE_RAGGED_TENSOR_2D[-1]),
(SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
(SLICE_BUILDER[1], EXAMPLE_RAGGED_TENSOR_2D[1]),
(SLICE_BUILDER[4], EXAMPLE_RAGGED_TENSOR_2D[4]),
# Tests for rt[i:]
(SLICE_BUILDER[-6:], EXAMPLE_RAGGED_TENSOR_2D[-6:]),
(SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
(SLICE_BUILDER[-1:], EXAMPLE_RAGGED_TENSOR_2D[-1:]),
(SLICE_BUILDER[0:], EXAMPLE_RAGGED_TENSOR_2D[0:]),
(SLICE_BUILDER[3:], EXAMPLE_RAGGED_TENSOR_2D[3:]),
(SLICE_BUILDER[5:], EXAMPLE_RAGGED_TENSOR_2D[5:]),
# Tests for rt[:j]
(SLICE_BUILDER[:-6], EXAMPLE_RAGGED_TENSOR_2D[:-6]),
(SLICE_BUILDER[:-3], EXAMPLE_RAGGED_TENSOR_2D[:-3]),
(SLICE_BUILDER[:-1], EXAMPLE_RAGGED_TENSOR_2D[:-1]),
(SLICE_BUILDER[:0], EXAMPLE_RAGGED_TENSOR_2D[:0]),
(SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
(SLICE_BUILDER[:5], EXAMPLE_RAGGED_TENSOR_2D[:5]),
# Tests for rt[i:j]
(SLICE_BUILDER[0:3], EXAMPLE_RAGGED_TENSOR_2D[0:3]),
(SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
(SLICE_BUILDER[-5:3], EXAMPLE_RAGGED_TENSOR_2D[-5:3]),
(SLICE_BUILDER[3:1], EXAMPLE_RAGGED_TENSOR_2D[3:1]),
(SLICE_BUILDER[-1:1], EXAMPLE_RAGGED_TENSOR_2D[-1:1]),
(SLICE_BUILDER[1:-1], EXAMPLE_RAGGED_TENSOR_2D[1:-1]),
# Tests for rt[i, j]
(SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
(SLICE_BUILDER[1, 2], EXAMPLE_RAGGED_TENSOR_2D[1][2]),
(SLICE_BUILDER[-1, 0], EXAMPLE_RAGGED_TENSOR_2D[-1][0]),
(SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
(SLICE_BUILDER[:], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_2D),
# Empty slice spec.
([], EXAMPLE_RAGGED_TENSOR_2D),
# Test for ellipsis
(SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_2D[2]),
(SLICE_BUILDER[..., :], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[..., 2, 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[2, ..., 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
# Test for array_ops.newaxis
(SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, array_ops.newaxis],
[[row] for row in EXAMPLE_RAGGED_TENSOR_2D]),
# Slicing inner ragged dimensions.
(SLICE_BUILDER[-1:, 1:4],
[row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D[-1:]]),
(SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_2D]),
# TODO(edloper): Add tests for strided slices, once support is added.
)
def testRaggedTensorGetItemWithRaggedRank1(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Ragged tensor
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
self.assertEqual(self.eval_to_list(rt), EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
# pylint: disable=invalid-slice-index
@parameterized.parameters(
# Tests for out-of-bound errors
(SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[-6], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[0, 2], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[3, 0], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
# Indexing into an inner ragged dimension
(SLICE_BUILDER[:, 3], ValueError,
'Cannot index into an inner ragged dimension'),
(SLICE_BUILDER[:1, 3], ValueError,
'Cannot index into an inner ragged dimension'),
(SLICE_BUILDER[..., 3], ValueError,
'Cannot index into an inner ragged dimension'),
# Tests for type errors
(SLICE_BUILDER[0.5], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[1:3:0.5], TypeError, re.escape(
array_ops._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[:, 1:3:0.5], TypeError,
'slice strides must be integers or None'),
(SLICE_BUILDER[:, 0.5:1.5], TypeError,
'slice offsets must be integers or None'),
(SLICE_BUILDER['foo'], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[:, 'foo':'foo'], TypeError,
'slice offsets must be integers or None'),
# Tests for other errors
(SLICE_BUILDER[..., 0, 0, 0], IndexError,
'Too many indices for RaggedTensor'),
)
def testRaggedTensorGetItemErrorsWithRaggedRank1(self, slice_spec, expected,
message):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Ragged tensor
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
self.assertEqual(self.eval_to_list(rt), EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
# Tests for rt[index, index, ...]
(SLICE_BUILDER[2, 0], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
(SLICE_BUILDER[2, 0, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
(SLICE_BUILDER[2, 0, 1, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1][1]),
(SLICE_BUILDER[2, 0, 1:], EXAMPLE_RAGGED_TENSOR_4D[2][0][1:]),
(SLICE_BUILDER[2, 0, 1:, 1:], [[16], [18]]),
(SLICE_BUILDER[2, 0, :, 1], [14, 16, 18]),
(SLICE_BUILDER[2, 0, 1, :], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
# Tests for rt[index, slice, ...]
(SLICE_BUILDER[0, :], EXAMPLE_RAGGED_TENSOR_4D[0]),
(SLICE_BUILDER[1, :], EXAMPLE_RAGGED_TENSOR_4D[1]),
(SLICE_BUILDER[0, :, :, 1], [[2, 4, 6], [8, 10, 12]]),
(SLICE_BUILDER[1, :, :, 1], []),
(SLICE_BUILDER[2, :, :, 1], [[14, 16, 18]]),
(SLICE_BUILDER[3, :, :, 1], [[20]]),
# Tests for rt[slice, slice, ...]
(SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[:, :, :, 1], [[[2, 4, 6], [8, 10, 12]], [], [[14, 16, 18]],
[[20]]]),
(SLICE_BUILDER[1:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
(SLICE_BUILDER[-3:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
# Test for ellipsis
(SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_4D[2]),
(SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
(SLICE_BUILDER[..., 0], [[[1, 3, 5], [7, 9, 11]], [], [[13, 15, 17]],
[[19]]]),
(SLICE_BUILDER[2, ..., 0], [[13, 15, 17]]),
(SLICE_BUILDER[2, 0, ..., 0], [13, 15, 17]),
# Test for array_ops.newaxis
(SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, array_ops.newaxis],
[[row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
# Empty slice spec.
([], EXAMPLE_RAGGED_TENSOR_4D),
# Slicing inner ragged dimensions.
(SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, :-1],
[[v[:-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, 1:2],
[[v[1:2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[1:, 1:3, 1:2],
[[v[1:2] for v in row[1:3]] for row in EXAMPLE_RAGGED_TENSOR_4D[1:]]),
# Strided slices
(SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_4D[::2]),
(SLICE_BUILDER[1::2], EXAMPLE_RAGGED_TENSOR_4D[1::2]),
(SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, 1::2], [row[1::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::2],
[[v[::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, 1::2],
[[v[1::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
# TODO(edloper): Add tests for strided slices, once support is added.
      # TODO(edloper): Add tests for slicing inner ragged dimensions, once
      # support is added.
)
def testRaggedTensorGetItemWithRaggedRank2(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
self.assertEqual(self.eval_to_list(rt), EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
# Test for errors in unsupported cases
(SLICE_BUILDER[:, 0], ValueError,
'Cannot index into an inner ragged dimension.'),
(SLICE_BUILDER[:, :, 0], ValueError,
'Cannot index into an inner ragged dimension.'),
# Test for out-of-bounds errors.
(SLICE_BUILDER[1, 0], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[0, 0, 3],
(IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[0, 5], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
)
def testRaggedTensorGetItemErrorsWithRaggedRank2(self, slice_spec, expected,
message):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
self.assertEqual(self.eval_to_list(rt), EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
(SLICE_BUILDER[:], []),
(SLICE_BUILDER[2:], []),
(SLICE_BUILDER[:-3], []),
)
def testRaggedTensorGetItemWithEmptyTensor(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_row_splits([], [0])
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
(SLICE_BUILDER[0], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[-1], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
)
def testRaggedTensorGetItemErrorsWithEmptyTensor(self, slice_spec, expected,
message):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_row_splits([], [0])
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
(SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
(SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
(SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
(SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
(SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
(SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
(SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
)
def testRaggedTensorGetItemWithPlaceholderShapes(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Intentionally use an unknown shape for `splits`, to force the code path
# that deals with having nrows unknown at graph construction time.
splits = constant_op.constant(
EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64)
splits = array_ops.placeholder_with_default(splits, None)
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits)
self.assertEqual(self.eval_to_list(rt), EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
(SLICE_BUILDER[..., 2], ValueError,
'Ellipsis not supported for unknown shape RaggedTensors'),)
def testRaggedTensorGetItemErrorsWithPlaceholderShapes(
self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
if not context.executing_eagerly():
# Intentionally use an unknown shape for `values`.
values = array_ops.placeholder_with_default([0], None)
rt = RaggedTensor.from_row_splits(values, [0, 1])
self._TestGetItemException(rt, slice_spec, expected, message)
def testGetItemNewAxis(self):
# rt: [[[['a', 'b'], ['c', 'd']], [], [['e', 'f']]], []]
splits1 = [0, 3, 3]
splits2 = [0, 2, 2, 3]
values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']])
rt = RaggedTensor.from_nested_row_splits(values, [splits1, splits2])
rt_newaxis0 = rt[array_ops.newaxis]
rt_newaxis1 = rt[:, array_ops.newaxis]
rt_newaxis2 = rt[:, :, array_ops.newaxis]
rt_newaxis3 = rt[:, :, :, array_ops.newaxis]
rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]
self.assertEqual(
self.eval_to_list(rt),
[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])
self.assertEqual(
self.eval_to_list(rt_newaxis0),
[[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])
self.assertEqual(
self.eval_to_list(rt_newaxis1),
[[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])
self.assertEqual(
self.eval_to_list(rt_newaxis2),
[[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])
self.assertEqual(
self.eval_to_list(rt_newaxis3),
[[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])
self.assertEqual(
self.eval_to_list(rt_newaxis4),
[[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])
self.assertEqual(rt.ragged_rank, 2)
self.assertEqual(rt_newaxis0.ragged_rank, 3)
self.assertEqual(rt_newaxis1.ragged_rank, 3)
self.assertEqual(rt_newaxis2.ragged_rank, 3)
self.assertEqual(rt_newaxis3.ragged_rank, 2)
self.assertEqual(rt_newaxis4.ragged_rank, 2)
self.assertEqual(rt_newaxis0.shape.as_list(), [1, None, None, None, 2])
self.assertEqual(rt_newaxis1.shape.as_list(), [2, None, None, None, 2])
self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, None, None, 2])
self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])
self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])
#=============================================================================
# RaggedTensor.__str__
#=============================================================================
def testRaggedTensorStr(self):
values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g']
row_splits = [0, 2, 5, 6, 6, 7]
rt = RaggedTensor.from_row_splits(values, row_splits, validate=False)
splits_type = 'int64'
if context.executing_eagerly():
expected_repr = '<tf.RaggedTensor {}>'.format([[b'a', b'b'],
[b'c', b'd', b'e'], [b'f'],
[], [b'g']])
else:
expected_repr = (
'tf.RaggedTensor(values=Tensor("RaggedFromRowSplits/values:0", '
'shape=(7,), dtype=string), row_splits='
'Tensor("RaggedFromRowSplits/row_splits:0", '
'shape=(6,), dtype={}))').format(splits_type)
self.assertEqual(repr(rt), expected_repr)
self.assertEqual(str(rt), expected_repr)
def testRaggedTensorValueStr(self):
values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g']
row_splits = [0, 2, 5, 6, 6, 7]
rt = ragged_tensor_value.RaggedTensorValue(
np.array(values), np.array(row_splits, dtype=np.int64))
expected_str = '<tf.RaggedTensorValue {}>'.format([[b'a', b'b'],
[b'c', b'd', b'e'],
[b'f'], [], [b'g']])
expected_repr = ("tf.RaggedTensorValue(values=array({}, dtype='|S1'), "
'row_splits=array({}))'.format(values, row_splits))
self.assertEqual(' '.join(str(rt).split()), expected_str)
self.assertEqual(' '.join(repr(rt).split()), expected_repr)
#=============================================================================
# RaggedTensor.with_values() and RaggedTensor.with_flat_values().
#=============================================================================
def testWithValues(self):
rt1 = ragged_factory_ops.constant([[1, 2], [3, 4, 5], [6], [], [7]])
rt2 = ragged_factory_ops.constant([[[1, 2], [3, 4, 5]], [[6]], [], [[],
[7]]])
rt1_plus_10 = rt1.with_values(rt1.values + 10)
rt2_times_10 = rt2.with_flat_values(rt2.flat_values * 10)
rt1_expanded = rt1.with_values(array_ops.expand_dims(rt1.values, axis=1))
self.assertEqual(
self.eval_to_list(rt1_plus_10),
[[11, 12], [13, 14, 15], [16], [], [17]])
self.assertEqual(
self.eval_to_list(rt2_times_10),
[[[10, 20], [30, 40, 50]], [[60]], [], [[], [70]]])
self.assertEqual(
self.eval_to_list(rt1_expanded),
[[[1], [2]], [[3], [4], [5]], [[6]], [], [[7]]])
#=============================================================================
# Session.run
#=============================================================================
def testSessionRun(self):
if context.executing_eagerly():
return
rt1 = ragged_factory_ops.constant([[1, 2, 3], [4]])
rt2 = ragged_factory_ops.constant([[[], [1, 2]], [[3]]])
with self.test_session() as session:
result = session.run({'rt1': rt1, 'rt2': rt2})
self.assertCountEqual(result.keys(), ['rt1', 'rt2'])
self.assertEqual(result['rt1'].to_list(), [[1, 2, 3], [4]])
self.assertEqual(result['rt2'].to_list(), [[[], [1, 2]], [[3]]])
def testSessionRunFeed(self):
if context.executing_eagerly():
return
rt1 = RaggedTensor.from_row_splits(
array_ops.placeholder(dtypes.int32),
array_ops.placeholder(dtypes.int64))
rt2 = RaggedTensor.from_nested_row_splits(
array_ops.placeholder(dtypes.int32), [
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.int64)
])
rt1_feed_val = ragged_factory_ops.constant_value([[1, 2, 3], [4]])
rt2_feed_val = ragged_factory_ops.constant_value([[[], [1, 2]], [[3]]])
with self.test_session() as session:
fetches = {'rt1': rt1, 'rt2': rt2}
feeds = {rt1: rt1_feed_val, rt2: rt2_feed_val}
result = session.run(fetches, feed_dict=feeds)
self.assertCountEqual(result.keys(), ['rt1', 'rt2'])
self.assertEqual(result['rt1'].to_list(), [[1, 2, 3], [4]])
self.assertEqual(result['rt2'].to_list(), [[[], [1, 2]], [[3]]])
def testSessionPartialRunFeed(self):
if context.executing_eagerly():
return
# Placeholder inputs.
a = RaggedTensor.from_row_splits(
array_ops.placeholder(dtypes.int32, shape=[None], name='a.values'),
array_ops.placeholder(dtypes.int64, name='a.row_splits'))
b = RaggedTensor.from_row_splits(
array_ops.placeholder(dtypes.int32, shape=[None], name='b.values'),
array_ops.placeholder(dtypes.int64, name='b.row_splits'))
c = array_ops.placeholder(dtypes.int32, shape=[], name='c')
# Feed values for placeholder inputs.
a_val = ragged_factory_ops.constant_value([[1, 2, 3], [4]])
b_val = ragged_factory_ops.constant_value([[5, 4, 3], [2]])
c_val = 3
# Compute some values.
r1 = ragged_math_ops.reduce_sum(a * b, axis=1)
r2 = ragged_math_ops.reduce_sum(a + c, axis=1)
with self.test_session() as session:
handle = session.partial_run_setup([r1, r2], [a, b, c])
res1 = session.partial_run(handle, r1, feed_dict={a: a_val, b: b_val})
self.assertAllEqual(res1, [22, 8])
res2 = session.partial_run(handle, r2, feed_dict={c: c_val})
self.assertAllEqual(res2, [15, 7])
# Test case for GitHub issue 24679.
def testEagerForLoop(self):
if not context.executing_eagerly():
return
values = [[1., 2.], [3., 4., 5.], [6.]]
r = ragged_factory_ops.constant(values)
i = 0
for elem in r:
self.assertAllEqual(elem, values[i])
i += 1
def testConsumers(self):
if context.executing_eagerly():
return
a = RaggedTensor.from_row_splits(
array_ops.placeholder(dtypes.int32, shape=[None], name='a.values'),
array_ops.placeholder(dtypes.int64, name='a.row_splits'),
validate=False)
ragged_math_ops.reduce_sum(a)
self.assertLen(a.consumers(), 1)
@parameterized.parameters([
# from_value_rowids
{'descr': 'bad rank for value_rowids',
'factory': RaggedTensor.from_value_rowids,
'values': [[1, 2], [3, 4]],
'value_rowids': [[1, 2], [3, 4]],
'nrows': 10},
{'descr': 'bad rank for nrows',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [1, 2, 3, 4],
'nrows': [10]},
{'descr': 'len(values) != len(value_rowids)',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [1, 2, 3, 4, 5],
'nrows': 10},
{'descr': 'negative value_rowid',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [-5, 2, 3, 4],
'nrows': 10},
{'descr': 'non-monotonic-increasing value_rowid',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [4, 3, 2, 1],
'nrows': 10},
{'descr': 'value_rowid > nrows',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [1, 2, 3, 4],
'nrows': 2},
{'descr': 'bad rank for values',
'factory': RaggedTensor.from_value_rowids,
'values': 10,
'value_rowids': [1, 2, 3, 4],
'nrows': 10},
# from_row_splits
{'descr': 'bad rank for row_splits',
'factory': RaggedTensor.from_row_splits,
'values': [[1, 2], [3, 4]],
'row_splits': [[1, 2], [3, 4]]},
{'descr': 'row_splits[0] != 0',
'factory': RaggedTensor.from_row_splits,
'values': [1, 2, 3, 4],
'row_splits': [2, 3, 4]},
{'descr': 'non-monotonic-increasing row_splits',
'factory': RaggedTensor.from_row_splits,
'values': [1, 2, 3, 4],
'row_splits': [0, 3, 2, 4]},
      {'descr': 'row_splits[-1] != nvals',
'factory': RaggedTensor.from_row_splits,
'values': [1, 2, 3, 4],
'row_splits': [0, 2, 3, 5]},
{'descr': 'bad rank for values',
'factory': RaggedTensor.from_row_splits,
'values': 10,
'row_splits': [0, 1]},
# from_row_lengths
{'descr': 'bad rank for row_lengths',
'factory': RaggedTensor.from_row_lengths,
'values': [1, 2, 3, 4],
'row_lengths': [[1, 2], [1, 0]]},
      {'descr': 'negative row_lengths',
'factory': RaggedTensor.from_row_lengths,
'values': [1, 2, 3, 4],
'row_lengths': [3, -1, 2]},
{'descr': 'sum(row_lengths) != nvals',
'factory': RaggedTensor.from_row_lengths,
'values': [1, 2, 3, 4],
'row_lengths': [2, 4, 2, 8]},
{'descr': 'bad rank for values',
'factory': RaggedTensor.from_row_lengths,
'values': 10,
'row_lengths': [0, 1]},
# from_row_starts
{'descr': 'bad rank for row_starts',
'factory': RaggedTensor.from_row_starts,
'values': [[1, 2], [3, 4]],
'row_starts': [[1, 2], [3, 4]]},
{'descr': 'row_starts[0] != 0',
'factory': RaggedTensor.from_row_starts,
'values': [1, 2, 3, 4],
'row_starts': [2, 3, 4]},
{'descr': 'non-monotonic-increasing row_starts',
'factory': RaggedTensor.from_row_starts,
'values': [1, 2, 3, 4],
'row_starts': [0, 3, 2, 4]},
      {'descr': 'row_starts[-1] > nvals',
'factory': RaggedTensor.from_row_starts,
'values': [1, 2, 3, 4],
'row_starts': [0, 2, 3, 5]},
{'descr': 'bad rank for values',
'factory': RaggedTensor.from_row_starts,
'values': 10,
'row_starts': [0, 1]},
# from_row_limits
{'descr': 'bad rank for row_limits',
'factory': RaggedTensor.from_row_limits,
'values': [[1, 2], [3, 4]],
'row_limits': [[1, 2], [3, 4]]},
{'descr': 'row_limits[0] < 0',
'factory': RaggedTensor.from_row_limits,
'values': [1, 2, 3, 4],
'row_limits': [-1, 3, 4]},
{'descr': 'non-monotonic-increasing row_limits',
'factory': RaggedTensor.from_row_limits,
'values': [1, 2, 3, 4],
'row_limits': [0, 3, 2, 4]},
      {'descr': 'row_limits[-1] != nvals',
'factory': RaggedTensor.from_row_limits,
'values': [1, 2, 3, 4],
'row_limits': [0, 2, 3, 5]},
{'descr': 'bad rank for values',
'factory': RaggedTensor.from_row_limits,
'values': 10,
'row_limits': [0, 1]},
])
def testFactoryValidation(self, descr, factory, **kwargs):
# When input tensors have shape information, some of these errors will be
# detected statically.
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
self.evaluate(factory(**kwargs))
    # Remove shape information (by wrapping tensors in placeholders), and check
# that we detect the errors when the graph is run.
if not context.executing_eagerly():
def wrap_arg(v):
return array_ops.placeholder_with_default(
constant_op.constant(v, dtype=dtypes.int64),
tensor_shape.TensorShape(None))
kwargs = dict((k, wrap_arg(v)) for (k, v) in kwargs.items())
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(factory(**kwargs))
#=============================================================================
# RaggedTensor Variant conversion
#=============================================================================
@parameterized.parameters(
{
'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]],
'ragged_rank': 1
}, {
'ragged_constant': [[[1, 2]], [], [[3, 4]], []],
'ragged_rank': 1
}, {
'ragged_constant': [[[1], [2, 3, 4, 5, 6, 7]], [[]]],
'ragged_rank': 2
})
def testRaggedToVariant(self, ragged_constant, ragged_rank):
rt = ragged_factory_ops.constant(ragged_constant, ragged_rank=ragged_rank)
et = rt._to_variant()
self.assertEqual(et.shape.as_list(), [])
self.assertEqual(et.dtype, dtypes.variant)
@parameterized.parameters(
{
'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]],
'ragged_rank': 1,
'num_batched_elems': 5
}, {
'ragged_constant': [[[1, 2]], [], [[3, 4]], []],
'ragged_rank': 1,
'num_batched_elems': 4
}, {
'ragged_constant': [[[1], [2, 3, 4, 5, 6, 7]], [[]]],
'ragged_rank': 2,
'num_batched_elems': 2
})
def testRaggedToBatchedVariant(self, ragged_constant, ragged_rank,
num_batched_elems):
rt = ragged_factory_ops.constant(ragged_constant, ragged_rank=ragged_rank)
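    # With batched_input=True, each top-level row is encoded as a separate
    # variant, so the result should be a vector with one variant per row.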
et = rt._to_variant(batched_input=True)
self.assertEqual(et.shape.as_list(), [num_batched_elems])
self.assertEqual(et.dtype, dtypes.variant)
@parameterized.parameters(
# 2D test cases.
{
'ragged_constant': [[]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1, 2]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1], [2], [3]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]],
'ragged_rank': 1,
},
# 3D test cases.
{
'ragged_constant': [[[]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1, 2]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1, 2], [3, 4]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1, 2]], [], [[3, 4]], []],
'ragged_rank': 2,
},
# 4D test cases.
{
'ragged_constant': [[[[1, 2], [3, 4]]],
[[[0, 0], [0, 0]], [[5, 6], [7, 8]]], []],
'ragged_rank': 3,
},
# dtype `string`.
{
'ragged_constant': [['a'], ['b'], ['c']],
'ragged_rank': 1,
'dtype': dtypes.string,
},
{
'ragged_constant': [[['a', 'b'], ['c', 'd']]],
'ragged_rank': 2,
'dtype': dtypes.string,
},
{
'ragged_constant': [[[['a', 'b'], ['c', 'd']]],
[[['e', 'f'], ['g', 'h']], [['i', 'j'],
['k', 'l']]], []],
'ragged_rank': 3,
'dtype': dtypes.string,
})
def testVariantRoundTrip(self,
ragged_constant,
ragged_rank,
dtype=dtypes.int32):
rt = ragged_factory_ops.constant(
ragged_constant, ragged_rank=ragged_rank, dtype=dtype)
et = rt._to_variant()
round_trip_rt = RaggedTensor._from_variant(
et, dtype, output_ragged_rank=ragged_rank)
self.assertRaggedEqual(rt, round_trip_rt)
def testBatchedVariantRoundTripInputRaggedRankInferred(self):
ragged_rank = 1
rt = ragged_factory_ops.constant(
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
ragged_rank=ragged_rank)
batched_variant = rt._to_variant(batched_input=True)
nested_batched_variant = array_ops.reshape(batched_variant, [5, 2])
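    # Reshaping the rank-1 batched variant to [5, 2] adds a batch dimension, so
    # decoding uses an output_ragged_rank one higher than the original rank.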
decoded_rt = RaggedTensor._from_variant(
nested_batched_variant,
dtype=dtypes.int32,
output_ragged_rank=ragged_rank + 1)
expected_rt = ragged_factory_ops.constant([[[0], [1]], [[2], [3]], [[4],
[5]],
[[6], [7]], [[8], [9]]])
self.assertRaggedEqual(decoded_rt, expected_rt)
def testBatchedVariantRoundTripWithInputRaggedRank(self):
ragged_rank = 1
rt = ragged_factory_ops.constant(
[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
ragged_rank=ragged_rank)
batched_variant = rt._to_variant(batched_input=True)
nested_batched_variant = array_ops.reshape(batched_variant, [5, 2])
decoded_rt = RaggedTensor._from_variant(
nested_batched_variant,
dtype=dtypes.int32,
output_ragged_rank=ragged_rank + 1,
input_ragged_rank=ragged_rank - 1)
expected_rt = ragged_factory_ops.constant([[[0], [1]], [[2], [3]], [[4],
[5]],
[[6], [7]], [[8], [9]]])
self.assertRaggedEqual(decoded_rt, expected_rt)
def testFromVariantInvalidParams(self):
rt = ragged_factory_ops.constant([[0], [1], [2], [3]])
batched_variant = rt._to_variant(batched_input=True)
nested_batched_variant = array_ops.reshape(batched_variant, [2, 2])
with self.assertRaisesRegexp(ValueError,
'output_ragged_rank must be equal to'):
RaggedTensor._from_variant(
nested_batched_variant,
dtype=dtypes.int32,
output_ragged_rank=1,
input_ragged_rank=1)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_tensor_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RaggedTensor operator dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
# Constants listing various op types to test. Each operation
# should be included in at least one list below, or tested separately if
# necessary (e.g., because it expects additional arguments).
UNARY_FLOAT_OPS = [
math_ops.abs,
math_ops.acos,
math_ops.acosh,
math_ops.angle,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.ceil,
math_ops.conj,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.exp,
math_ops.expm1,
math_ops.floor,
math_ops.imag,
math_ops.is_finite,
math_ops.is_inf,
math_ops.is_nan,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
math_ops.log_sigmoid,
math_ops.negative,
math_ops.real,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
array_ops.identity,
array_ops.ones_like,
array_ops.zeros_like,
]
UNARY_BOOL_OPS = [
math_ops.logical_not,
]
UNARY_STRING_OPS = [
string_ops.decode_base64,
string_ops.encode_base64,
string_ops.string_strip,
parsing_ops.decode_compressed,
]
BINARY_FLOAT_OPS = [
math_ops.add,
math_ops.atan2,
math_ops.complex,
math_ops.div_no_nan,
math_ops.divide,
math_ops.equal,
math_ops.floordiv,
math_ops.floormod,
math_ops.greater,
math_ops.greater_equal,
math_ops.less,
math_ops.less_equal,
math_ops.maximum,
math_ops.minimum,
math_ops.multiply,
math_ops.not_equal,
math_ops.pow,
math_ops.realdiv,
math_ops.squared_difference,
math_ops.subtract,
math_ops.truediv,
]
BINARY_BOOL_OPS = [
math_ops.logical_and,
math_ops.logical_or,
math_ops.logical_xor,
]
UNARY_INT_OPS = [
gen_bitwise_ops.invert,
string_ops.unicode_script,
]
BINARY_INT_OPS = [
gen_bitwise_ops.bitwise_and,
gen_bitwise_ops.bitwise_or,
gen_bitwise_ops.bitwise_xor,
gen_bitwise_ops.left_shift,
gen_bitwise_ops.right_shift,
math_ops.truncatediv,
math_ops.truncatemod,
]
@test_util.run_all_in_graph_and_eager_modes
class RaggedElementwiseOpsTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
def assertSameShape(self, x, y):
"""Checks that x and y have the same shape (including ragged shapes)."""
if isinstance(x, ragged_tensor.RaggedTensor):
self.assertIsInstance(y, ragged_tensor.RaggedTensor)
self.assertEqual(x.ragged_rank, y.ragged_rank)
for (x_splits, y_splits) in zip(x.nested_row_splits, y.nested_row_splits):
self.assertAllEqual(x_splits, y_splits)
self.assertAllEqual(
array_ops.shape(x.flat_values), array_ops.shape(y.flat_values))
else:
self.assertIsInstance(y, ops.Tensor)
self.assertAllEqual(array_ops.shape(x), array_ops.shape(y))
@parameterized.parameters(
#=========================================================================
# Test different input shapes.
#=========================================================================
[
# 0-dimensional input
{'x': 12},
# 1-dimensional input
{'x': [1, -2, 3]},
# 2-dimensional input
{'x': [[-2, 3], [-3, 4]]},
{'x': ragged_factory_ops.constant_value(
[[-2, 3], [-3]], ragged_rank=1)},
# 3-dimensional inputs
{'x': [[[-2, 3], [3, 4]], [[7, 6], [5, 4]]]},
{'x': ragged_factory_ops.constant_value(
[[[-2, 3], [3, 4]], [[7, 6]]],
ragged_rank=1)},
{'x': ragged_factory_ops.constant_value(
[[[-2, 3, 4], []], [[7, 6]], []],
ragged_rank=2)},
] +
#=========================================================================
# Test each unary op.
#=========================================================================
[{'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]), 'op': op}
for op in UNARY_FLOAT_OPS] +
[{'x': ragged_factory_ops.constant_value([[True, False], [True]]),
'op': op}
for op in UNARY_BOOL_OPS] +
[{'x': ragged_factory_ops.constant_value([[18, 512], [12412]], np.int32),
'op': op}
for op in UNARY_INT_OPS] +
[{'x': ragged_factory_ops.constant_value([['abcd', 'efgh'],
['aabbccdd']]),
'op': op}
for op in UNARY_STRING_OPS] +
[
{'op': clip_ops.clip_by_value,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'clip_value_min': 0.1, 'clip_value_max': 4.0},
{'op': math_ops.cast,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'dtype': dtypes.int32},
{'op': math_ops.saturate_cast,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'dtype': dtypes.int32},
{'op': string_ops.string_to_hash_bucket,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000},
{'op': string_ops.string_to_hash_bucket_fast,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000},
{'op': string_ops.string_to_hash_bucket_strong,
'x': ragged_factory_ops.constant_value(
[['abcd', 'efgh'], ['aabbccdd']]),
'num_buckets': 1000,
'key': [1231, 12512]},
{'op': string_ops.string_to_number,
'x': ragged_factory_ops.constant_value([['-2.0', '3.0'], ['-3.0']])},
{'op': string_ops.regex_full_match,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pattern': r'\w+'},
{'op': string_ops.regex_replace,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pattern': r'\d',
'rewrite': '#'},
{'op': string_ops.substr,
'x': ragged_factory_ops.constant_value([['hello', '123'], ['1+1']]),
'pos': 2, 'len': 3},
{'op': array_ops.check_numerics,
'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'message': 'check-numerics'},
]
) # pyformat: disable
def testUnaryElementwiseOp(self, x, op=math_ops.abs, **extra_args):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x)
result = op(x, **extra_args)
# Run the wrapped op on the dense values, for comparison.
dense_x = x.flat_values if isinstance(x, ragged_tensor.RaggedTensor) else x
expected_flat_values = array_ops.reshape(op(dense_x, **extra_args), [-1])
# Check that the result has the expected shape.
self.assertSameShape(x, result)
# Check that the result has the expected (flattened) values.
if isinstance(result, ragged_tensor.RaggedTensor):
result_flat_values = array_ops.reshape(result.flat_values, [-1])
else:
result_flat_values = array_ops.reshape(result, [-1])
self.assertAllEqual(expected_flat_values, result_flat_values)
@parameterized.parameters(
[
#=====================================================================
# Without broadcasting -- i.e., shapes match exactly.
#=====================================================================
# Shapes: x:(), y:()
{'x': 12,
'y': 8},
# Shapes: x:(3,), y:(3,)
{'x': [7, 8, 9],
'y': [1, -2, 3]},
# Shapes: x:(2, 2), y:(2, 2)
{'x': [[-2, 3], [-3, -4]],
'y': [[1, 2], [3, 4]]},
# Shapes: x:(2, None), y:(2, None)
{'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
'y': ragged_factory_ops.constant_value([[5, 6], [7]])},
# Shapes: x:(2, 2, 2), y:(2, 2, 2)
{'x': [[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
'y': [[[9, 3], [3, 4]], [[5, 2], [7, 6]]]},
# Shapes: x:(2, None, None), y: (2, None, None)
{'x': ragged_factory_ops.constant_value(
[[[1, 2], [3], [4]], [[], [5, 7, 8]]]),
'y': ragged_factory_ops.constant_value(
[[[3, 8], [2], [5]], [[], [1, 9, 8]]])},
# Shapes: x:(2, None, 2), y: (2, None, 2)
{'x': ragged_factory_ops.constant_value(
[[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
ragged_rank=1),
'y': ragged_factory_ops.constant_value(
[[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
ragged_rank=1)},
#=====================================================================
# With broadcasting
#=====================================================================
# Shapes: x:(), y:(3,)
{'x': 12, # Broadcast () -> (3,)
'y': [1, -2, 3]},
# Shapes: x:(1,), y:(3,)
{'x': [12], # Broadcast (1,) -> (3,)
'y': [1, -2, 3]},
# Shapes: x:(), y:(2, 2)
{'x': 12, # Broadcast () -> (2, 2)
'y': [[1, 2], [3, 4]]},
# Shapes: x:(1,), y:(2, 2)
{'x': 12, # Broadcast (1,) -> (2, 2)
'y': [[1, 2], [3, 4]]},
# Shapes: x:(2, 1), y:(2, 2)
{'x': [[10], [20]], # Broadcast (2, 1) -> (2, 2)
'y': [[1, 2], [3, 4]]},
# Shapes: x:(), y:(2, None)
{'x': 10, # Broadcast () -> (2, None)
'y': ragged_factory_ops.constant_value(
[[1, 2], [3]], dtype=np.int32)},
# TODO(edloper): Add tests for more advanced broadcasting, once we add
# support for it.
#=====================================================================
# Keyword Args
#=====================================================================
{'x': ragged_factory_ops.constant_value(
[[[1, 2], [3], [4]], [[], [5, 7, 8]]]),
'y': ragged_factory_ops.constant_value(
[[[3, 8], [2], [5]], [[], [1, 9, 8]]]),
'use_kwargs': ('x', 'y')},
{'x': ragged_factory_ops.constant_value(
[[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
ragged_rank=1),
'y': ragged_factory_ops.constant_value(
[[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
ragged_rank=1),
'use_kwargs': ('x', 'y')},
{'x': ragged_factory_ops.constant_value(
[[[1, 2]], [[3, 4], [5, 6], [7, 8]]],
ragged_rank=1),
'y': ragged_factory_ops.constant_value(
[[[9, 3]], [[5, 2], [3, 4], [7, 6]]],
ragged_rank=1),
'use_kwargs': ('x',)},
] +
#=========================================================================
# Test each unary op.
#=========================================================================
[{'x': ragged_factory_ops.constant_value([[-2.0, 3.0], [-3.0]]),
'y': ragged_factory_ops.constant_value([[5.0, 1.0], [12.0]]),
'op': op}
for op in BINARY_FLOAT_OPS] +
[{'x': ragged_factory_ops.constant_value([[-2, 3], [-3]]),
'y': ragged_factory_ops.constant_value([[5, 1], [12]]),
'op': op}
for op in BINARY_INT_OPS] +
[{'x': ragged_factory_ops.constant_value([[True, True], [False]]),
'y': ragged_factory_ops.constant_value([[False, True], [False]]),
'op': op}
for op in BINARY_BOOL_OPS]
) # pyformat: disable
def testBinaryElementwiseOp(self, x, y, op=math_ops.add, **extra_args):
use_kwargs = extra_args.pop('use_kwargs', ())
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y)
if 'x' in use_kwargs and 'y' in use_kwargs:
result = op(x=x, y=y, **extra_args)
elif 'y' in use_kwargs:
result = op(x, y=y, **extra_args)
else:
result = op(x, y, **extra_args)
# Run the wrapped op on the dense values, for comparison.
dense_x = x.flat_values if isinstance(x, ragged_tensor.RaggedTensor) else x
dense_y = y.flat_values if isinstance(y, ragged_tensor.RaggedTensor) else y
expected_flat_values = array_ops.reshape(
op(dense_x, dense_y, **extra_args), [-1])
# Check that the result has the expected shape.
self.assertSameShape(y, result)
# Check that the result has the expected (flattened) values.
if isinstance(result, ragged_tensor.RaggedTensor):
result_flat_values = array_ops.reshape(result.flat_values, [-1])
else:
result_flat_values = array_ops.reshape(result, [-1])
self.assertAllEqual(expected_flat_values, result_flat_values)
@parameterized.parameters(
[
{'inputs': (12, 8, 3)},
{'inputs': ([1, 2, 3], [7, 8, 9], [3, 6, 9])},
{'inputs': ([[1, 2]], [[3, 4]], [[5, 6]])},
{'inputs': (ragged_factory_ops.constant_value([[1, 3], [-3]]),
ragged_factory_ops.constant_value([[4, 7], [88]]),
ragged_factory_ops.constant_value([[2, 9], [12]]))},
{'inputs': (ragged_factory_ops.constant_value(
[[[1, 3], [-3]], [[1]]]),
ragged_factory_ops.constant_value(
[[[4, 7], [88]], [[2]]]),
ragged_factory_ops.constant_value(
[[[2, 9], [12]], [[8]]]))},
{'inputs': (
ragged_factory_ops.constant_value([[[1, 3], [3, 4]], [[1, 5]]],
ragged_rank=1),
ragged_factory_ops.constant_value([[[4, 7], [1, 2]], [[2, 2]]],
ragged_rank=1),
ragged_factory_ops.constant_value([[[2, 9], [5, 2]], [[8, 0]]],
ragged_rank=1))},
{'inputs': (
ragged_factory_ops.constant_value([[[1, 3], [-3]], [[1]]]),
ragged_factory_ops.constant_value([[[4, 7], [88]], [[2]]]),
ragged_factory_ops.constant_value([[[2, 9], [12]], [[8]]])),
'use_kwargs': True},
] + [
{'op': math_ops.add_n,
'inputs': (ragged_factory_ops.constant_value([[1, 3], [-3]]),
ragged_factory_ops.constant_value([[4, 7], [88]]),
ragged_factory_ops.constant_value([[2, 9], [12]]))},
{'op': string_ops.string_join,
'inputs': (
ragged_factory_ops.constant_value([['a', 'b'], ['c']]),
ragged_factory_ops.constant_value([['foo', 'bar'], ['baz']]),
ragged_factory_ops.constant_value([['2', '9'], ['12']]))},
]) # pyformat: disable
def testListValuedElementwiseOp(self, inputs, op=math_ops.add_n,
**extra_args):
use_kwargs = extra_args.pop('use_kwargs', False)
inputs = [
ragged_tensor.convert_to_tensor_or_ragged_tensor(x) for x in inputs
]
if use_kwargs:
result = op(inputs=inputs, **extra_args)
else:
result = op(inputs, **extra_args)
# Run the wrapped op on the dense values, for comparison.
dense_inputs = [
x.flat_values if isinstance(x, ragged_tensor.RaggedTensor) else x
for x in inputs
]
expected_flat_values = array_ops.reshape(
op(dense_inputs, **extra_args), [-1])
# Check that the result has the expected shape.
self.assertSameShape(inputs[0], result)
# Check that the result has the expected (flattened) values.
if isinstance(result, ragged_tensor.RaggedTensor):
result_flat_values = array_ops.reshape(result.flat_values, [-1])
else:
result_flat_values = array_ops.reshape(result, [-1])
self.assertAllEqual(expected_flat_values, result_flat_values)
def testElementwiseOpUnknownRankError(self):
if context.executing_eagerly():
return
x = ragged_factory_ops.constant([[1, 2], [3]])
y = ragged_tensor.RaggedTensor.from_row_splits(
array_ops.placeholder_with_default([1, 2, 3], shape=None), x.row_splits)
with self.assertRaisesRegexp(ValueError,
r'Unable to broadcast: unknown rank'):
math_ops.add(x, y)
@parameterized.parameters([
dict(
x=ragged_factory_ops.constant_value([[1, 2], [3]]),
y=[[10]],
expected=[[11, 12], [13]]),
dict(
x=ragged_factory_ops.constant_value([[[1, 2], [3, 4]], [[5]]],
ragged_rank=2),
y=ragged_factory_ops.constant_value([[[10], [20]], [[30]]],
ragged_rank=1),
expected=[[[11, 12], [23, 24]], [[35]]]),
dict(
x=ragged_factory_ops.constant_value([[[1]]]),
y=ragged_factory_ops.constant_value([[1]]),
expected=[[[2]]]),
])
def testElementwiseOpBroadcast(self, x, y, expected):
x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, dtype=dtypes.int32)
y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, dtype=dtypes.int32)
result = x + y
self.assertRaggedEqual(result, expected)
def testElementwiseOpShapeMismatch(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
y = ragged_factory_ops.constant([[1, 2, 3], [4, 5, 6]])
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(math_ops.add(x, y))
def testBinaryOpSparseAndRagged(self):
x = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
y = sparse_tensor.SparseTensor([[0, 0], [0, 1], [2, 0]], [1, 2, 3], [3, 2])
with self.assertRaises((TypeError, ValueError)):
self.evaluate(math_ops.add(x, y))
with self.assertRaises((TypeError, ValueError)):
self.evaluate(math_ops.add_n([x, y]))
@parameterized.parameters([
dict(
op=array_ops.batch_gather,
args=(ragged_factory_ops.constant_value([[5, 6, 7], [8, 9]]),
ragged_factory_ops.constant_value([[2, 1, 0], [1]])),
expected=ragged_factory_ops.constant_value([[7, 6, 5], [9]])),
dict(
op=array_ops.concat,
args=([
ragged_factory_ops.constant_value([[1, 2, 3], [4]],
dtype=np.int32),
np.array([[5, 6]], dtype=np.int32)
],),
kwargs={'axis': 0},
expected=ragged_factory_ops.constant_value([[1, 2, 3], [4], [5, 6]])),
dict(
op=array_ops.expand_dims,
kwargs={
'input': ragged_factory_ops.constant_value([[1, 2], [3]]),
'axis': 0
},
expected=ragged_factory_ops.constant_value([[[1, 2], [3]]])),
dict(
op=array_ops.expand_dims_v2,
kwargs={
'input': ragged_factory_ops.constant_value([[1, 2], [3]]),
'axis': -1
},
expected=ragged_factory_ops.constant_value([[[1], [2]], [[3]]],
ragged_rank=1),
),
dict(
op=array_ops.gather,
kwargs={
'params': ragged_factory_ops.constant_value([[1, 2], [3]]),
'indices': [1, 0, 1]
},
expected=ragged_factory_ops.constant_value([[3], [1, 2], [3]])),
dict(
op=array_ops.gather_v2,
kwargs={
'params': ragged_factory_ops.constant_value([[1, 2], [3]]),
'indices': ragged_factory_ops.constant_value([[1, 0], [1]])
},
expected=ragged_factory_ops.constant_value([[[3], [1, 2]], [[3]]])),
dict(
op=array_ops.gather_nd,
kwargs={
'params': ragged_factory_ops.constant_value([[7, 8], [9]]),
'indices': [[0, 1], [1, 0], [0, 0]]
},
expected=ragged_factory_ops.constant_value([8, 9, 7])),
dict(
op=array_ops.stack,
args=([
ragged_factory_ops.constant_value([[1, 2, 3], [4]],
dtype=np.int32),
np.array([[5, 6]], dtype=np.int32)
],),
expected=ragged_factory_ops.constant_value([[[1, 2, 3], [4]],
[[5, 6]]])),
dict(
op=array_ops.tile,
args=([
ragged_factory_ops.constant_value([[1, 2], [3]], dtype=np.int32),
[2, 3]
]),
expected=ragged_factory_ops.constant_value([[1, 2, 1, 2, 1, 2],
[3, 3, 3],
[1, 2, 1, 2, 1, 2],
[3, 3, 3]])),
dict(
op=array_ops.where,
args=(ragged_factory_ops.constant_value([[True, False], [True]]),
ragged_factory_ops.constant_value([[b'A', b'B'], [b'C']]),
ragged_factory_ops.constant_value([[b'a', b'b'], [b'c']])),
expected=ragged_factory_ops.constant_value([[b'A', b'b'], [b'C']])),
dict(
op=array_ops.where,
args=(ragged_factory_ops.constant_value([[True, False], [True]]),),
expected=[[0, 0], [1, 0]]),
dict(
op=math_ops.unsorted_segment_sum,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 2], [0]]),
'num_segments': 3
},
expected=[4, 0, 2]),
dict(
op=math_ops.unsorted_segment_prod,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 2], [0]]),
'num_segments': 3
},
expected=[3, 1, 2]),
dict(
op=math_ops.unsorted_segment_min,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
'num_segments': 2
},
expected=[1, 2]),
dict(
op=math_ops.unsorted_segment_max,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
'num_segments': 2
},
expected=[3, 2]),
dict(
op=math_ops.unsorted_segment_mean,
kwargs={
'data': ragged_factory_ops.constant_value([[1, 2], [3]]),
'segment_ids': ragged_factory_ops.constant_value([[0, 1], [0]]),
'num_segments': 2
},
expected=[2, 2]),
dict(
op=math_ops.unsorted_segment_sqrt_n,
kwargs={
'data':
ragged_factory_ops.constant_value([[1.0, 2.0],
[3.0, 4.0, 6.0]]),
'segment_ids':
ragged_factory_ops.constant_value([[0, 1], [0, 0, 0]]),
'num_segments':
2
},
expected=[7.0, 2.0]),
dict(
op=math_ops.reduce_sum,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
'axis':
1
},
expected=[3, 12]),
dict(
op=math_ops.reduce_prod,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
'axis':
1
},
expected=[2, 60]),
dict(
op=math_ops.reduce_min,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
'axis':
1
},
expected=[1, 3]),
dict(
op=math_ops.reduce_max,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 2], [3, 4, 5]]),
'axis':
1
},
expected=[2, 5]),
dict(
op=math_ops.reduce_mean,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[1, 3], [3, 4, 5]]),
'axis':
1
},
expected=[2, 4]),
dict(
op=math_ops.reduce_any,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[True, False],
[True, True, True]]),
'axis':
1
},
expected=[True, True]),
dict(
op=math_ops.reduce_all,
kwargs={
'input_tensor':
ragged_factory_ops.constant_value([[True, False],
[True, True, True]]),
'axis':
1
},
expected=[False, True]),
dict(
op=array_ops.rank,
kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
expected=2),
dict(
op=array_ops.size,
kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
expected=3),
dict(
op=array_ops.size_v2,
kwargs={'input': ragged_factory_ops.constant_value([[8, 3], [5]])},
expected=3),
dict(
op=array_ops.squeeze,
kwargs={
'input': ragged_factory_ops.constant_value([[[1, 2, 3], [4, 5]]]),
'axis': [0]
},
expected=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]])),
dict(
op=array_ops.squeeze_v2,
kwargs={
'input': ragged_factory_ops.constant_value([[[1, 2, 3], [4, 5]]]),
'axis': [0]
},
expected=ragged_factory_ops.constant_value([[1, 2, 3], [4, 5]])),
])
def testRaggedDispatch(self, op, expected, args=(), kwargs=None):
if kwargs is None: kwargs = {}
result = op(*args, **kwargs)
self.assertRaggedEqual(result, expected)
if __name__ == '__main__':
googletest.main()
|
tensorflow-master
|
tensorflow/python/ops/ragged/ragged_dispatch_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.util import nest
def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
"""Computes jacobian of `output` w.r.t. `inputs`.
Args:
output: A tensor.
inputs: A tensor or a nested structure of tensor objects.
use_pfor: If true, uses pfor for computing the jacobian. Else uses
tf.while_loop.
    parallel_iterations: A knob to control how many iterations are dispatched in
      parallel. This knob can be used to control the total memory usage.
Returns:
    A tensor or a nested structure of tensors with the same structure as
    `inputs`. Each entry is the jacobian of `output` w.r.t. the corresponding
value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has
shape [x_1, ..., x_m], the corresponding jacobian has shape
[y_1, ..., y_n, x_1, ..., x_m].
"""
flat_inputs = nest.flatten(inputs)
output_tensor_shape = output.shape
output_shape = array_ops.shape(output)
output = array_ops.reshape(output, [-1])
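  # loop_fn computes the gradients of the i-th element of the flattened output
  # with respect to every tensor in `flat_inputs`.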
def loop_fn(i):
y = array_ops.gather(output, i)
return gradient_ops.gradients(y, flat_inputs)
try:
output_size = int(output.shape[0])
except TypeError:
output_size = array_ops.shape(output)[0]
if use_pfor:
pfor_outputs = control_flow_ops.pfor(
loop_fn, output_size, parallel_iterations=parallel_iterations)
else:
pfor_outputs = control_flow_ops.for_loop(
loop_fn,
[output.dtype] * len(flat_inputs),
output_size,
parallel_iterations=parallel_iterations)
for i, out in enumerate(pfor_outputs):
if out is not None:
new_shape = array_ops.concat(
[output_shape, array_ops.shape(out)[1:]], axis=0)
out = array_ops.reshape(out, new_shape)
out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
pfor_outputs[i] = out
return nest.pack_sequence_as(inputs, pfor_outputs)
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
"""Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.
e.g.
x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
y = x * x
jacobian = batch_jacobian(y, x)
# => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
Args:
output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should
only depend on `inp[i,...]`.
inp: A tensor with shape [b, x1, ..., x_m]
use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
tf.while_loop.
    parallel_iterations: A knob to control how many iterations are dispatched in
      parallel. This knob can be used to control the total memory usage.
Returns:
A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`
is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
per-example jacobians.
Raises:
    ValueError: if the first dimensions of `output` and `inp` do not match.
"""
output_shape = output.shape
if not output_shape[0].is_compatible_with(inp.shape[0]):
raise ValueError("Need first dimension of output shape (%s) and inp shape "
"(%s) to match." % (output.shape, inp.shape))
if output_shape.is_fully_defined():
batch_size = int(output_shape[0])
output_row_size = output_shape.num_elements() // batch_size
else:
output_shape = array_ops.shape(output)
batch_size = output_shape[0]
output_row_size = array_ops.size(output) // batch_size
inp_shape = array_ops.shape(inp)
# Flatten output to 2-D.
with ops.control_dependencies(
[check_ops.assert_equal(batch_size, inp_shape[0])]):
output = array_ops.reshape(output, [batch_size, output_row_size])
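  # loop_fn computes the gradient of the i-th output column (output[:, i])
  # with respect to `inp`.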
def loop_fn(i):
y = array_ops.gather(output, i, axis=1)
return gradient_ops.gradients(y, inp)[0]
if use_pfor:
pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,
parallel_iterations=parallel_iterations)
else:
pfor_output = control_flow_ops.for_loop(
loop_fn, output.dtype,
output_row_size,
parallel_iterations=parallel_iterations)
if pfor_output is None:
return None
pfor_output = array_ops.reshape(pfor_output,
[output_row_size, batch_size, -1])
output = array_ops.transpose(pfor_output, [1, 0, 2])
new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
return array_ops.reshape(output, new_shape)
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/gradients.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pfor and for_loop."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import time
from absl import flags
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
class PForTest(PForTestCase):
def test_op_conversion_fallback_to_while_loop(self):
    # Note that we use the top_k op for this test. If a converter gets defined
# it, we will need to find another op for which a converter has not been
# defined.
x = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return nn.top_k(x_i)
with self.assertRaisesRegexp(ValueError, "No converter defined"):
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
flags.FLAGS.op_conversion_fallback_to_while_loop = True
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
flags.FLAGS.op_conversion_fallback_to_while_loop = False
def test_parallel_iterations(self):
for parallel_iterations in [2, 3, 8, 10]:
x = random_ops.random_uniform([8, 3])
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return array_ops.gather(x, i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)
self._test_loop_fn(loop_fn, 4 * constant_op.constant(2),
parallel_iterations=parallel_iterations)
def test_parallel_iterations_zero(self):
with self.assertRaisesRegexp(ValueError, "positive integer"):
pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
with self.assertRaisesRegexp(TypeError, "positive integer"):
pfor_control_flow_ops.for_loop(lambda i: 1, dtypes.int32, 8,
parallel_iterations=0)
def test_parallel_iterations_one(self):
with self.assertRaisesRegexp(ValueError, "Use for_loop instead"):
pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)
def test_vectorized_map(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
result = pfor_control_flow_ops.vectorized_map(
compute, array_ops.ones((10, 5, 3)))
self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))
@test_util.run_all_in_graph_and_eager_modes
class ReductionTest(PForTestCase):
def test_reduce_concat(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
vectorized_value = pfor_config.reduce_concat(x_i)
mean_value = math_ops.reduce_mean(vectorized_value, axis=0)
return x_i - mean_value
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_mean(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_sum(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_sum(x_i)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_sum(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_class(self):
x = random_ops.random_uniform([8, 3])
class LoopFn(object):
def __init__(self):
pass
def __call__(self, i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
output = pfor_control_flow_ops.pfor(LoopFn(), 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_functools_partial(self):
x = random_ops.random_uniform([8, 3])
def fn(i, pfor_config, dummy=None):
del dummy
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
loop_fn = functools.partial(fn, dummy=1)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_parallel_iterations(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return pfor_config.reduce_sum(x_i)
with self.assertRaisesRegexp(
ValueError, "parallel_iterations currently unsupported"):
pfor_control_flow_ops.pfor(loop_fn, 8, parallel_iterations=2)
@test_util.run_all_in_graph_and_eager_modes
class BitwiseTest(PForTestCase):
def test_unary_cwise(self):
for op in [bitwise_ops.invert]:
x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
return op(x1)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_binary_cwise(self):
binary_ops = [
bitwise_ops.bitwise_and,
bitwise_ops.bitwise_or,
bitwise_ops.bitwise_xor,
bitwise_ops.left_shift,
bitwise_ops.right_shift,
]
for op in binary_ops:
x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
y = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)
output_dtypes = []
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
@test_util.run_all_in_graph_and_eager_modes
class NNTest(PForTestCase):
def test_conv2d(self):
x = random_ops.random_uniform([3, 2, 12, 12, 3])
filt = random_ops.random_uniform([3, 3, 3, 7])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return nn.conv2d(
x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
self._test_loop_fn(loop_fn, 3)
def test_conv2d_backprop_input(self):
x_shape = [2, 12, 12, 3]
filt = random_ops.random_uniform([3, 3, 3, 7])
grad = random_ops.random_uniform([3, 2, 5, 5, 7])
def loop_fn(i):
grad1 = array_ops.gather(grad, i)
return nn.conv2d_backprop_input(
x_shape,
filt,
grad1,
strides=[1, 2, 2, 1],
padding="VALID",
data_format="NHWC")
self._test_loop_fn(loop_fn, 3)
def test_conv2d_backprop_filter(self):
x = random_ops.random_uniform([3, 2, 12, 12, 3])
x_0 = array_ops.gather(x, 0)
filter_sizes = [3, 3, 3, 7]
grad = random_ops.random_uniform([3, 2, 5, 5, 7])
def loop_fn(i):
x_i = array_ops.gather(x, i)
grad_i = array_ops.gather(grad, i)
return [
nn.conv2d_backprop_filter(
inp,
filter_sizes,
grad_i,
strides=[1, 2, 2, 1],
padding="VALID",
data_format="NHWC") for inp in [x_i, x_0]
]
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_avg_pool(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 3, 3, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.avg_pool(
x1, ksize, strides=[1, 2, 2, 1], padding="VALID",
data_format="NHWC")
loss = nn.l2_loss(output)
return output, g.gradient(loss, x1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_max_pool(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 3, 3, 1]
strides = [1, 2, 2, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.max_pool(
x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
loss = nn.l2_loss(output)
ones = array_ops.ones_like(output)
g.watch(ones)
grad = g.gradient(loss, x1, output_gradients=ones)
grad_grad = g.gradient(grad, ones)
return output, grad, grad_grad
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_max_pool3d(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 1, 3, 3, 1]
strides = [1, 1, 2, 2, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.max_pool3d(
x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
loss = nn.l2_loss(output)
ones = array_ops.ones_like(output)
g.watch(ones)
grad = g.gradient(loss, x1, output_gradients=ones)
grad_grad = g.gradient(grad, ones)
return output, grad, grad_grad
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_fused_batch_norm(self):
with compat.forward_compatibility_horizon(2019, 6, 7):
data_formats = ["NHWC"]
if test.is_gpu_available():
data_formats.append("NCHW")
for is_training in (True, False):
for data_format in data_formats:
with backprop.GradientTape(persistent=True) as g:
if data_format == "NCHW":
x = random_ops.random_uniform([3, 1, 2, 5, 5])
else:
x = random_ops.random_uniform([3, 1, 5, 5, 2])
g.watch(x)
scale = random_ops.random_uniform([2])
g.watch(scale)
offset = random_ops.random_uniform([2])
g.watch(offset)
mean = None if is_training else random_ops.random_uniform([2])
variance = None if is_training else random_ops.random_uniform([2])
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
outputs = nn.fused_batch_norm(
x1,
scale,
offset,
mean=mean,
variance=variance,
epsilon=0.01,
data_format=data_format,
is_training=is_training)
outputs = list(outputs)
# We only test the first value of outputs when is_training is
# False. It looks like CPU and GPU have different outputs for
# batch_mean and batch_variance for this case.
if not is_training:
outputs[1] = constant_op.constant(0.)
outputs[2] = constant_op.constant(0.)
loss = nn.l2_loss(outputs[0])
if is_training:
gradients = g.gradient(loss, [x1, scale, offset])
else:
gradients = [constant_op.constant(0.)] * 3
return outputs + gradients
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 6)
def test_log_softmax(self):
logits = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return (nn.log_softmax(logits_i),
nn.log_softmax(logits_i, axis=0),
nn.log_softmax(logits_i, axis=-1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_softmax(self):
logits = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return (nn.softmax(logits_i),
nn.softmax(logits_i, axis=0),
nn.softmax(logits_i, axis=-1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_softmax_cross_entropy_with_logits(self):
with backprop.GradientTape(persistent=True) as g:
logits = random_ops.random_uniform([3, 2, 4])
g.watch(logits)
labels = random_ops.random_uniform([3, 2, 4])
labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)
def loop_fn(i):
with g:
logits_i = array_ops.gather(logits, i)
labels_i = array_ops.gather(labels, i)
loss = nn.softmax_cross_entropy_with_logits(
labels=labels_i, logits=logits_i)
total_loss = math_ops.reduce_sum(loss)
return loss, g.gradient(total_loss, logits_i)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
class RandomTest(PForTestCase):
# The random values generated in the two implementations are not guaranteed to
# match. So we only check the returned shapes.
def run_and_assert_equal(self, targets1, targets2):
outputs = self._run_targets(targets1, targets2)
n = len(outputs) // 2
for i in range(n):
self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
def test_random_uniform(self):
def loop_fn(_):
return random_ops.random_uniform([3])
self._test_loop_fn(loop_fn, 5)
def test_random_uniform_int(self):
def loop_fn(_):
return random_ops.random_uniform([3], maxval=1, dtype=dtypes.int32)
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=dtypes.int32)
def test_random_standard_normal(self):
def loop_fn(_):
return random_ops.random_normal([3])
self._test_loop_fn(loop_fn, 5)
def test_truncated_normal(self):
def loop_fn(_):
return random_ops.truncated_normal([3])
self._test_loop_fn(loop_fn, 5)
def test_random_gamma_invariant_alpha(self):
def loop_fn(_):
return random_ops.random_gamma([3], alpha=[0.5])
self._test_loop_fn(loop_fn, 5)
def test_random_gamma_varying_alpha(self):
alphas = math_ops.exp(random_ops.random_normal([5, 3, 2]))
def loop_fn(i):
alphas_i = array_ops.gather(alphas, i)
# Test both scalar and non-scalar params and shapes.
return (random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[]),
random_ops.random_gamma(alpha=alphas_i, shape=[]),
random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[3]),
random_ops.random_gamma(alpha=alphas_i, shape=[3]))
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.float32] * 4)
def test_random_poisson_v2_invariant_rate(self):
def loop_fn(_):
return random_ops.random_poisson(lam=[1.3], shape=[3])
self._test_loop_fn(loop_fn, 5)
def test_random_poisson_v2_varying_rate(self):
rates = math_ops.exp(random_ops.random_normal([5, 3, 2]))
def loop_fn(i):
rates_i = array_ops.gather(rates, i)
# Test both scalar and non-scalar params and shapes.
return (random_ops.random_poisson(lam=rates_i[0, 0], shape=[]),
random_ops.random_poisson(lam=rates_i, shape=[]),
random_ops.random_poisson(lam=rates_i[0, 0], shape=[3]),
random_ops.random_poisson(lam=rates_i, shape=[3]))
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.float32] * 4)
def test_random_multinomial_invariant_logits(self):
def loop_fn(_):
return random_ops.categorical(logits=[[1., -1.]], num_samples=3)
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.int64])
def test_random_multinomial_varying_logits(self):
logits = random_ops.random_normal([5, 3, 2])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return random_ops.categorical(logits_i, num_samples=3)
self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=[dtypes.int64])
class LoggingTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_print(self):
x = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return logging_ops.Print(
x1, [x1, "x1", array_ops.shape(x1)], summarize=10)
self._test_loop_fn(loop_fn, 3)
def test_assert(self):
def loop_fn(i):
return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])
# TODO(agarwal): make this work with for_loop.
with session.Session() as sess:
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
class TensorArrayTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_read(self):
ta = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
def loop_fn(i):
return ta.read(i), ta.read(0)
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_gather(self):
ta = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
def loop_fn(i):
return ta.gather([i]), ta.gather([0, 1])
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_write_and_scatter(self):
t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
handle = t.handle
def loop_fn(i):
ta = t.write(i + 2, 2 * i).write(i, 5)
ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
return ta.flow
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
out1 = tensor_array_ops.TensorArray(
dtypes.int32, handle=handle, flow=t1[-1]).stack()
output1 = self._run_targets(out1)
t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
out2 = tensor_array_ops.TensorArray(
dtypes.int32, handle=handle, flow=t2[-1]).stack()
output2 = self._run_targets(out2)
self.assertAllClose(output2, output1)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_write(self):
def loop_fn(i):
# TODO(agarwal): switching the order of writes to ta1 does not work.
ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0, i).write(
1, 1)
ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
return ta1.stack(), ta2.stack()
self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_scatter(self):
def loop_fn(i):
# TODO(agarwal): switching the order of scatter to ta1 does not work.
ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).scatter(
[0], [[i, 2]]).scatter([1], [[1, 2]])
ta2 = tensor_array_ops.TensorArray(dtypes.int32,
2).scatter([0], [3]).scatter([1], [4])
return ta1.stack(), ta2.stack()
self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_read(self):
def loop_fn(i):
ta1 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
ta2 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
# TODO(agarwal): ta1.read(i) currently is not supported.
return ta1.read(0), ta2.read(0), ta2.read(i)
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_gather(self):
def loop_fn(i):
ta1 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
ta2 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
# TODO(agarwal): ta1.read(i) currently is not supported.
return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)
@test_util.run_v1_only("b/122612051")
def test_grad(self):
x = random_ops.random_uniform([3, 2])
ta = tensor_array_ops.TensorArray(
dtypes.float32, 3, clear_after_read=False).unstack(x)
y = math_ops.square(ta.stack())
def loop_fn(i):
y_i = array_ops.gather(y, i)
grad = gradient_ops.gradients(y_i, x)[0]
return array_ops.gather(grad, i)
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
# y = x * x. Hence dy/dx = 2 * x.
actual_grad = 2.0 * x
with session.Session() as sess:
      computed_grad, expected_grad = sess.run([t1, actual_grad])
      self.assertAllClose(expected_grad, computed_grad)
class StackTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_stack_inside_loop_invariant(self):
def loop_fn(_):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op1 = data_flow_ops.stack_push_v2(s, 1)
with ops.control_dependencies([op1]):
op2 = data_flow_ops.stack_push_v2(s, 2)
with ops.control_dependencies([op2]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e2]):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_stack_inside_push_loop_dependent(self):
def loop_fn(i):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op1 = data_flow_ops.stack_push_v2(s, i)
with ops.control_dependencies([op1]):
op2 = data_flow_ops.stack_push_v2(s, 2)
with ops.control_dependencies([op2]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e2]):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)
@test_util.run_v1_only("b/122612051")
def test_stack_outside_pop(self):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op = data_flow_ops.stack_push_v2(s, 5)
with ops.control_dependencies([op]):
op = data_flow_ops.stack_push_v2(s, 6)
with ops.control_dependencies([op]):
op = data_flow_ops.stack_push_v2(s, 7)
def loop_fn(_):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e1]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
with ops.control_dependencies([op]):
e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
with ops.control_dependencies([e1, e2]):
e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
self.assertAllEqual([7, 7], v1)
self.assertAllEqual([6, 6], v2)
self.assertAllEqual(5, v3)
@test_util.run_v1_only("b/122612051")
def test_stack_outside_push(self):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
def loop_fn(_):
return data_flow_ops.stack_push_v2(s, 7)
with self.assertRaisesRegexp(ValueError, "StackPushV2 not allowed.*"):
pfor_control_flow_ops.pfor(loop_fn, iters=2)
# TODO(agarwal): test nested while_loops. This currently requires converting a
# tf.cond.
class ControlFlowTest(PForTestCase):
def test_while_outside_loop(self):
x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
def loop_fn(i):
return x + i
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_invariant_while(self):
def loop_fn(_):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_invariant_while_with_control_dependency(self):
def loop_fn(i):
with ops.control_dependencies([i]):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
[0])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_while_with_stateful_ops(self):
def loop_fn(_):
return control_flow_ops.while_loop(
lambda j, x: j < 4,
lambda j, x: (j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_while_unstacked_condition(self):
def loop_fn(i):
return control_flow_ops.while_loop(lambda j, x: j < 4,
lambda j, x: (j + 1, x + i), [0, 0])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
_, total = control_flow_ops.while_loop(
lambda j, _: j < lengths_i,
lambda j, t: (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
return total
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
@test_util.run_v1_only("b/122612051")
def test_while_jacobian(self):
x = random_ops.random_uniform([1, 3])
y = random_ops.random_uniform([3, 3])
# out = x @ y @ y @ y @ y, where @ is matmul operator.
_, out = control_flow_ops.while_loop(
lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
[0, x])
def loop_fn(i):
out_i = array_ops.gather(out, i, axis=1)
return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])
out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
# The above code does not work with tf.while_loop instead of pfor. So we
# manually compute the expected output here.
    # Note that the gradient of the output w.r.t. x is (y @ y @ y @ y)^T.
expected_output = y
for _ in range(3):
expected_output = math_ops.matmul(expected_output, y)
expected_output = array_ops.transpose(expected_output, [1, 0])
with session.Session() as sess:
out, expected = sess.run([out, expected_output])
self.assertAllClose(expected, out)
@test_util.run_v1_only("b/122612051")
def test_tensor_array_as_loop_variable(self):
def loop_fn(i):
def body(j, ta):
ta = ta.write(j, i + j * j)
return j + 1, ta
_, ta = control_flow_ops.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
return ta.stack()
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_read_tensor_array_partitioned_indices(self):
# Note that tensor array values are pfor loop dependent, and the while loop
# termination condition is also dependent on pfor iteration.
def loop_fn(i):
ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
ta = ta.unstack(i + list(range(5)))
def body(j, s):
return j + 1, s + ta.read(j)
_, s = control_flow_ops.while_loop(lambda j, _: j < i,
body,
(0, 0))
return s
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
@test_util.run_v1_only("b/122612051")
def test_external_while_loop_grad(self):
# Here we test that external while_loops that are extended from inside pfor
    # (due to gradient calls) are not actually converted. If the below were
    # converted, all pfor iterations would write to the same tensor array
# indices.
x = constant_op.constant(1.)
def body(j, ta):
ta = ta.write(j, x)
return j + 1, ta
_, ta = control_flow_ops.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
out = ta.stack()
def loop_fn(i):
out_i = array_ops.gather(out, i)
return gradient_ops.gradients(out_i, x)[0]
with session.Session() as sess:
      # out is [x, x, x, x]. The gradient of each out[i] w.r.t. x is 1, so the
      # pfor over 3 iterations returns [1, 1, 1].
self.assertAllEqual([1, 1, 1],
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))
@test_util.run_v1_only("b/122612051")
def test_tensor_array_grad(self):
inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
ta = ta.unstack(inp)
def loop_fn(i):
def body(j, x):
value = ta.gather([j])
value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
return j + 1, x + value
_, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
(0, array_ops.zeros([2])))
out = math_ops.reduce_prod(out)
return out, gradient_ops.gradients(out, inp)[0]
pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
# Note that tf.while_loop does not work in the setup above. So we manually
# construct the equivalent computation of the above loops here.
real_out = math_ops.reduce_sum(inp, axis=[0])
real_out = math_ops.reduce_prod(real_out, axis=[1])
# Note that gradients of real_out will accumulate the gradients across the
# output value. Hence we do the same aggregation on pfor_out_grad.
real_out_grad = gradient_ops.gradients(real_out, inp)[0]
sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])
with session.Session() as sess:
v1, v2, v1_grad, v2_grad = sess.run(
[pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
self.assertAllClose(v1, v2)
self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
# We make inputs and sequence_length constant so that multiple session.run
# calls produce the same result.
inputs = constant_op.constant(
np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
sequence_length = np.random.randint(0, size=[batch_size], high=max_steps + 1)
sequence_length = constant_op.constant(sequence_length, dtype=dtypes.int32)
return inputs, sequence_length
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
cell = cell_fn(state_size)
inputs, sequence_length = dynamic_lstm_input_fn(batch_size,
state_size,
max_steps)
inputs_ta = tensor_array_ops.TensorArray(
dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
inputs_ta = inputs_ta.unstack(inputs_time_major)
zeros = array_ops.zeros([state_size])
def loop_fn(i):
sequence_length_i = array_ops.gather(sequence_length, i)
def body_fn(t, state, ta):
inputs_t = array_ops.expand_dims(
array_ops.gather(inputs_ta.read(t), i), 0)
output, new_state = cell(inputs_t, state)
output = array_ops.reshape(output, [-1])
# TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
# array_ops.where when t < min(sequence_length). Doing that requires
# supporting tf.cond pfor conversion.
done = t >= sequence_length_i
output = array_ops.where(done, zeros, output)
ta = ta.write(t, output)
new_state = [array_ops.where(done, s, ns) for s, ns in
zip(nest.flatten(state), nest.flatten(new_state))]
new_state = nest.pack_sequence_as(state, new_state)
return t + 1, new_state, ta
def condition_fn(t, _, unused):
del unused
return t < max_steps
initial_state = cell.zero_state(1, dtypes.float32)
_, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
0, initial_state,
tensor_array_ops.TensorArray(dtypes.float32, max_steps)
])
new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
new_state = nest.pack_sequence_as(initial_state, new_state)
return ta.stack(), new_state
pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
tf_output = rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, dtypes.float32))
return pfor_output, tf_output
class RNNTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_dynamic_rnn(self):
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
3, 5, 7)
self.run_and_assert_equal(pfor_outputs, tf_outputs)
@test_util.run_v1_only("b/122612051")
def test_dynamic_lstm(self):
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicLSTMCell,
3, 5, 7)
self.run_and_assert_equal(pfor_outputs, tf_outputs)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like a lot of copies between
# host and device. Optimize that.
class Benchmarks(test.Benchmark):
def _run(self, targets, iters, name=None):
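    """Runs `targets` `iters` times, reporting the average wall time in ms."""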
def _done(t):
# Note that we don't use tf.control_dependencies since that will not make
# sure that the computation on GPU has actually finished. So we fetch the
# first element of the output, and assume that this will not be called on
# empty tensors.
return array_ops.gather(array_ops.reshape(t, [-1]), 0)
targets = [_done(x) for x in nest.flatten(targets)]
sess = session.Session()
with sess:
init = variables.global_variables_initializer()
sess.run(init)
run_fn = sess.make_callable(targets)
run_fn() # Warm up
begin = time.time()
for _ in range(iters):
run_fn()
end = time.time()
avg_time_ms = 1000 * (end - begin) / iters
self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
return avg_time_ms
def benchmark_sess_run_overhead(self):
with ops.Graph().as_default():
x = constant_op.constant(1.0)
self._run(x, 10000, name="session_run_overhead")
def benchmark_add(self):
with ops.Graph().as_default():
n = 256
params = 1000
x = random_ops.random_normal([n, params])
y = random_ops.random_normal([n, params])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return x_i + y_i
pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
manual = x + y
self._run(manual, 1000, name="manual_add")
self._run(pfor_outputs, 1000, name="pfor_add")
self._run(while_outputs, 100, name="while_add")
def benchmark_matmul(self):
with ops.Graph().as_default():
n = 1024
params = 1000
x = random_ops.random_normal([n, params])
y = random_ops.random_normal([params, params])
def loop_fn(i):
x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)
return math_ops.matmul(x_i, y)
pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
manual = math_ops.matmul(x, y)
self._run(manual, 1000, name="manual_matmul")
self._run(pfor_outputs, 1000, name="pfor_matmul")
self._run(while_outputs, 100, name="while_matmul")
def benchmark_map_fn(self):
with ops.Graph().as_default():
b = 256
params = 1000
inp = random_ops.random_normal((b, params))
fn = lambda x: x * x
def pfor_map_fn(f, x):
return pfor_control_flow_ops.pfor(
lambda i: f(array_ops.gather(x, i)),
array_ops.shape(x)[0])
map_output = map_fn.map_fn(fn, inp)
pfor_output = pfor_map_fn(fn, inp)
self._run(map_output, 100, name="tf_map_fn")
self._run(pfor_output, 100, name="pfor_map_fn")
def benchmark_basic_while(self):
with ops.Graph().as_default():
def loop_fn(i):
_, s = control_flow_ops.while_loop(
lambda t, x: t < i,
lambda t, x: (t + 1, x + i),
[0, 0])
return s
iters = 50
pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
iters)
self._run(pfor_output, 100, name="pfor_basic")
self._run(for_loop_output, 100, name="for_loop_basic")
def benchmark_dynamic_rnn(self):
with ops.Graph().as_default():
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
128, 512, 16)
self._run(pfor_outputs, 100, name="pfor_rnn")
self._run(tf_outputs, 100, name="tf_rnn")
def benchmark_reduction(self):
n = 1024
with ops.Graph().as_default():
x = random_ops.random_uniform([n, n])
w = random_ops.random_uniform([n, n])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return math_ops.reduce_sum(
math_ops.matmul(pfor_config.reduce_concat(x_i), w))
# Note that output_reduction will be tiled, so there may be some minor
# overheads compared to output_no_reduction.
output_reduction = pfor_control_flow_ops.pfor(loop_fn, n)
output_no_reduction = math_ops.reduce_sum(math_ops.matmul(x, w))
# Benchmark to test that reduction does not add overhead and its output is
# treated as loop invariant.
self._run(output_reduction, 30, name="matmul_reduction")
self._run(output_no_reduction, 30, name="matmul_no_reduction")
class SparseTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_var_loop_len(self):
num_iters = array_ops.placeholder(dtypes.int32)
def loop_fn(_):
return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # Dense value: [4, 5, 6]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
with self.cached_session() as sess:
sess.run(pfor, feed_dict={num_iters: 3})
@test_util.run_v1_only("b/122612051")
def test_sparse_result_none_stacked(self):
num_iters = 10
def loop_fn(_):
return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # Dense value: [4, 5, 6]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
indices = [[i, j] for i in range(num_iters) for j in range(3)]
values = [4, 5, 6] * num_iters
dense_shapes = [num_iters, 3]
# Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_all_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
indices = array_ops.expand_dims(i, 0)
return sparse_tensor.SparseTensor(indices, i, i + 1) # [0, ..., 0, i]
# Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
list(range(num_iters)),
(num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_indices_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
indices = array_ops.expand_dims(i, 0)
return sparse_tensor.SparseTensor(indices, [1], [num_iters])
# Expected result: identity matrix size num_iters * num_iters
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
[1] * num_iters, (num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_values_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
return sparse_tensor.SparseTensor([[0]], i, [num_iters]) # [i, 0, ..., 0]
    # Expected result: [[0, 0, ...], [1, 0, ...], [2, 0, ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
list(range(num_iters)),
(num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_shapes_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
return sparse_tensor.SparseTensor([[0]], [1], i + 1) # [1, 0, ..., 0]
# Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
[1] * num_iters, (num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_shapes_stacked_2D(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
shape = array_ops.concat([i, i], 0)
return sparse_tensor.SparseTensor([[0, 0]], [1], shape) # [1, 0, ..., 0]
# Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
[1] * num_iters,
(num_iters, num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
class ParsingTest(PForTestCase):
def test_decode_csv(self):
csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}
def loop_fn(i):
line = array_ops.gather(csv_tensor, i)
return parsing_ops.decode_csv(line, **kwargs)
self._test_loop_fn(loop_fn, iters=3, loop_fn_dtypes=[dtypes.int32] * 3)
@test_util.run_v1_only("b/122612051")
def test_parse_single_example(self):
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
examples = constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
"sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
"sparse_str": _bytes_feature(*["abc"] * i)
})).SerializeToString() for i in range(10)
])
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
"sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
"sparse_str": parsing_ops.VarLenFeature(dtypes.string),
}
def loop_fn(i):
example_proto = array_ops.gather(examples, i)
f = parsing_ops.parse_single_example(example_proto, features)
return f
pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
manual = parsing_ops.parse_example(examples, features)
self.run_and_assert_equal(pfor, manual)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/control_flow_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for pfor, for_loop, jacobian."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.parallel_for import * # pylint: disable=wildcard-import
from tensorflow.python.ops.parallel_for.control_flow_ops import for_loop
from tensorflow.python.ops.parallel_for.control_flow_ops import pfor
from tensorflow.python.ops.parallel_for.gradients import batch_jacobian
from tensorflow.python.ops.parallel_for.gradients import jacobian
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/__init__.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""for_loop and pfor ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.parallel_for.pfor import PFor
from tensorflow.python.ops.parallel_for.pfor import PForConfig
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
"""Runs `loop_fn` `iters` times and stacks the outputs.
Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
stacks corresponding outputs of the different runs.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and returns a possibly nested structure of tensor
objects. The shape of these outputs should not depend on the input.
loop_fn_dtypes: dtypes for the outputs of loop_fn.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: The number of iterations that can be dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked output tensor objects with the same
nested structure as the output of `loop_fn`.
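
  Example (illustrative sketch):

  ```python
  def loop_fn(i):
    # Each iteration returns a float32 vector of length 3.
    return tf.cast(i, tf.float32) * tf.ones([3])

  # `out` has shape [4, 3]; row i holds the output of iteration i.
  out = for_loop(loop_fn, tf.float32, iters=4)
  ```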
"""
flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
is_none_list = []
def while_body(i, *ta_list):
"""Body of while loop."""
fn_output = nest.flatten(loop_fn(i))
if len(fn_output) != len(flat_loop_fn_dtypes):
raise ValueError(
"Number of expected outputs, %d, does not match the number of "
"actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes),
len(fn_output)))
outputs = []
del is_none_list[:]
is_none_list.extend([x is None for x in fn_output])
for out, ta in zip(fn_output, ta_list):
# TODO(agarwal): support returning Operation objects from loop_fn.
if out is not None:
# out may be a ref tensor, wrap it in identity to get a non-ref tensor.
ta = ta.write(i, array_ops.expand_dims(out, 0))
outputs.append(ta)
return tuple([i + 1] + outputs)
if parallel_iterations is not None:
extra_args = {"parallel_iterations": parallel_iterations}
else:
extra_args = {}
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters,
while_body,
[0] + [tensor_array_ops.TensorArray(dtype.base_dtype, iters)
for dtype in flat_loop_fn_dtypes],
**extra_args)[1:]
# TODO(rachelim): enable this for sparse tensors
output = [None if is_none else ta.concat()
for ta, is_none in zip(ta_list, is_none_list)]
return nest.pack_sequence_as(loop_fn_dtypes, output)
def _flatten_first_two_dims(x):
"""Flattens the first two dimensions of x into a single dimension."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]],
axis=0)
return array_ops.reshape(x, new_shape)
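# Name of the optional keyword argument through which `loop_fn` can receive a
# `PForConfig` instance (see `_loop_fn_has_config` below).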
PFOR_CONFIG_ARG = "pfor_config"
def pfor(loop_fn, iters, parallel_iterations=None):
"""Equivalent to running `loop_fn` `iters` times and stacking the outputs.
`pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
times, with input from 0 to `iters - 1`, and stacking corresponding output of
  each iteration. However, the implementation does not use a tf.while_loop.
Instead it adds new operations to the graph that collectively compute the same
value as what running `loop_fn` in a loop would compute.
This is an experimental feature and currently has a lot of limitations:
    - There should be no data dependency between the different iterations. For
example, a future iteration should not depend on a value or side-effect of
a previous iteration.
- Stateful kernels may mostly not be supported since these often imply a
data dependency or ordering of the iterations. We do support a limited set
of such stateful kernels though (like RandomFoo, Variable operations like
reads, etc).
- Conversion works only on a limited set of kernels for which a converter
has been registered.
- loop_fn has limited support for control flow operations. tf.cond in
particular is not supported.
- `loop_fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of `loop_fn` outputs should not depend on the input
to loop_fn.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and optionally a keyword argument `pfor_config` set
to a PForConfig object. It returns a possibly nested structure of Tensor
      or Operation objects. Note that if the `parallel_iterations` argument is
      set to something other than None, `loop_fn` may be called more than once
during graph construction. So it may need to avoid mutating global state.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None corresponds to
vectorizing all the iterations. If `parallel_iterations` is smaller than
`iters`, then chunks of at most that many iterations are dispatched in
sequence. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked tensor objects with the same nested
structure as the output of `loop_fn`.
Raises:
ValueError: If parallel_iterations is not None and not an integer > 1.
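
  Example (illustrative sketch; `x` is an arbitrary float32 input):

  ```python
  x = tf.random.normal([4, 3])

  def loop_fn(i):
    # Process row i of `x`; the output shape does not depend on i.
    return tf.reduce_sum(tf.gather(x, i) ** 2)

  # `out` has shape [4]; entry i is the output of iteration i.
  out = pfor(loop_fn, 4)
  ```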
"""
def f():
return _pfor_impl(loop_fn, iters, parallel_iterations=parallel_iterations)
control_flow_context = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
# Note that we wrap into a tf.function if in eager execution mode or under
# XLA compilation. The latter is so that we don't compile operations like
# tf.placeholder that are created by the loop body.
if (context.executing_eagerly() or
(control_flow_context is not None and
control_flow_context.IsXLAContext())):
f = function.defun(f)
return f()
def _loop_fn_has_config(loop_fn):
"""Test if `loop_fn` has a `pfor_config` argument."""
if tf_inspect.isfunction(loop_fn):
argspec = tf_inspect.getargspec(loop_fn)
return PFOR_CONFIG_ARG in argspec.args
elif isinstance(loop_fn, functools.partial):
fn = loop_fn.func
argspec = tf_inspect.getargspec(fn)
return (PFOR_CONFIG_ARG in argspec.args and
PFOR_CONFIG_ARG not in loop_fn.keywords)
else:
loop_class = tf_decorator.unwrap(loop_fn)[1]
if not hasattr(loop_class, "__call__"):
raise ValueError("loop_fn object did not have a __call__ method")
argspec = tf_inspect.getargspec(loop_class.__call__)
return PFOR_CONFIG_ARG in argspec.args
def _pfor_impl(loop_fn, iters, parallel_iterations=None, pfor_config=None):
"""Implementation of pfor."""
loop_fn_has_config = _loop_fn_has_config(loop_fn)
existing_ops = set(ops.get_default_graph().get_operations())
with ops.name_scope("loop_body"):
loop_var = array_ops.placeholder(dtypes.int32, shape=[])
if loop_fn_has_config:
if pfor_config is None:
pfor_config = PForConfig()
pfor_config._set_iters(iters) # pylint: disable=protected-access
loop_fn_outputs = loop_fn(loop_var, **{PFOR_CONFIG_ARG: pfor_config})
else:
assert pfor_config is None
loop_fn_outputs = loop_fn(loop_var)
new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
iters = ops.convert_to_tensor(iters)
if parallel_iterations is not None:
if parallel_iterations < 1:
raise ValueError("parallel_iterations must be None or a positive integer")
if parallel_iterations == 1:
raise ValueError("Found parallel_iterations == 1. Use for_loop instead.")
iters_value = tensor_util.constant_value(iters)
if iters_value is not None and iters_value < parallel_iterations:
parallel_iterations = None
if parallel_iterations is None:
with ops.name_scope("pfor"):
converter = PFor(loop_var, iters, new_ops, pfor_config=pfor_config)
outputs = []
for loop_fn_output in nest.flatten(loop_fn_outputs):
outputs.append(converter.convert(loop_fn_output))
return nest.pack_sequence_as(loop_fn_outputs, outputs)
else:
if pfor_config is not None and pfor_config._has_reductions(): # pylint: disable=protected-access
raise ValueError("Setting parallel_iterations currently unsupported if"
" reductions across iterations are performed.")
num_tiled_iterations = iters // parallel_iterations
num_remaining_iterations = iters % parallel_iterations
# TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
# a tf.function and extract the graph from there to vectorize it.
with ops.name_scope("pfor_untiled"):
converter = PFor(loop_var, num_remaining_iterations, new_ops,
pfor_config=pfor_config)
remaining_outputs = []
flattened_loop_fn_outputs = nest.flatten(loop_fn_outputs)
for loop_fn_output in flattened_loop_fn_outputs:
remaining_outputs.append(converter.convert(loop_fn_output))
with ops.name_scope("pfor_tiled"):
loop_fn_dtypes = [ops.convert_to_tensor(x).dtype
for x in flattened_loop_fn_outputs]
def tiled_loop_body(j):
offset = j * parallel_iterations + num_remaining_iterations
def tiled_loop_fn(i, pfor_config=None):
if loop_fn_has_config:
return nest.flatten(loop_fn(i + offset, pfor_config=pfor_config))
else:
return nest.flatten(loop_fn(i + offset))
return _pfor_impl(
tiled_loop_fn, parallel_iterations, pfor_config=pfor_config)
tiled_outputs = for_loop(tiled_loop_body, loop_fn_dtypes,
num_tiled_iterations, parallel_iterations=1)
tiled_outputs = [_flatten_first_two_dims(y) for y in tiled_outputs]
with ops.name_scope("pfor"):
iters_value = tensor_util.constant_value(iters)
if iters_value is None or iters_value % parallel_iterations:
outputs = control_flow_ops.cond(
math_ops.equal(num_remaining_iterations, 0),
lambda: tiled_outputs,
lambda: [array_ops.concat([x, y], axis=0)
for x, y in zip(remaining_outputs, tiled_outputs)])
else:
outputs = tiled_outputs
return nest.pack_sequence_as(loop_fn_outputs, nest.flatten(outputs))
@tf_export("vectorized_map")
def vectorized_map(fn, elems):
"""Parallel map on the list of tensors unpacked from `elems` on dimension 0.
  This method works similarly to tf.map_fn, but is optimized to run much
  faster, possibly with a much larger memory footprint. The speedups are
  obtained by vectorization (see https://arxiv.org/pdf/1903.04243.pdf). The
  idea behind
vectorization is to semantically launch all the invocations of `fn` in
parallel and fuse corresponding operations across all these invocations. This
fusion is done statically at graph generation time and the generated code is
often similar in performance to a manually fused version.
For example, let's look at a method that calculates the outer product of a
matrix.
```python
def outer_product(a):
return tf.tensordot(a, a, 0)
# outer_product was designed to not support batching.
c = outer_product(tf.ones((2, 3)))
# The shape is consistent
assert c.shape == (2, 3, 2, 3)
```
Now suppose we want an efficient batched version of outer_product. We can
simply write:
```python
batch_size = 100
a = tf.ones((batch_size, 32, 32))
c = tf.vectorized_map(outer_product, a)
assert c.shape == (batch_size, 32, 32, 32, 32)
```
Because `tf.vectorized_map` fully parallelizes the batch, this method will
generally be significantly faster than using `tf.map_fn`, especially in eager
mode.
This is an experimental feature and currently has a lot of limitations:
- There should be no data dependency between the different semantic
invocations of `fn`, i.e. it should be safe to map the elements of the
inputs in any order.
- Stateful kernels may mostly not be supported since these often imply a
data dependency. We do support a limited set of such stateful kernels
though (like RandomFoo, Variable operations like reads, etc).
- `fn` has limited support for control flow operations. `tf.cond` in
particular is not supported.
- `fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of `fn` outputs should not depend on the input
to `fn`.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same (possibly nested) structure as `elems`, and returns a possibly
nested structure of Tensors and Operations, which may be different than
the structure of `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be mapped over by `fn`.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
"""
def loop_fn(i):
gathered_elems = nest.map_structure(lambda x: array_ops.gather(x, i), elems)
return fn(gathered_elems)
batch_size = array_ops.shape(nest.flatten(elems)[0])[0]
return pfor(loop_fn, batch_size)
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/control_flow_ops.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""XLA tests for pfor."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class PForTest(PForTestCase):
def test_xla(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
def vectorized_compute(x):
return pfor_control_flow_ops.vectorized_map(compute, x)
result = xla.compile(vectorized_compute,
inputs=[array_ops.ones((10, 5, 3))])
self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/xla_control_flow_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class PForTestCase(test.TestCase):
"""Base class for test cases."""
def _run_targets(self, targets1, targets2=None, run_init=True):
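    """Evaluates the given targets after optionally initializing variables."""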
targets1 = nest.flatten(targets1)
targets2 = ([] if targets2 is None else nest.flatten(targets2))
assert len(targets1) == len(targets2) or not targets2
if run_init:
init = variables.global_variables_initializer()
self.evaluate(init)
return self.evaluate(targets1 + targets2)
def run_and_assert_equal(self, targets1, targets2):
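    """Checks that corresponding outputs of `targets1` and `targets2` match."""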
outputs = self._run_targets(targets1, targets2)
outputs = nest.flatten(outputs) # flatten SparseTensorValues
n = len(outputs) // 2
for i in range(n):
if outputs[i + n].dtype != np.object:
self.assertAllClose(outputs[i + n], outputs[i], rtol=1e-4, atol=1e-5)
else:
self.assertAllEqual(outputs[i + n], outputs[i])
def _test_loop_fn(self, loop_fn, iters,
loop_fn_dtypes=dtypes.float32,
parallel_iterations=None):
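    """Runs `loop_fn` via both pfor and for_loop and compares the outputs."""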
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=iters,
parallel_iterations=parallel_iterations)
t2 = pfor_control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, iters=iters,
parallel_iterations=parallel_iterations)
self.run_and_assert_equal(t1, t2)
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/test_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jacobian and batch_jacobian ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.layers import layers as tf_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops as tf_control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.ops.parallel_for import gradients
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class FullyConnectedModel(object):
def __init__(self, activation_size, num_layers):
self._layers = [
tf_layers.Dense(activation_size, activation=nn.relu)
for _ in range(num_layers)
]
def __call__(self, inp):
activation = inp
for layer in self._layers:
activation = layer(activation)
return activation
def fully_connected_model_fn(batch_size, activation_size, num_layers):
model = FullyConnectedModel(activation_size, num_layers)
inp = random_ops.random_normal([batch_size, activation_size])
return inp, model(inp)
def lstm_model_fn(batch_size, state_size, steps, inputs_size=None):
inputs_size = inputs_size or state_size
inputs = [
random_ops.random_normal([batch_size, inputs_size]) for _ in range(steps)
]
cell = rnn_cell.BasicLSTMCell(state_size)
init_state = cell.zero_state(batch_size, dtypes.float32)
state = init_state
for inp in inputs:
_, state = cell(inp, state)
return init_state.c, state.c
def dynamic_lstm_model_fn(batch_size, state_size, max_steps):
# We make inputs and sequence_length constant so that multiple session.run
# calls produce the same result.
inputs = constant_op.constant(
np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
sequence_length = constant_op.constant(
np.random.randint(0, size=[batch_size], high=max_steps + 1),
dtype=dtypes.int32)
cell = rnn_cell.BasicLSTMCell(state_size)
initial_state = cell.zero_state(batch_size, dtypes.float32)
return inputs, rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state)
def create_fc_batch_jacobian(batch_size, activation_size, num_layers):
inp, output = fully_connected_model_fn(batch_size, activation_size,
num_layers)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False)
return pfor_jacobian, while_jacobian
def create_lstm_batch_jacobian(batch_size, state_size, steps, inputs_size=None):
inp, output = lstm_model_fn(batch_size, state_size, steps,
inputs_size=inputs_size)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False)
return pfor_jacobian, while_jacobian
def create_dynamic_lstm_batch_jacobian(batch_size, state_size, max_steps):
inp, (_, final_state) = dynamic_lstm_model_fn(batch_size, state_size,
max_steps)
pfor_jacobian = gradients.batch_jacobian(final_state.c, inp, use_pfor=True)
# Note that use_pfor=False does not work above given the current limitations
# on implementation of while_loop. So we statically unroll the looping in the
# jacobian computation.
while_gradients = [
gradient_ops.gradients(array_ops.gather(final_state.c, i, axis=1), inp)[0]
for i in range(state_size)
]
return pfor_jacobian, while_gradients
def create_lstm_batch_hessian(batch_size, state_size, steps):
inp, output = lstm_model_fn(batch_size, state_size, steps)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
pfor_jacobian = array_ops.reshape(pfor_jacobian, [batch_size, -1])
pfor_hessian = gradients.batch_jacobian(pfor_jacobian, inp, use_pfor=True)
# TODO(agarwal): using two nested while_loop doesn't seem to work here.
# Hence we use pfor_jacobian for computing while_hessian.
while_jacobian = pfor_jacobian
while_hessian = gradients.batch_jacobian(while_jacobian, inp, use_pfor=False)
return pfor_hessian, while_hessian
def create_lstm_hessian(batch_size, state_size, steps):
_, output = lstm_model_fn(batch_size, state_size, steps)
weights = variables.trainable_variables()
pfor_jacobians = gradients.jacobian(output, weights, use_pfor=True)
pfor_hessians = [
gradients.jacobian(x, weights, use_pfor=True) for x in pfor_jacobians
]
# TODO(agarwal): using two nested while_loop doesn't seem to work here.
# Hence we use pfor_jacobians for computing while_hessians.
while_jacobians = pfor_jacobians
while_hessians = [
gradients.jacobian(x, weights, use_pfor=False) for x in while_jacobians
]
return pfor_hessians, while_hessians
def create_fc_per_eg_grad(batch_size, activation_size, num_layers):
inp = random_ops.random_normal([batch_size, activation_size])
layers = [
tf_layers.Dense(activation_size, activation=nn.relu)
for _ in range(num_layers)
]
projection = tf_layers.Dense(1)
def model_fn(activation):
for layer in layers:
activation = layer(activation)
activation = projection(activation)
activation = nn.l2_loss(activation)
return gradient_ops.gradients(activation, variables.trainable_variables())
def loop_fn(i):
return model_fn(array_ops.expand_dims(array_ops.gather(inp, i), 0))
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
loop_fn_dtypes = [x.dtype for x in variables.trainable_variables()]
while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
return pfor_outputs, while_outputs
def create_lstm_per_eg_grad(batch_size, state_size, steps, inputs_size=None):
inputs_size = inputs_size or state_size
inputs = [
random_ops.random_normal([batch_size, inputs_size]) for _ in range(steps)
]
cell = rnn_cell.BasicLSTMCell(state_size)
init_state = cell.zero_state(batch_size, dtypes.float32)
def model_fn(inps, init_state):
state = init_state
for inp in inps:
_, state = cell(inp, state)
output = nn.l2_loss(state.c)
return gradient_ops.gradients(output, variables.trainable_variables())
def loop_fn(i):
loop_inputs = [
array_ops.expand_dims(array_ops.gather(x, i), 0) for x in inputs
]
loop_init_state = rnn_cell.LSTMStateTuple(
*[array_ops.expand_dims(array_ops.gather(x, i), 0) for x in init_state])
return model_fn(loop_inputs, loop_init_state)
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
loop_fn_dtypes = [x.dtype for x in variables.trainable_variables()]
while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
return pfor_outputs, while_outputs
# Importing the code from tensorflow_models seems to cause errors. Hence we
# duplicate the model definition here.
# TODO(agarwal): Use the version in tensorflow_models/official instead.
class Mnist(keras_training.Model):
def __init__(self, data_format):
"""Creates a model for classifying a hand-written digit.
Args:
data_format: Either 'channels_first' or 'channels_last'.
"""
super(Mnist, self).__init__()
if data_format == "channels_first":
self._input_shape = [-1, 1, 28, 28]
else:
assert data_format == "channels_last"
self._input_shape = [-1, 28, 28, 1]
self.conv1 = tf_layers.Conv2D(
32, 5, padding="same", data_format=data_format, activation=nn.relu)
self.conv2 = tf_layers.Conv2D(
64, 5, padding="same", data_format=data_format, activation=nn.relu)
self.fc1 = tf_layers.Dense(1024, activation=nn.relu)
self.fc2 = tf_layers.Dense(10)
self.dropout = tf_layers.Dropout(0.4)
self.max_pool2d = tf_layers.MaxPooling2D(
(2, 2), (2, 2), padding="same", data_format=data_format)
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, 10].
"""
y = array_ops.reshape(inputs, self._input_shape)
y = self.conv1(y)
y = self.max_pool2d(y)
y = self.conv2(y)
y = self.max_pool2d(y)
y = tf_layers.flatten(y)
y = self.fc1(y)
y = self.dropout(y, training=training)
return self.fc2(y)
def create_mnist_autobatch(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
manual = model(images, training=training)
def loop_fn(i):
image = array_ops.gather(images, i)
return model(image, training=training)
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
while_outputs = control_flow_ops.for_loop(
loop_fn, dtypes.float32, batch_size)
return pfor_outputs, while_outputs, manual
def create_mnist_per_eg_grad(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
sparse_labels = np.random.randint(
low=0, high=10, size=[batch_size]).astype(np.int32)
labels = np.zeros((batch_size, 10)).astype(np.float32)
labels[np.arange(batch_size), sparse_labels] = 1.
model = Mnist(data_format)
def loop_fn(i):
image = array_ops.gather(images, i)
label = array_ops.gather(labels, i)
logits = array_ops.reshape(model(image, training=training), [-1])
loss = losses.softmax_cross_entropy(
logits=logits, onehot_labels=label, reduction=losses.Reduction.NONE)
return gradient_ops.gradients(loss, variables.trainable_variables())
pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
while_outputs = control_flow_ops.for_loop(
loop_fn, [dtypes.float32] * len(variables.trainable_variables()),
batch_size)
return pfor_outputs, while_outputs
def create_mnist_batch_jacobian(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
logits = model(images, training=training)
pfor_jacobian = gradients.batch_jacobian(logits, images, use_pfor=True)
while_jacobian = gradients.batch_jacobian(logits, images, use_pfor=False)
return pfor_jacobian, while_jacobian
def create_mnist_per_eg_jacobian(batch_size, data_format, training):
images = random_ops.random_uniform([batch_size, 28, 28])
model = Mnist(data_format)
def loop_fn(i, use_pfor):
image = array_ops.gather(images, i)
logits = array_ops.reshape(model(image, training=training), [-1])
return gradients.jacobian(
logits, variables.trainable_variables(), use_pfor=use_pfor)
pfor_outputs = control_flow_ops.pfor(
functools.partial(loop_fn, use_pfor=True),
batch_size)
while_outputs = control_flow_ops.for_loop(
functools.partial(loop_fn, use_pfor=False),
[dtypes.float32] * len(variables.trainable_variables()), batch_size)
return pfor_outputs, while_outputs
def create_fc_per_eg_jacobians(batch_size, activation_size, num_layers):
model = FullyConnectedModel(activation_size=activation_size,
num_layers=num_layers)
inp = random_ops.random_normal([batch_size, activation_size])
output = model(inp)
jacobians = gradients.jacobian(output, variables.trainable_variables())
def loop_fn(i, use_pfor):
inp_i = array_ops.expand_dims(array_ops.gather(inp, i), 0)
output = array_ops.reshape(model(inp_i), [-1])
return gradients.jacobian(
output, variables.trainable_variables(), use_pfor=use_pfor)
per_eg_jacobians_pfor = control_flow_ops.pfor(
functools.partial(loop_fn, use_pfor=True),
batch_size)
per_eg_jacobians_while = control_flow_ops.for_loop(
functools.partial(loop_fn, use_pfor=False),
[dtypes.float32] * len(variables.trainable_variables()), batch_size)
return jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while
@test_util.run_v1_only("b/122612051")
class GradientsTest(test.TestCase):
def run_and_assert_equal(self, targets1, targets2, atol=1e-4, rtol=1e-4):
targets1 = nest.flatten(targets1)
targets2 = nest.flatten(targets2)
assert len(targets1) == len(targets2)
init = variables.global_variables_initializer()
self.evaluate(init)
outputs = self.evaluate(targets1 + targets2)
n = len(outputs) // 2
for i in range(n):
self.assertAllClose(outputs[i], outputs[i + n], rtol=rtol, atol=atol)
def test_no_path(self):
for grad_func in [gradients.jacobian, gradients.batch_jacobian]:
for use_pfor in [True, False]:
x = constant_op.constant([[1.0]])
y = constant_op.constant([[2.0]])
self.assertIsNone(grad_func(y, x, use_pfor=use_pfor))
def test_jacobian_fixed_shape(self):
x = random_ops.random_uniform([2, 2])
y = math_ops.matmul(x, x, transpose_a=True)
jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
jacobian_while = gradients.jacobian(y, x, use_pfor=False)
answer = ops.convert_to_tensor([[
gradient_ops.gradients(y[0][0], x)[0],
gradient_ops.gradients(y[0][1], x)[0]
], [
gradient_ops.gradients(y[1][0], x)[0],
gradient_ops.gradients(y[1][1], x)[0]
]])
self.run_and_assert_equal(answer, jacobian_pfor)
self.run_and_assert_equal(answer, jacobian_while)
def test_jacobian_scan_shape(self):
# Shape x: [3, 4]
x = random_ops.random_uniform([3, 4])
elems = random_ops.random_uniform([6])
# Shape y: [6, 3, 4]
y = functional_ops.scan(lambda a, e: a + e, elems, initializer=x)
jacobian = gradients.jacobian(y, x)
expected_shape = [6, 3, 4, 3, 4]
self.assertAllEqual(expected_shape, jacobian.shape.as_list())
def test_jacobian_while_loop_shape(self):
# Shape x: [3, 4]
x = random_ops.random_uniform([3, 4])
_, y = tf_control_flow_ops.while_loop(lambda i, a: i > 5.,
lambda i, a: (i + 1, a + i),
(constant_op.constant(0.), x))
# Shape y: [2, 3]
y = y[:2, :3]
jacobian = gradients.jacobian(y, x)
expected_shape = [2, 3, 3, 4]
self.assertAllEqual(expected_shape, jacobian.shape.as_list())
def test_jacobian_unknown_shape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, shape=[None, None])
y = math_ops.matmul(x, x, transpose_a=True)
jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
jacobian_while = gradients.jacobian(y, x, use_pfor=False)
answer = ops.convert_to_tensor([[
gradient_ops.gradients(y[0][0], x)[0],
gradient_ops.gradients(y[0][1], x)[0]
], [
gradient_ops.gradients(y[1][0], x)[0],
gradient_ops.gradients(y[1][1], x)[0]
]])
ans, pfor_value, while_value = sess.run(
[answer, jacobian_pfor, jacobian_while],
feed_dict={x: [[1, 2], [3, 4]]})
self.assertAllClose(ans, pfor_value)
self.assertAllClose(ans, while_value)
def test_jacobian_parallel_iterations(self):
x = constant_op.constant([[1., 2], [3, 4]])
y = math_ops.matmul(x, x)
self.assertAllClose(gradients.jacobian(y, x, parallel_iterations=2),
gradients.jacobian(y, x, parallel_iterations=3))
def test_batch_jacobian_bad_shapes(self):
x = random_ops.random_uniform([2, 2])
y = random_ops.random_uniform([3, 2])
with self.assertRaisesRegexp(ValueError, "Need first dimension of output"):
gradients.batch_jacobian(y, x, use_pfor=True)
def test_batch_jacobian_bad_unknown_shapes(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.concat([x, x], axis=0)
jacobian = gradients.batch_jacobian(y, x)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"assertion failed"):
sess.run(jacobian, feed_dict={x: [[1, 2], [3, 4]]})
def test_batch_jacobian_fixed_shape(self):
x = random_ops.random_uniform([2, 3, 5])
y = x * x
batch_jacobian_pfor = gradients.batch_jacobian(y, x, use_pfor=True)
batch_jacobian_while = gradients.batch_jacobian(y, x, use_pfor=False)
two_x = 2 * x
answer = array_ops.stack(
[array_ops.diag(two_x[0]),
array_ops.diag(two_x[1])])
self.run_and_assert_equal(answer, batch_jacobian_pfor)
self.run_and_assert_equal(answer, batch_jacobian_while)
def test_batch_jacobian_unknown_shape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = x * x
batch_jacobian_pfor = gradients.batch_jacobian(y, x, use_pfor=True)
batch_jacobian_while = gradients.batch_jacobian(y, x, use_pfor=False)
two_x = 2 * x
answer = array_ops.stack(
[array_ops.diag(two_x[0]),
array_ops.diag(two_x[1])])
ans, pfor_value, while_value = sess.run(
[answer, batch_jacobian_pfor, batch_jacobian_while],
feed_dict={x: [[1, 2], [3, 4]]})
self.assertAllClose(ans, pfor_value)
self.assertAllClose(ans, while_value)
def test_batch_jacobian_parallel_iterations(self):
x = constant_op.constant([[1., 2], [3, 4]])
w = constant_op.constant([[1., 2, 3, 4], [5, 6, 7, 8]])
y = math_ops.matmul(x, w)
self.assertAllClose(gradients.batch_jacobian(y, x, parallel_iterations=2),
gradients.batch_jacobian(y, x, parallel_iterations=3))
def test_fc_batch_jacobian(self):
pfor_jacobian, while_jacobian = create_fc_batch_jacobian(8, 4, 2)
self.run_and_assert_equal(pfor_jacobian, while_jacobian)
def test_lstm_batch_jacobian(self):
pfor_jacobian, while_jacobian = create_lstm_batch_jacobian(8, 4, 2,
inputs_size=128)
self.run_and_assert_equal(pfor_jacobian, while_jacobian)
@test_util.disable_xla("This test never passed for XLA")
def DISABLED_test_dynamic_lstm_batch_jacobian(self):
pfor_jacobian, while_gradients = create_dynamic_lstm_batch_jacobian(8, 4, 3)
with session.Session() as sess:
init = variables.global_variables_initializer()
self.evaluate(init)
pfor = self.evaluate(pfor_jacobian)
for i in range(4):
while_i = sess.run(while_gradients[i])
self.assertAllClose(while_i, pfor[:, i, ...])
def test_lstm_hessian(self):
pfor_hessian, while_hessian = create_lstm_hessian(2, 2, 2)
self.run_and_assert_equal(pfor_hessian, while_hessian)
def test_lstm_batch_hessian(self):
pfor_hessian, while_hessian = create_lstm_batch_hessian(2, 2, 2)
self.run_and_assert_equal(pfor_hessian, while_hessian)
def test_fc_per_eg_grad(self):
pfor_outputs, while_outputs = create_fc_per_eg_grad(8, 4, 2)
self.run_and_assert_equal(pfor_outputs, while_outputs)
def test_lstm_per_eg_grad(self):
pfor_outputs, while_outputs = create_lstm_per_eg_grad(8, 4, 2)
self.run_and_assert_equal(pfor_outputs, while_outputs)
def test_mnist_per_eg_grad(self):
    # The CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED Winograd algorithm
    # can produce low-precision output that makes these tests fail, so we
    # disable it here.
os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
    # Note that we are setting training=False here so that dropout produces
    # the same result with pfor and with while_loop.
pfor_outputs, while_outputs = create_mnist_per_eg_grad(
4, data_format, training=False)
self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
def test_mnist_per_eg_jacobian(self):
    # The CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED Winograd algorithm
    # can produce low-precision output that makes these tests fail, so we
    # disable it here.
os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
    # Note that we are setting training=False here so that dropout produces
    # the same result with pfor and with while_loop.
pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
2, data_format, training=False)
self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
def test_fc_jacobian(self):
jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
create_fc_per_eg_jacobians(batch_size=8,
activation_size=4,
num_layers=2))
self.run_and_assert_equal(jacobians, per_eg_jacobians_pfor,
rtol=2e-3, atol=1e-3)
self.run_and_assert_equal(jacobians, per_eg_jacobians_while,
rtol=2e-3, atol=1e-3)
class GradientsBenchmarks(test.Benchmark):
def _run(self, targets, iters, name=None):
def _done(t):
# Note that we don't use tf.control_dependencies since that will not make
# sure that the computation on GPU has actually finished. So we fetch the
# first element of the output, and assume that this will not be called on
# empty tensors.
return array_ops.gather(array_ops.reshape(t, [-1]), 0)
targets = [_done(x) for x in nest.flatten(targets)]
sess = session.Session()
with sess:
init = variables.global_variables_initializer()
self.evaluate(init)
self.evaluate(targets)
begin = time.time()
for _ in range(iters):
self.evaluate(targets)
end = time.time()
avg_time_ms = (1000 * (end - begin)) / iters
self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
return avg_time_ms
def benchmark_fc_batch_jacobian(self):
with ops.Graph().as_default():
pfor_jacobian, while_jacobian = create_fc_batch_jacobian(100, 32, 20)
self._run(pfor_jacobian, 100, name="fc_batch_jacobian_pfor")
self._run(while_jacobian, 20, name="fc_batch_jacobian_while")
def benchmark_lstm_batch_jacobian(self):
with ops.Graph().as_default():
pfor_jacobian, while_jacobian = create_lstm_batch_jacobian(
100, 32, 8, inputs_size=128)
self._run(pfor_jacobian, 100, name="lstm_batch_jacobian_pfor")
self._run(while_jacobian, 20, name="lstm_batch_jacobian_while")
def benchmark_lstm_hessian(self):
with ops.Graph().as_default():
pfor_hessian, while_hessian = create_lstm_hessian(2, 2, 10)
self._run(pfor_hessian, 20, name="lstm_hessian_pfor")
      self._run(while_hessian, 3, name="lstm_hessian_while")
def benchmark_lstm_batch_hessian(self):
with ops.Graph().as_default():
pfor_hessian, while_hessian = create_lstm_batch_hessian(4, 4, 10)
self._run(pfor_hessian, 100, name="lstm_batch_hessian_pfor")
      self._run(while_hessian, 20, name="lstm_batch_hessian_while")
def benchmark_fc_per_eg_grad(self):
with ops.Graph().as_default():
pfor_outputs, while_outputs = create_fc_per_eg_grad(100, 32, 3)
self._run(pfor_outputs, 100, name="fc_per_eg_grad_pfor")
self._run(while_outputs, 20, name="fc_per_eg_grad_while")
def benchmark_lstm_per_eg_grad(self):
with ops.Graph().as_default():
pfor_outputs, while_outputs = create_lstm_per_eg_grad(100, 32, 8)
self._run(pfor_outputs, 100, name="lstm_per_eg_grad_pfor")
self._run(while_outputs, 20, name="lstm_per_eg_grad_while")
def benchmark_mnist_autobatch(self):
with ops.Graph().as_default():
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
pfor_outputs, while_outputs, manual = create_mnist_autobatch(
100, data_format, training=False)
self._run(pfor_outputs, 100, name="mnist_pfor")
self._run(while_outputs, 20, name="mnist_while")
self._run(manual, 100, name="mnist_manual")
def benchmark_mnist_per_eg_grad(self):
with ops.Graph().as_default():
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
pfor_outputs, while_outputs = create_mnist_per_eg_grad(
128, data_format, training=True)
self._run(pfor_outputs, 20, name="mnist_per_eg_grad_pfor")
self._run(while_outputs, 20, name="mnist_per_eg_grad_while")
def benchmark_mnist_per_eg_jacobian(self):
with ops.Graph().as_default():
if test.is_gpu_available():
data_format = "channels_first"
else:
data_format = "channels_last"
pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
16, data_format, training=True)
self._run(pfor_outputs, 20, name="mnist_per_eg_jacobian_pfor")
self._run(while_outputs, 20, name="mnist_per_eg_jacobian_while")
def benchmark_mnist_batch_jacobian(self):
with ops.Graph().as_default():
if test.is_gpu_available():
data_format = "channels_first"
else:
data_format = "channels_last"
pfor_outputs, while_outputs = create_mnist_batch_jacobian(
128, data_format, training=True)
self._run(pfor_outputs, 20, name="mnist_batch_jacobian_pfor")
self._run(while_outputs, 20, name="mnist_batch_jacobian_while")
def benchmark_fc_per_eg_jacobian(self):
with ops.Graph().as_default():
jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
create_fc_per_eg_jacobians(batch_size=128,
activation_size=32,
num_layers=3))
self._run(jacobians, 30, name="fc_jacobians_pfor")
self._run(per_eg_jacobians_pfor, 100,
name="fc_per_eg_jacobians_pfor")
self._run(per_eg_jacobians_while, 10,
name="fc_per_eg_jacobians_while")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/gradients_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compiled parallel-for loop."""
# pylint: disable=missing-docstring,g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import flags
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
flags.DEFINE_bool(
"op_conversion_fallback_to_while_loop", False,
"If true, falls back to using a while loop for ops for "
"which a converter is not defined.")
def _stack(t, length):
"""stacks `t` `length` times."""
ones = array_ops.ones_like(array_ops.shape(t))
multiples = array_ops.concat([length, ones], 0)
t = array_ops.tile(array_ops.expand_dims(t, 0), multiples)
return wrap(t, True)
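# Shape sketch for _stack: given `t` of shape [3, 4] and `length` equal to
# loop_len_vector (a length-1 vector holding N), the result wraps a tensor of
# shape [N, 3, 4], i.e. the loop-invariant value tiled once per pfor iteration.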
# The following stateful ops can be safely called once, and with the same
# signature as the unconverted version, if their inputs are loop invariant.
# TODO(agarwal): implement a strategy for converting Variable reads/writes. The
# plan is to map each read/write in the loop_fn to a corresponding merged
# read/write in the converted graph. Writes need to be mergeable (e.g.
# AssignAdd) to be used in `pfor`. Given a certain read/write order in the
# loop_fn, doing a one-to-one conversion will simulate executing such
# instructions in lock-step across all iterations.
passthrough_stateful_ops = set([
"VariableV2",
"VarHandleOp",
"ReadVariableOp",
"StackV2",
"TensorArrayWriteV3",
"TensorArrayReadV3",
"TensorArraySizeV3",
])
def _is_stateful_pfor_op(op):
if isinstance(op, WhileOp):
return op.is_stateful
if op.type == "Const":
    # Const doesn't have an op_def.
return False
if op.type in passthrough_stateful_ops:
return False
assert hasattr(op, "op_def") and op.op_def is not None, op
return op.op_def.is_stateful
# pylint: disable=protected-access
class WhileOp(object):
"""Object for storing state for converting the outputs of a while_loop."""
def __init__(self, exit_node, pfor_ops, pfor_config):
"""Initializer.
Args:
exit_node: A tensor output from the while_loop.
pfor_ops: list of ops inside the current pfor loop.
pfor_config: PForConfig object used while constructing loop body.
"""
self._pfor_config = pfor_config
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set([x._id for x in pfor_ops])
assert isinstance(exit_node, ops.Tensor)
self._while_context = exit_node.op._get_control_flow_context()
assert isinstance(self._while_context, control_flow_ops.WhileContext)
self._context_name = self._while_context.name
self._condition = self._while_context.pivot.op.inputs[0]
# Parts of an external while_loop could be created inside a pfor loop.
# However for the purpose here, we declare such loops to be external. Also
# note that we check if the condition was created inside or outside to
# determine if the while_loop was first created inside or outside.
# TODO(agarwal): check that the Enter and Exit of this loop are unstacked.
self._is_inside_loop = self.op_is_inside_loop(self._condition.op)
if self._is_inside_loop:
for e in self._while_context.loop_exits:
assert self.op_is_inside_loop(e.op)
# Note the code below tries to reverse engineer an existing while_loop graph
# by assuming the following pattern of nodes.
#
# NextIteration <---- Body <--- Enter
# | ^
# V ___| Y
# Enter -> Merge -> Switch___
# ^ | N
# | V
# LoopCond Exit
    # Note that elements in the lists below correspond one-to-one with each
    # other, i.e. these lists all have the same size, and the i_th entries
    # refer to the Operations/Tensors of a single cycle as illustrated above.
# List of Switch ops (ops.Operation) that feed into an Exit Node.
self._exit_switches = []
# List of inputs (ops.Tensor) to NextIteration.
self._body_outputs = []
# List of list of control inputs of the NextIteration nodes.
self._next_iter_control_inputs = []
# List of Merge ops (ops.Operation).
self._enter_merges = []
# List of output (ops.Tensor) of Exit nodes.
self._outputs = []
# List of Enter Tensors.
# There are two types of Enter nodes:
# - The Enter nodes that are used in the `loop_vars` argument to
# `while_loop` (see
# https://www.tensorflow.org/api_docs/python/tf/while_loop). We collect
# these Enter nodes immediately below by tracing backwards from the Exit
# nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the
# diagram above. This allows us to have a 1:1 correspondence between the
# self._outputs and the first elements in self._enters.
# - The Enter nodes that are used only by the body. They don't appear in the
# `loop_vars` and are not returned from the `while_loop`. In Python code,
# they are usually captured by the body lambda. We collect them below by
# iterating over all the ops in the graph. They are appended to the end of
# self._enters or self._direct_enters, and don't correspond to any outputs
# in self._outputs. Note that we keep the resource/variant Enter nodes in
# self._direct_enters and the constructed while_loop's body uses them
# directly as opposed to passing them as loop variables. This is done
# because the while_body cannot partition the resource/variant Tensors, so
# it has to leave them unchanged.
self._enters = []
self._direct_enters = []
for e in self._while_context.loop_exits:
self._outputs.append(e.op.outputs[0])
switch = e.op.inputs[0].op
assert switch.type == "Switch", switch
self._exit_switches.append(switch)
merge = switch.inputs[0].op
assert merge.type == "Merge", merge
self._enter_merges.append(merge)
enter = merge.inputs[0].op
assert enter.type == "Enter", enter
self._enters.append(enter.outputs[0])
next_iter = merge.inputs[1].op
assert next_iter.type == "NextIteration", next_iter
self._body_outputs.append(next_iter.inputs[0])
self._next_iter_control_inputs.append(next_iter.control_inputs)
# Collect all the Enter nodes that are not part of `loop_vars`, the second
# category described above.
# Also track whether the loop body has any stateful ops.
self._is_stateful = False
for op in ops.get_default_graph().get_operations():
# TODO(agarwal): make sure this works with nested case.
control_flow_context = op._get_control_flow_context()
if control_flow_context is None:
continue
if control_flow_context.name == self._context_name:
self._is_stateful |= _is_stateful_pfor_op(op)
if op.type == "Enter":
output = op.outputs[0]
if output not in self._enters:
if output.dtype in (dtypes.resource, dtypes.variant):
if output not in self._direct_enters:
self._direct_enters.append(output)
else:
self._enters.append(output)
def __str__(self):
"""String representation."""
return "while_loop(%s)" % self.name
@property
def inputs(self):
"""Input to all the Enter nodes."""
return [x.op.inputs[0] for x in self._enters + self._direct_enters]
@property
def control_inputs(self):
"""Control input to all the Enter nodes."""
control_inputs = []
for x in self._enters + self._direct_enters:
control_inputs.extend(x.op.control_inputs)
return control_inputs
@property
def outputs(self):
"""Outputs of all the Exit nodes."""
return self._outputs
@property
def name(self):
"""Context name for the while loop."""
return self._context_name
@property
def is_inside_loop(self):
"""Returns true if the while_loop was created inside the pfor."""
return self._is_inside_loop
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
# since it appears there tensorflow API could return different python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
@property
def is_stateful(self):
return self._is_stateful
@property
def pfor_converter(self):
"""Return a converter for the while loop."""
return self
def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs,
inputs_stacked):
"""Create a PFor object for converting parts of the while_loop.
Args:
parent_pfor: PFor object being used for converting the while_loop.
indices: int32 Tensor of ids for the iterations that are still active
(i.e. did not exit the while_loop).
cond_stacked: True if the while_loop condition is stacked.
inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note
that these Tensors are a subset of the loop variables for the generated
while_loop.
inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`,
indicating if the value is stacked or not.
Returns:
A PFor instance. The instance is initialized by adding conversion mappings
of nodes that will be external to the conversion that the returned
instance will be used for. e.g. Enter nodes as well as Merge and Switch
outputs are mapped to converted values.
"""
num_outputs = len(self._outputs)
assert len(inputs) == len(self._enters)
assert len(inputs_stacked) == len(self._enters)
loop_var = parent_pfor.loop_var
loop_len = array_ops.size(indices)
pfor = PFor(
loop_var,
loop_len,
pfor_ops=self._pfor_ops,
all_indices=indices,
all_indices_partitioned=cond_stacked,
pfor_config=self._pfor_config)
# Map all inputs of Enter nodes in self._direct_enters to their converted
# values.
for enter in self._direct_enters:
enter_input = enter.op.inputs[0]
converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper(
enter_input)
# Since these are resources / variants, they should be unstacked.
assert not stacked and not is_sparse_stacked, (enter, converted_enter)
pfor._add_conversion(enter, wrap(converted_enter, False))
# Map all Enter nodes to the inputs.
for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked):
pfor._add_conversion(enter, wrap(inp, stacked))
# Map outputs of Switch and Merge.
for i in range(num_outputs):
wrapped_inp = wrap(inputs[i], inputs_stacked[i])
merge = self._enter_merges[i]
pfor._add_conversion(merge.outputs[0], wrapped_inp)
# Note that second output of Merge is typically not used, except possibly
# as a control dependency. To avoid trying to output the correct value, we
# employ a hack here. We output a dummy invalid value with an incorrect
# dtype. This will allow control dependency to work but if using it as an
# input, it should typically lead to errors during graph construction due
# to dtype mismatch.
# TODO(agarwal): Check in the original graph to see if there are any
# consumers of this Tensor that use it as an input.
pfor._add_conversion(merge.outputs[1],
wrap(constant_op.constant(-1.0), False))
switch = self._exit_switches[i]
      # Don't need to worry about switch.outputs[0], which will feed into the
      # Exit node.
pfor._add_conversion(switch.outputs[1], wrapped_inp)
return pfor
def _convert_enter(self, parent_pfor, enter):
"""Converts an Enter node."""
inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0])
control_inputs = [
parent_pfor._convert_helper(x).t for x in enter.op.control_inputs
]
if control_inputs:
with ops.control_dependencies(control_inputs):
inp = array_ops.identity(inp)
return inp, stacked
def _maybe_stacked(self, cache, inp):
"""Heuristic to figue out if the coverting inp leads to a stacked value.
Args:
cache: map from Tensor to boolean indicating stacked/unstacked.
inp: input Tensor.
Returns:
True if `inp` could get stacked. If the function returns False, the
converted value should be guaranteed to be unstacked. If returning True,
it may or may not be stacked.
"""
if inp in cache:
return cache[inp]
if not self.op_is_inside_loop(inp.op):
return False
op = inp.op
output = False
if op.type in [
"Shape",
"Rank"
"ShapeN",
"ZerosLike",
"TensorArrayV3",
"TensorArraySizeV3",
]:
output = False
elif _is_stateful_pfor_op(op):
# This may be fairly aggressive.
output = True
elif op.type == "Exit":
# This may be fairly aggressive.
output = True
else:
for t in op.inputs:
if self._maybe_stacked(cache, t):
output = True
break
cache[inp] = output
return output
def _create_init_values(self, pfor_input):
"""Create arguments passed to converted while_loop."""
with ops.name_scope("while_init"):
loop_len_vector = pfor_input.pfor.loop_len_vector
loop_len = loop_len_vector[0]
num_outputs = len(self._outputs)
inputs = []
maybe_stacked_cache = {}
# Convert all the Enters. Need to do this before checking for stacking
# below.
for i, enter in enumerate(self._enters):
inp, stacked = self._convert_enter(pfor_input.pfor, enter)
inputs.append(inp)
maybe_stacked_cache[enter] = stacked
        # Since this enter node is part of the `loop_vars`, it corresponds to an
        # output and its preceding switch. We mark this switch's output with the
        # same stackedness, to act as the base case for the logic below, where
        # we walk through the body to figure out which inputs might need to be
        # stacked and which can safely remain unstacked.
if i < num_outputs:
maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked
# Shape invariants for init_values corresponding to self._enters.
input_shape_invariants = []
# TensorArrays for outputs of converted while loop
output_tas = []
# Shape invariants for output TensorArrays.
ta_shape_invariants = []
      # List of booleans indicating whether each input (i.e. each tensor
      # corresponding to self._enters) is stacked.
inputs_stacked = []
for i, inp in enumerate(inputs):
enter = self._enters[i]
inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter)
        # Note that even when an input is unstacked, the body could make it
        # stacked. We use a heuristic below to figure out if the body may be
        # making it stacked.
if i < num_outputs:
body_output = self._body_outputs[i]
if enter.op in self._pfor_ops:
body_output_stacked = self._maybe_stacked(maybe_stacked_cache,
body_output)
else:
# If constructed outside of pfor loop, then the output would not be
# stacked.
body_output_stacked = False
if body_output_stacked and not inp_stacked:
inp = _stack(inp, loop_len_vector).t
inputs[i] = inp
inp_stacked = True
# TODO(agarwal): other attributes for the TensorArray ?
output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len))
ta_shape_invariants.append(tensor_shape.TensorShape(None))
inputs_stacked.append(inp_stacked)
input_shape_invariants.append(tensor_shape.TensorShape(None))
# See documentation for __call__ for the structure of init_values.
init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas
# TODO(agarwal): try stricter shape invariants
shape_invariants = (
[tensor_shape.TensorShape(None),
tensor_shape.TensorShape(None)
] + input_shape_invariants + ta_shape_invariants)
return init_values, inputs_stacked, shape_invariants
def _process_cond_unstacked(self, conditions, indices, inputs, output_tas):
"""Handles case when condition is unstacked.
Note that all iterations end together. So we don't need to partition the
inputs. When all iterations are done, we write the inputs to the
TensorArrays. Note that we only write to index 0 of output_tas. Since all
iterations end together, they can all be output together.
"""
not_all_done = array_ops.reshape(conditions, [])
new_output_tas = []
# pylint: disable=cell-var-from-loop
for i, out_ta in enumerate(output_tas):
inp = inputs[i]
new_output_tas.append(
control_flow_ops.cond(not_all_done,
lambda: out_ta,
lambda: out_ta.write(0, inp)))
# pylint: enable=cell-var-from-loop
return not_all_done, indices, inputs, new_output_tas
def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked,
output_tas):
num_outputs = len(self._outputs)
# Compute if all iterations are done.
not_all_done = math_ops.reduce_any(conditions)
conditions_int = math_ops.cast(conditions, dtypes.int32)
# Partition the indices.
done_indices, new_indices = data_flow_ops.dynamic_partition(
indices, conditions_int, 2)
new_inputs = []
new_output_tas = []
for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)):
# Partition the inputs.
if stacked:
done_inp, new_inp = data_flow_ops.dynamic_partition(
inp, conditions_int, 2)
else:
# TODO(agarwal): avoid this stacking. See TODO earlier in
# _process_cond_unstacked.
done_inp = _stack(inp, [array_ops.size(done_indices)]).t
new_inp = inp
new_inputs.append(new_inp)
# For iterations that are done, write them to TensorArrays.
if i < num_outputs:
out_ta = output_tas[i]
# Note that done_indices can be empty. done_inp should also be empty in
# that case.
new_output_tas.append(out_ta.scatter(done_indices, done_inp))
return not_all_done, new_indices, new_inputs, new_output_tas
def _process_body(self, pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done):
"""Convert the body function."""
def true_fn(control_inputs, body_pfor, body_output, stacked):
"""Converts the body function for all but last iteration.
This essentially converts body_output. Additionally, it needs to handle
any control dependencies on the NextIteration node. So it creates another
Identity node with the converted dependencies.
"""
converted_control_inp = []
for x in control_inputs:
for t in x.outputs:
converted_control_inp.append(body_pfor._convert_helper(t).t)
if stacked:
# Note convert always does the stacking.
output = body_pfor.convert(body_output)
else:
output, convert_stacked, _ = body_pfor._convert_helper(body_output)
assert convert_stacked == stacked, body_output
with ops.control_dependencies(converted_control_inp):
return array_ops.identity(output)
body_pfor = self._init_pfor(pfor_input.pfor, new_indices,
cond_stacked, new_inputs,
inputs_stacked)
new_outputs = []
for i, (body_output, stacked) in enumerate(
zip(self._body_outputs, inputs_stacked)):
control_inp = self._next_iter_control_inputs[i]
out_dtype = body_output.dtype
# Note that we want to run the body only if not all pfor iterations are
# done. If all are done, we return empty tensors since these values will
# not be used. Notice that the value returned by the loop is based on
# TensorArrays and not directly on these returned values.
# pylint: disable=cell-var-from-loop
new_output = control_flow_ops.cond(
not_all_done,
lambda: true_fn(control_inp, body_pfor, body_output, stacked),
lambda: constant_op.constant([], dtype=out_dtype))
# pylint: enable=cell-var-from-loop
new_outputs.append(new_output)
return new_outputs
def __call__(self, pfor_input):
"""Converter for the while_loop.
The conversion of a while_loop is another while_loop.
The arguments to this converted while_loop are as follows:
not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
are done.
indices: int32 1-D Tensor storing the id of the iterations that are not
done.
args: Remaining arguments. These can be divided into 3 categories:
- First set of arguments are the tensors that correspond to the initial
elements of self._enters. The elements that appear in original while
loop's `loop_vars`.
- The second set of arguments are the tensors that correspond to the
remaining elements of self._enters. These are the tensors that directly
enter the original while loop body.
- Finally, the last set of arguments are TensorArrays. These TensorArrays
correspond to the outputs of the original while_loop, i.e. to the
elements in self._outputs. Each TensorArray has `PFor.loop_len`
elements, i.e. the number of pfor iterations. At the end, the i'th
element of each TensorArray will contain the output computed by the
i'th iteration of pfor. Note that elements can be written into these
tensors arrays in any order, depending on when the corresponding pfor
iteration is done.
If the original while_loop had `k` tensors in its `loop_vars` and its body
directly captured `m` tensors, the `args` will contain `2 * k + m` values.
In each iteration, the while_loop body recomputes the condition for all
active pfor iterations to see which of them are now done. It then partitions
all the inputs and passes them along to the converted body. Values for all
the iterations that are done are written to TensorArrays indexed by the pfor
iteration number. When all iterations are done, the TensorArrays are stacked
to get the final value.
Args:
pfor_input: A PForInput object corresponding to the output of any Exit
node from this while loop.
Returns:
List of converted outputs.
"""
# Create init_values that will be passed to the while_loop.
init_values, inputs_stacked, shape_invariants = self._create_init_values(
pfor_input)
# Note that we use a list as a hack since we need the nested function body
# to set the value of cond_is_stacked. python2.x doesn't support nonlocal
# variables.
cond_is_stacked = [None]
def cond(not_all_done, *_):
return not_all_done
def body(not_all_done, indices, *args):
      # See documentation for __call__ for the structure of *args.
num_enters = len(self._enters)
inputs = args[:num_enters]
output_tas = args[num_enters:]
# TODO(agarwal): see which outputs have consumers and only populate the
# TensorArrays corresponding to those. Or do those paths get trimmed out
# from inside the while_loop body?
assert len(inputs) >= len(output_tas)
assert len(inputs) == len(inputs_stacked)
# Convert condition
with ops.name_scope("while_cond"):
# Note that we set cond_stacked to True here. At this point we don't
# know if it could be loop invariant, hence the conservative value is
# to assume stacked.
cond_pfor = self._init_pfor(pfor_input.pfor, indices,
cond_stacked=True,
inputs=inputs,
inputs_stacked=inputs_stacked)
conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition)
cond_is_stacked[0] = cond_stacked
# Recompute the new condition, write outputs of done iterations, and
# partition the inputs if needed.
if not cond_stacked:
(not_all_done, new_indices,
new_inputs, new_output_tas) = self._process_cond_unstacked(
conditions, indices, inputs, output_tas)
else:
(not_all_done, new_indices,
new_inputs, new_output_tas) = self._process_cond_stacked(
conditions, indices, inputs, inputs_stacked, output_tas)
# Convert body
with ops.name_scope("while_body"):
# Compute the outputs from the body.
new_outputs = self._process_body(pfor_input, inputs_stacked,
new_indices, cond_stacked, new_inputs,
not_all_done)
# Note that the first num_outputs new values of inputs are computed using
# the body. Rest of them were direct Enters into the condition/body and
# the partitioning done earlier is sufficient to give the new value.
num_outputs = len(self._outputs)
new_args = ([not_all_done, new_indices] + new_outputs + list(
new_inputs[num_outputs:]) + new_output_tas)
return tuple(new_args)
while_outputs = control_flow_ops.while_loop(
cond, body, init_values, shape_invariants=shape_invariants)
output_tas = while_outputs[-len(self._outputs):]
outputs = []
assert cond_is_stacked[0] is not None
for inp_stacked, ta in zip(inputs_stacked, output_tas):
if cond_is_stacked[0]:
outputs.append(wrap(ta.stack(), True))
else:
# Note that if while_loop condition is unstacked, all iterations exit at
# the same time and we wrote those outputs in index 0 of the tensor
# array.
outputs.append(wrap(ta.read(0), inp_stacked))
return outputs
class _PforInput(object):
"""Input object passed to registered pfor converters."""
def __init__(self, pfor, op, inputs):
"""Creates a _PforInput object.
Args:
pfor: PFor converter object.
op: the Operation object that is being converted.
inputs: list of WrappedTensor objects representing converted values of the
inputs of `op`.
"""
self.pfor = pfor
self._op = op
self._inputs = inputs
def stack_inputs(self, stack_indices=None):
"""Stacks unstacked inputs at `stack_indices`.
Args:
stack_indices: indices of inputs at which stacking is done. If None,
stacking is done at all indices.
"""
if stack_indices is None:
stack_indices = range(len(self._inputs))
length = self.pfor.loop_len_vector
for i in stack_indices:
inp = self._inputs[i]
if not inp.is_stacked:
self._inputs[i] = _stack(inp.t, length)
def expanddim_inputs_for_broadcast(self):
"""Reshapes stacked inputs to prepare them for broadcast.
Since stacked inputs have an extra leading dimension, automatic broadcasting
rules could incorrectly try to expand dimensions before that leading
dimension. To avoid that, we reshape these stacked inputs to the maximum
rank they will need to be broadcasted to.
"""
if not self._inputs:
return
# Find max rank
def _get_rank(x):
rank = array_ops.rank(x.t)
if not x.is_stacked:
rank += 1
return rank
ranks = [_get_rank(x) for x in self._inputs]
max_rank = ranks[0]
for rank in ranks[1:]:
max_rank = math_ops.maximum(rank, max_rank)
for i, inp in enumerate(self._inputs):
if inp.is_stacked:
shape = array_ops.shape(inp.t)
rank_diff = array_ops.reshape(max_rank - ranks[i], [1])
ones = array_ops.tile([1], rank_diff)
new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0)
self._inputs[i] = wrap(array_ops.reshape(inp.t, new_shape), True)
@property
def inputs(self):
return self._inputs
@property
def num_inputs(self):
return len(self._inputs)
def input(self, index):
assert len(self._inputs) > index, (index, self._inputs)
return self._inputs[index]
def stacked_input(self, index):
t, is_stacked, _ = self.input(index)
if not is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
raise ValueError("Input %s of op \"%s\" expected to be not loop invariant"
".\nError while converting op %s"
"with converted inputs\n%s" % (input_name, op_type,
self._op, self.inputs))
return t
def unstacked_input(self, index):
t, is_stacked, _ = self.input(index)
if is_stacked:
op_type = self.op_type
op_def = getattr(self._op, "op_def", None)
if op_def is None:
input_name = "at index %d" % index
else:
input_name = "\"%s\"" % op_def.input_arg[index].name
raise ValueError("Input %s of op \"%s\" expected to be loop invariant"
".\nError while converting op %s"
"with converted inputs\n%s" % (input_name, op_type,
self._op, self.inputs))
return t
@property
def op(self):
return self._op
@property
def op_type(self):
return self._op.type
def get_attr(self, attr):
return self._op.get_attr(attr)
@property
def outputs(self):
return self._op.outputs
def output(self, index):
assert index < len(self._op.outputs)
return self._op.outputs[index]
_pfor_converter_registry = {}
class RegisterPFor(object):
"""Utility to register converters for pfor.
Usage:
@RegisterPFor(foo_op_type)
def _foo_converter(pfor_input):
...
  The above will register conversion function `_foo_converter` for handling
  conversion of `foo_op_type`. During conversion, the registered function will
  be called with a single argument of type `PForInput` which will contain the
  state needed for the conversion. This registered function should return a
  list of WrappedTensor objects with the same length as the number of outputs
  of the op being converted. If the op had zero outputs, then it should return
  an ops.Operation object.
"""
def __init__(self, op_type):
"""Creates an object to register a converter for op with type `op_type`."""
self.op_type = op_type
def __call__(self, converter):
name = self.op_type
assert name not in _pfor_converter_registry, "Re-registering %s " % name
_pfor_converter_registry[name] = converter
return converter
class RegisterPForWithArgs(RegisterPFor):
"""Utility to register converters for pfor.
Usage:
  @RegisterPForWithArgs(foo_op_type, foo=value, ....)
def _foo_converter(pfor_input, foo=None, ....):
...
See RegisterPFor for details on the conversion function.
`RegisterPForWithArgs` allows binding extra arguments to the
conversion function at registration time.
"""
def __init__(self, op_type, *args, **kw_args):
super(RegisterPForWithArgs, self).__init__(op_type)
self._args = args
self._kw_args = kw_args
def __call__(self, converter):
def _f(pfor_input):
return converter(pfor_input, self.op_type, *self._args, **self._kw_args)
super(RegisterPForWithArgs, self).__call__(_f)
return converter
# TODO(agarwal): call raw_ops instead of calling these low level routines.
def _create_op(op_type, inputs, op_dtypes, attrs=None):
"""Utility to create an op."""
op = ops.get_default_graph().create_op(
op_type, inputs, op_dtypes, attrs=attrs, compute_device=True)
flat_attrs = nest.flatten([(str(a), op.get_attr(str(a))) for a in attrs])
execute.record_gradient(
op_type, op.inputs, tuple(flat_attrs), op.outputs[:], "")
return op
WrappedTensor = collections.namedtuple("WrappedTensor",
["t", "is_stacked", "is_sparse_stacked"])
"""Wrapper around the result of a Tensor conversion.
The additional fields are useful for keeping track of the conversion state as
data flows through the ops in the loop body. For every op whose output is a
Tensor, its converter should return either a WrappedTensor or a list of
WrappedTensors.
Args:
t: The converted tensor
is_stacked: True if the tensor is stacked, i.e. represents the results of all
the iterations of the loop, where each row i of the tensor corresponds to
that op's output on iteration i of the loop. False if the tensor is not
    stacked, i.e. represents the result of the op for a single iteration of
the loop, where the result does not vary between iterations.
is_sparse_stacked: True if the tensor corresponds to a component tensor
(indices, values, or dense_shape) of a sparse tensor, and has been logically
stacked via a sparse conversion.
"""
def wrap(tensor, is_stacked=True, is_sparse_stacked=False):
"""Helper to create a WrappedTensor object."""
assert isinstance(is_stacked, bool)
assert isinstance(is_sparse_stacked, bool)
assert isinstance(tensor, ops.Tensor)
assert not is_sparse_stacked or is_stacked, ("If the wrapped tensor is "
"stacked via a sparse "
"conversion, it must also be "
"stacked.")
return WrappedTensor(tensor, is_stacked, is_sparse_stacked)
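# For example, a converter would typically return wrap(result, True) for a
# value that differs across pfor iterations (the leading dimension indexes the
# iteration), and wrap(result, False) for a loop-invariant value shared by all
# iterations.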
def _fallback_converter(pfor_input):
logging.warn("Using a while_loop for converting %s", pfor_input.op_type)
output_dtypes = [x.dtype for x in pfor_input.outputs]
iters = pfor_input.pfor.loop_len_vector[0]
def while_body(i, *ta_list):
"""Body of while loop."""
inputs = [
x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs
]
op_outputs = _create_op(
pfor_input.op_type,
inputs,
output_dtypes,
attrs=pfor_input.op.node_def.attr).outputs
outputs = []
for out, ta in zip(op_outputs, ta_list):
assert isinstance(out, ops.Tensor)
outputs.append(ta.write(i, array_ops.expand_dims(out, 0)))
return tuple([i + 1] + outputs)
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters, while_body, [0] + [
tensor_array_ops.TensorArray(dtype, iters) for dtype in output_dtypes
])[1:]
return tuple([wrap(ta.concat(), True) for ta in ta_list])
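# Note that the fallback above runs the original op once per pfor iteration
# inside a tf.while_loop, so its results should match those of the vectorized
# converters, just without their potential speedups. It is only used when the
# op_conversion_fallback_to_while_loop flag (defined near the top of this
# file) is set.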
class PForConfig(object):
"""A configuration object used to communicate with loop body function."""
def __init__(self):
# This may be set to the number of iterations.
self._maybe_iters = None
# Map from output placeholder to the unvectorized tensor.
self._reduce_concat_map = {}
# Reverse map of `self._reduce_concat_map`.
self._reverse_reduce_concat_map = {}
def _has_reductions(self):
"""True if some reductions where performed by loop body."""
return len(self._reduce_concat_map)
def _set_iters(self, iters):
"""Set number of pfor iterations."""
self._maybe_iters = iters
# TODO(agarwal): handle reductions inside control flow constructs.
def reduce_concat(self, x):
"""Performs a concat reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has rank one higher than `x`. The value is the vectorized
version of `x`, i.e. stacking the value of `x` across different pfor
iterations.
"""
assert not context.executing_eagerly()
assert isinstance(x, ops.Tensor)
if x not in self._reduce_concat_map:
out_shape = tensor_shape.TensorShape([self._maybe_iters]).concatenate(
x.shape)
with ops.control_dependencies([x]):
# Control dependency to make sure out is converted after x.
out = array_ops.placeholder(x.dtype, out_shape)
self._reduce_concat_map[out] = x
self._reverse_reduce_concat_map[x] = out
return out
else:
return self._reverse_reduce_concat_map[x]
def reduce_mean(self, x):
"""Performs a mean reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has same rank as `x`. The value is the mean of the values
of `x` across the pfor iterations.
"""
y = self.reduce_concat(x)
return math_ops.reduce_mean(y, axis=0)
def reduce_sum(self, x):
"""Performs a sum reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has same rank as `x`. The value is the sum of the values
of `x` across the pfor iterations.
"""
y = self.reduce_concat(x)
return math_ops.reduce_sum(y, axis=0)
def _lookup_reduction(self, pl):
"""Lookups Placeholder `pl` in the reduction map."""
msg = "Expected Tensor, got {} of type {}."
assert isinstance(pl, ops.Tensor), msg.format(pl, type(pl))
return self._reduce_concat_map.get(pl, None)
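# Illustrative sketch (commented out, not used by this module) of how a loop
# body might use the reductions above. It assumes the public pfor entry point
# creates a PForConfig and passes it to loop functions that accept a second
# argument; `examples` is a hypothetical batched tensor defined elsewhere.
#
#   def loop_fn(i, pfor_config):
#     x = array_ops.gather(examples, i)
#     loss = math_ops.reduce_sum(x * x)
#     # Mean of `loss` across all pfor iterations, usable inside the body.
#     return loss - pfor_config.reduce_mean(loss)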
class PFor(object):
"""Implementation of rewrite of parallel-for loops.
This class takes a DAG or a set of DAGs representing the body of a
parallel-for loop, and adds new operations to the graph that implements
functionality equivalent to running that loop body for a specified number of
iterations. This new set of nodes may or may not use a tensorflow loop
construct.
The process of conversion does not delete or change any existing operations.
It only adds operations that efficiently implement the equivalent
functionality. We refer to the added ops as "converted ops".
The conversion process uses a simple greedy heuristic. It walks the loop body
and tries to express the functionality of running each node in a loop with a
new set of nodes. When converting an op several cases are possible:
- The op is not inside the loop body. Hence it can be used as is.
- The op does not depend on the iteration number and is stateless. In this
case, it can be used as is.
- The op is not stateful, and depends on iteration number only through control
dependencies. In this case, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is not stateful, and all its inputs are loop invariant. In this
case, similar to above, we can create a single op with same inputs and
attributes, but with "converted" control dependencies.
- The op is stateful or at least one of the inputs is not loop invariant. In
this case, we run the registered converter for that op to create a set of
converted ops. All nodes in the set will have converted control dependencies
corresponding to control dependencies of the original op. If the op returned
multiple outputs, "converted outputs" could be produced by different ops in
this set.
"""
def __init__(self,
loop_var,
loop_len,
pfor_ops,
all_indices=None,
all_indices_partitioned=False,
pfor_config=None):
"""Creates an object to rewrite a parallel-for loop.
Args:
loop_var: ops.Tensor output of a Placeholder operation. The value should
be an int32 scalar representing the loop iteration number.
loop_len: A scalar or scalar Tensor representing the number of iterations
the loop is run for.
pfor_ops: List of all ops inside the loop body.
all_indices: If not None, an int32 vector with size `loop_len`
representing the iteration ids that are still active. These values
should be unique and sorted. However they may not be contiguous. This is
typically the case when inside a control flow construct which has
partitioned the indices of the iterations that are being converted.
all_indices_partitioned: If True, this object is being constructed from a
control flow construct where not all the pfor iterations are guaranteed
to be active.
pfor_config: PForConfig object used while constructing the loop body.
"""
assert isinstance(loop_var, ops.Tensor)
assert loop_var.op.type == "Placeholder"
self._loop_var = loop_var
loop_len_value = tensor_util.constant_value(loop_len)
if loop_len_value is not None:
loop_len = loop_len_value
self._loop_len_vector = array_ops.reshape(loop_len, [1])
self._all_indices_partitioned = all_indices_partitioned
if all_indices_partitioned:
assert all_indices is not None
self.all_indices = (
math_ops.range(loop_len) if all_indices is None else all_indices)
self._conversion_map = {}
self._conversion_map[loop_var] = wrap(self.all_indices, True)
self._pfor_ops = set(pfor_ops)
self._pfor_op_ids = set([x._id for x in pfor_ops])
self._pfor_config = pfor_config
def op_is_inside_loop(self, op):
"""True if op was created inside the pfor loop body."""
assert isinstance(op, ops.Operation)
# Note that we use self._pfor_op_ids for the check and not self._pfor_ops
    # since it appears the tensorflow API could return different python
# objects representing the same Operation node.
return op._id in self._pfor_op_ids
def _convert_sparse(self, y):
"""Returns the converted value corresponding to SparseTensor y.
For SparseTensors, instead of stacking the component tensors separately,
resulting in component tensors with shapes (N, m, rank), (N, m), and (N,
rank) respectively for indices, values, and dense_shape (where N is the loop
length and m is the number of sparse tensor values per loop iter), we want
to logically stack the SparseTensors, to create a SparseTensor whose
components are size (N * m, rank + 1), (N * m, ), and (rank + 1,)
respectively.
Here, we try to get the conversion of each component tensor.
If the tensors are stacked via a sparse conversion, return the resulting
SparseTensor composed of the converted components. Otherwise, the component
tensors are either unstacked or stacked naively. In the latter case, we
unstack the component tensors to reform loop_len SparseTensor elements,
then correctly batch them.
The unstacked tensors must have the same rank. Each dimension of each
SparseTensor will expand to be the largest among all SparseTensor elements
for that dimension. For example, if there are N SparseTensors of rank 3
being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i),
the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)).
Args:
y: A tf.SparseTensor.
Returns:
A tf.SparseTensor that is the converted value corresponding to y.
"""
outputs = [
self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)
]
assert all(isinstance(o, WrappedTensor) for o in outputs)
if all(w.is_sparse_stacked for w in outputs):
return sparse_tensor.SparseTensor(*[w.t for w in outputs])
assert not any(w.is_sparse_stacked for w in outputs), (
"Error converting SparseTensor. All components should be logically "
"stacked, or none.")
# If component tensors were not sparsely stacked, they are either unstacked
# or stacked without knowledge that they are components of sparse tensors.
# In this case, we have to restack them.
return self._restack_sparse_tensor_logically(
*[self._unwrap_or_tile(w) for w in outputs])
def _restack_sparse_tensor_logically(self, indices, values, shape):
sparse_tensor_rank = indices.get_shape().dims[-1].value
if sparse_tensor_rank is not None:
sparse_tensor_rank += 1
def fn(args):
res = gen_sparse_ops.serialize_sparse(
args[0], args[1], args[2], out_type=dtypes.variant)
return res
# Applies a map function to the component tensors to serialize each
# sparse tensor element and batch them all, then deserializes the batch.
# TODO(rachelim): Try to do this without map_fn -- add the right offsets
# to shape and indices tensors instead.
result = map_fn.map_fn(
fn, [indices, values, shape], dtype=dtypes.variant)
return sparse_ops.deserialize_sparse(
result, dtype=values.dtype, rank=sparse_tensor_rank)
def _unwrap_or_tile(self, wrapped_tensor):
"""Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it."""
output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked
if is_stacked:
return output
else:
return _stack(output, self._loop_len_vector).t
def convert(self, y):
"""Returns the converted value corresponding to y.
Args:
      y: An ops.Tensor or an ops.Operation object. If the latter, y should not
        have any outputs.
Returns:
If y does not need to be converted, it returns y as is. Else it returns
the "converted value" corresponding to y.
"""
if y is None:
return None
if isinstance(y, sparse_tensor.SparseTensor):
return self._convert_sparse(y)
output = self._convert_helper(y)
if isinstance(output, WrappedTensor):
assert isinstance(y, ops.Tensor)
return self._unwrap_or_tile(output)
else:
assert isinstance(y, ops.Operation)
assert not y.outputs
assert isinstance(output, ops.Operation)
return output
def _was_converted(self, t):
"""True if t is not a conversion of itself."""
converted_t = self._conversion_map[t]
return converted_t.t is not t
def _add_conversion(self, old_output, new_output):
self._conversion_map[old_output] = new_output
def _convert_helper(self, op_or_tensor):
stack = [op_or_tensor]
while stack:
y = stack[0]
if y in self._conversion_map:
assert isinstance(self._conversion_map[y],
(WrappedTensor, ops.Operation))
stack.pop(0)
continue
if isinstance(y, ops.Operation):
assert not y.outputs, (
"We only support converting Operation objects with no outputs. "
"Got %s", y)
y_op = y
else:
assert isinstance(y, ops.Tensor), y
y_op = y.op
is_while_loop = y_op.type == "Exit"
if is_while_loop:
while_op = WhileOp(
y, pfor_ops=self._pfor_ops, pfor_config=self._pfor_config)
is_inside_loop = while_op.is_inside_loop
# If all nodes in the while_loop graph were created inside the pfor, we
# treat the whole loop subgraph as a single op (y_op) and try to convert
# it. For while_loops that are created completely or partially outside,
# we treat them as external and should be able to simply return the Exit
# node output as is without needing any conversion. Note that for
# while_loops that are partially constructed inside, we assume they will
# be loop invariant. If that is not the case, it will create runtime
# errors since the converted graph would depend on the self._loop_var
# placeholder.
if is_inside_loop:
y_op = while_op
else:
is_inside_loop = self.op_is_inside_loop(y_op)
# If this op was not created inside the loop body, we will return as is.
# 1. Convert inputs and control inputs.
def _add_to_stack(x):
if x not in self._conversion_map:
stack.insert(0, x)
return True
else:
return False
if is_inside_loop:
added_to_stack = False
for inp in y_op.inputs:
added_to_stack |= _add_to_stack(inp)
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
added_to_stack |= _add_to_stack(t)
else:
added_to_stack |= _add_to_stack(cinp)
if added_to_stack:
continue
converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs]
some_input_converted = any(self._was_converted(x) for x in y_op.inputs)
some_input_stacked = any(x.is_stacked for x in converted_inputs)
converted_control_ops = set()
some_control_input_converted = False
for cinp in y_op.control_inputs:
if cinp.outputs:
for t in cinp.outputs:
converted_t = self._conversion_map[t]
if self._was_converted(t):
some_control_input_converted = True
converted_control_ops.add(converted_t.t.op)
else:
converted_cinp = self._conversion_map[cinp]
assert isinstance(converted_cinp, ops.Operation)
if converted_cinp != cinp:
some_control_input_converted = True
converted_control_ops.add(converted_cinp)
converted_control_ops = list(converted_control_ops)
is_stateful = _is_stateful_pfor_op(y_op)
else:
converted_inputs = []
converted_control_ops = []
logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op,
converted_inputs, converted_control_ops)
# 2. Convert y_op
      # If converting a while_loop, we let the while_loop converter deal with
# putting the control dependencies appropriately.
control_dependencies = [] if is_while_loop else converted_control_ops
with ops.control_dependencies(control_dependencies), ops.name_scope(
y_op.name + "/pfor/"):
# Op is a placeholder for a reduction.
if (self._pfor_config is not None and
self._pfor_config._lookup_reduction(y) is not None):
# Handle reductions. Map the placeholder to the unvectorized input
# that is being reduced.
reduction_input = self._pfor_config._lookup_reduction(y)
assert isinstance(reduction_input, ops.Tensor), reduction_input
# Tensor being reduced should already be converted due to a control
# dependency on the created placeholder.
# Note that in cases where reduction_input is in an outer context, one
# needs to locate the corresponding Enter node and use that to lookup
# the conversion.
# TODO(agarwal): handle reductions inside control flow constructs.
assert reduction_input in self._conversion_map, (
"Unable to handle reduction of %s, possibly as it was used "
"inside a control flow construct. Note that reductions across "
"pfor iterations are currently not supported inside control flow "
"constructs." % reduction_input)
output = self._conversion_map[reduction_input]
          # If the original input is not stacked, we tile it. We also always
          # mark the output as unstacked.
new_outputs = [wrap(self._unwrap_or_tile(output), False)]
# None of the inputs or control inputs were converted.
elif (not is_inside_loop or
(not is_stateful and not some_input_converted and
not some_control_input_converted)):
if y == y_op:
assert not isinstance(y_op, WhileOp)
new_outputs = y_op
else:
new_outputs = [wrap(x, False) for x in y_op.outputs]
elif not (is_stateful or is_while_loop or some_input_stacked):
# All inputs are either unstacked or unconverted, but some control inputs
# were converted.
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked (i.e. any(x.is_sparse_stacked for x in converted_inputs))
new_op = _create_op(y_op.type, [x.t for x in converted_inputs],
[x.dtype for x in y_op.outputs],
y_op.node_def.attr)
if y == y_op:
new_outputs = new_op
else:
new_outputs = [wrap(x, False) for x in new_op.outputs]
else:
# Either some inputs are not loop invariant or the op is stateful.
if hasattr(y_op, "pfor_converter"):
converter = y_op.pfor_converter
else:
converter = _pfor_converter_registry.get(y_op.type, None)
if converter is None:
if flags.FLAGS.op_conversion_fallback_to_while_loop:
converter = _fallback_converter
else:
raise ValueError(
"No converter defined for %s\n%s\ninputs: %s. "
"\nEither add a converter or set "
"--op_conversion_fallback_to_while_loop=True, "
"which may run slower" % (y_op.type, y_op, converted_inputs))
# TODO(rachelim): Handle the case where some inputs are sparsely
# stacked. We should only call the converter if it supports handling
# those inputs.
new_outputs = converter(_PforInput(self, y_op, converted_inputs))
if isinstance(new_outputs, WrappedTensor):
new_outputs = [new_outputs]
assert isinstance(new_outputs,
(list, tuple, ops.Operation)), new_outputs
logging.vlog(2, "converted %s %s", y_op, new_outputs)
# Insert into self._conversion_map
if y == y_op:
assert isinstance(new_outputs, ops.Operation)
self._add_conversion(y_op, new_outputs)
else:
for old_output, new_output in zip(y_op.outputs, new_outputs):
assert isinstance(new_output, WrappedTensor), (new_output, y, y_op)
self._add_conversion(old_output, new_output)
stack.pop(0)
return self._conversion_map[op_or_tensor]
@property
def loop_len_vector(self):
"""Returns a single element vector whose value is number of iterations."""
return self._loop_len_vector
@property
def loop_var(self):
"""Returns placeholder loop variable."""
return self._loop_var
@property
def pfor_ops(self):
return self._pfor_ops
@property
def all_indices_partitioned(self):
"""all_indices_partitioned property.
Returns:
True if we are inside a control flow construct and not all pfor iterations
may be active.
"""
return self._all_indices_partitioned
# nn_ops
def _flatten_first_two_dims(x):
"""Merges first two dimensions."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[-1], old_shape[2:]], axis=0)
return array_ops.reshape(x, new_shape)
def _unflatten_first_dim(x, first_dim):
"""Splits first dimension into [first_dim, -1]."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([first_dim, [-1], old_shape[1:]], axis=0)
return array_ops.reshape(x, new_shape)
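# A minimal illustrative helper (hypothetical; not used by the converters
# below) showing how the two helpers above compose: flattening merges the pfor
# dimension into the op's batch dimension, and unflattening splits it back out.
def _example_flatten_unflatten_roundtrip(x, loop_len_vector):
  """Merges and then restores the first two dimensions of `x`.

  If `x` has shape [S, N, ...] and `loop_len_vector` is [S], the intermediate
  value has shape [S * N, ...] and the result has shape [S, N, ...] again
  (though the static shape may become partially unknown).
  """
  flat = _flatten_first_two_dims(x)
  return _unflatten_first_dim(flat, loop_len_vector)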
def _inputs_with_flattening(pfor_input, input_indices):
"""Stacks and flattens first dim of inputs at indices `input_indices`."""
if input_indices is None:
input_indices = []
pfor_input.stack_inputs(stack_indices=input_indices)
inputs = []
for i in range(pfor_input.num_inputs):
if i in input_indices:
inp = pfor_input.stacked_input(i)
inp = _flatten_first_two_dims(inp)
else:
inp = pfor_input.unstacked_input(i)
inputs.append(inp)
return inputs
@RegisterPForWithArgs("Conv2D", dims=[0])
@RegisterPForWithArgs("AvgPool", dims=[0])
@RegisterPForWithArgs("MaxPool", dims=[0])
@RegisterPForWithArgs("MaxPool3D", dims=[0])
@RegisterPForWithArgs("MaxPool3DGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPool3DGradGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGradGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1])
def _convert_flatten_batch(pfor_input, op_type, dims):
del op_type
inputs = _inputs_with_flattening(pfor_input, dims)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
_channel_flatten_input_cache = {}
def _channel_flatten_input(x, data_format):
"""Merge the stack dimension with the channel dimension.
If S is pfor's stacking dimension, then:
- for SNCHW, we transpose to NSCHW. If the N dimension has size 1, the
transpose should be cheap.
- for SNHWC, we transpose to NHWCS.
We then merge the S and C dimensions.
Args:
x: ops.Tensor to transform.
data_format: "NCHW" or "NHWC".
Returns:
A 3-element tuple containing the transformed value, the permutation to pass
to transpose, and the shape to pass to reshape when transforming back
(apply the reshape first, then the transpose).
"""
graph = ops.get_default_graph()
cache_key = (graph, x, data_format)
if cache_key not in _channel_flatten_input_cache:
x_shape = array_ops.shape(x)
if data_format == b"NCHW":
order = [1, 0, 2, 3, 4]
shape = array_ops.concat([x_shape[1:2], [-1], x_shape[3:]], axis=0)
reverse_order = order
else:
order = [1, 2, 3, 0, 4]
shape = array_ops.concat([x_shape[1:4], [-1]], axis=0)
reverse_order = [3, 0, 1, 2, 4]
# Move S dimension next to C dimension.
x = array_ops.transpose(x, order)
reverse_shape = array_ops.shape(x)
# Reshape to merge the S and C dimension.
x = array_ops.reshape(x, shape)
outputs = x, reverse_order, reverse_shape
_channel_flatten_input_cache[cache_key] = outputs
else:
outputs = _channel_flatten_input_cache[cache_key]
return outputs
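# A small illustrative helper (hypothetical; the FusedBatchNorm converters
# below inline this logic) showing how the values returned by
# _channel_flatten_input undo the transformation: first reshape back to the
# transposed shape, then apply the reverse permutation.
def _example_channel_unflatten_output(y, reverse_order, reverse_shape):
  """Inverse of the merge performed by _channel_flatten_input."""
  y = array_ops.reshape(y, reverse_shape)
  return array_ops.transpose(y, reverse_order)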
# Note that with training=True, running FusedBatchNormV3 on individual examples
# is very different from running FusedBatchNormV3 on a batch of those examples.
# This is because, for the latter case, the operation can be considered as first
# computing the mean and variance over all the examples and then using these
# to scale all those examples. This creates a data dependency between these
# different "iterations" since the inputs to the scaling step depends on the
# statistics coming from all these inputs.
# As with other kernels, the conversion here effectively runs the kernel
# independently for each iteration, and returns outputs by stacking outputs from
# each of those iterations.
@RegisterPFor("FusedBatchNormV3")
def _convert_fused_batch_norm(pfor_input):
is_training = pfor_input.get_attr("is_training")
# When BatchNorm is used with training=False, mean and variance are provided
# externally and used as is by the op. Thus, we can merge the S and N
# dimensions as we do for regular operations.
# When BatchNorm is used with training=True, mean and variance are computed
# for each channel across the batch dimension (first one). If we merge S and N
# dimensions, the means and variances will be computed over a larger set. So, we
# merge the S and C dimensions instead.
if not is_training:
# We return zeros for the batch_mean and batch_variance outputs. Note that CPU
# and GPU seem to behave differently for these two outputs. CPU outputs zeros
# because these values are not used during inference. GPU outputs something,
# probably the real means and variances.
inputs = _inputs_with_flattening(pfor_input, [0])
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
n = pfor_input.pfor.loop_len_vector
y = _unflatten_first_dim(y, n)
mean = pfor_input.unstacked_input(3)
zeros = array_ops.zeros_like(mean)
return [wrap(y, True), wrap(zeros, False), wrap(zeros, False)]
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
# We merge the first dimension with the "C" dimension, run FusedBatchNormV3,
# and then transpose back.
x = pfor_input.stacked_input(0)
x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format)
# Note that we stack all the other inputs as well so that, after reshaping
# them to 1-D, their length matches the new merged channel dimension.
inputs = [x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(1, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
y = outputs[0]
y = array_ops.reshape(y, reverse_shape)
y = array_ops.transpose(y, reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [y] + outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("FusedBatchNormGradV3")
def _convert_fused_batch_norm_grad(pfor_input):
pfor_input.stack_inputs()
data_format = pfor_input.get_attr("data_format")
y_backprop = pfor_input.stacked_input(0)
y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format)
x = pfor_input.stacked_input(1)
x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format)
inputs = [y_backprop, x] + [
array_ops.reshape(pfor_input.stacked_input(i), [-1])
for i in range(2, pfor_input.num_inputs)
]
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
x_backprop = outputs[0]
x_backprop = array_ops.reshape(x_backprop, x_reverse_shape)
x_backprop = array_ops.transpose(x_backprop, x_reverse_order)
n = pfor_input.pfor.loop_len_vector
outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
outputs = [x_backprop] + outputs
return [wrap(output, True) for output in outputs]
@RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0)
@RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0)
def _convert_flatten_batch_shape_input(pfor_input, op_type, flatten_dims,
shape_dim):
del op_type
inputs = _inputs_with_flattening(pfor_input, flatten_dims)
n = pfor_input.pfor.loop_len_vector
# Adjust the `input_sizes` input.
ones = array_ops.ones(
[array_ops.shape(inputs[shape_dim])[0] - 1], dtype=n.dtype)
inputs[shape_dim] *= array_ops.concat([n, ones], axis=0)
outputs = _create_op(
pfor_input.op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
outputs = [_unflatten_first_dim(x, n) for x in outputs]
return [wrap(x, True) for x in outputs]
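# Worked example for the converter above (hypothetical values): for
# Conv2DBackpropInput with input_sizes = [4, 32, 32, 3] and S pfor iterations,
# the gradient input at index 2 is stacked and flattened into a batch of
# S * 4, while input_sizes is rescaled elementwise by [S, 1, 1, 1] to
# [4 * S, 32, 32, 3] so that the op's output matches the flattened batch
# before being unflattened back to [S, 4, 32, 32, 3].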
@RegisterPFor("Conv2DBackpropFilter")
def _convert_conv2d_backprop_filter(pfor_input):
pfor_input.stack_inputs(stack_indices=[2])
inputs, inputs_stacked, _ = pfor_input.input(0)
filter_sizes = pfor_input.unstacked_input(1)
grads = pfor_input.stacked_input(2)
strides = pfor_input.get_attr("strides")
padding = pfor_input.get_attr("padding")
use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu")
data_format = pfor_input.get_attr("data_format")
dilations = pfor_input.get_attr("dilations")
if inputs_stacked:
# TODO(agarwal): Implement this efficiently.
logging.warn("Conv2DBackpropFilter uses a while_loop. Fix that!")
def while_body(i, ta):
inp_i = inputs[i, ...]
grad_i = grads[i, ...]
output = nn_ops.conv2d_backprop_filter(
inp_i,
filter_sizes,
grad_i,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
return i + 1, ta.write(i, array_ops.expand_dims(output, 0))
n = array_ops.reshape(pfor_input.pfor.loop_len_vector, [])
_, ta = control_flow_ops.while_loop(
lambda i, ta: i < n, while_body,
(0, tensor_array_ops.TensorArray(inputs.dtype, n)))
output = ta.concat()
return wrap(output, True)
else:
# We merge the stack dimension with the channel dimension of the gradients
# and pretend we had a larger filter (see change to filter_sizes below).
# Once the filter backprop is computed, we reshape and transpose back
# appropriately.
grads, _, _ = _channel_flatten_input(grads, data_format)
n = pfor_input.pfor.loop_len_vector
old_filter_sizes = filter_sizes
filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0)
output = nn_ops.conv2d_backprop_filter(
inputs,
filter_sizes,
grads,
strides=strides,
padding=padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format,
dilations=dilations)
new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0)
output = array_ops.reshape(output, new_filter_shape)
output = array_ops.transpose(output, [3, 0, 1, 2, 4])
return wrap(output, True)
@RegisterPForWithArgs("LogSoftmax", gen_nn_ops.log_softmax)
@RegisterPForWithArgs("Softmax", gen_nn_ops.softmax)
def _convert_softmax(pfor_input, op_type, op_func):
del op_type
return wrap(op_func(pfor_input.stacked_input(0)), True)
# array_ops
@RegisterPForWithArgs("Identity", array_ops.identity)
@RegisterPForWithArgs("StopGradient", array_ops.stop_gradient)
@RegisterPForWithArgs("MatrixDiag", array_ops.matrix_diag)
@RegisterPForWithArgs("MatrixDiagPart", array_ops.matrix_diag_part)
def _convert_identity(pfor_input, op_type, op_func):
del op_type
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("IdentityN")
def _convert_identity_n(pfor_input):
outputs = array_ops.identity_n([x.t for x in pfor_input.inputs])
return [wrap(out, inp.is_stacked) for out, inp in
zip(outputs, pfor_input.inputs)]
@RegisterPFor("Reshape")
def _convert_reshape(pfor_input):
t = pfor_input.stacked_input(0)
shape = pfor_input.unstacked_input(1)
new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
return wrap(array_ops.reshape(t, new_shape), True)
@RegisterPFor("BroadcastTo")
def _convert_broadcast_to(pfor_input):
t = pfor_input.stacked_input(0)
shape = pfor_input.unstacked_input(1)
new_shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
# Expand dims of stacked t to broadcast against the new shape.
# TODO(davmre): consider factoring out common code with
# `expanddim_inputs_for_broadcast`, which has similar logic but with
# implicit shapes (of input Tensors) rather than explicit shapes.
rank_diff = array_ops.shape(new_shape)[0] - array_ops.rank(t)
ones = array_ops.tile([1], array_ops.reshape(rank_diff, [1]))
t_shape = array_ops.shape(t)
t_expanded_shape = array_ops.concat([t_shape[:1], ones, t_shape[1:]], axis=0)
return wrap(array_ops.broadcast_to(array_ops.reshape(t, t_expanded_shape),
new_shape), True)
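# Worked example for the converter above (hypothetical shapes): broadcasting a
# stacked `t` of shape [S, 2] to an unstacked target shape [3, 2] becomes a
# broadcast to new_shape = [S, 3, 2]. Since `t` has rank 2 and new_shape has
# length 3, one singleton dimension is inserted right after the pfor
# dimension, giving [S, 1, 2], which then broadcasts cleanly to [S, 3, 2].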
@RegisterPFor("ExpandDims")
def _convert_expanddims(pfor_input):
t = pfor_input.stacked_input(0)
dim = pfor_input.unstacked_input(1)
dim += math_ops.cast(dim >= 0, dtypes.int32)
return wrap(array_ops.expand_dims(t, axis=dim), True)
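# Worked example of the axis adjustment above (hypothetical values): with a
# pfor loop of length S, a tensor of shape [2, 3] becomes stacked with shape
# [S, 2, 3]. An ExpandDims on axis 1 of the original must act on axis 2 of the
# stacked tensor, so non-negative axes are shifted by one, while negative axes
# already count from the end and are left unchanged. The same shift appears in
# several converters below (OneHot, Pack, Split, ConcatV2, reductions,
# cumulative ops, etc.).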
@RegisterPForWithArgs("LowerBound", gen_array_ops.lower_bound)
@RegisterPForWithArgs("UpperBound", gen_array_ops.upper_bound)
def _convert_searchsorted(pfor_input, _, op_func):
pfor_input.stack_inputs()
sorted_inputs = _flatten_first_two_dims(pfor_input.stacked_input(0))
values = _flatten_first_two_dims(pfor_input.stacked_input(1))
out_type = pfor_input.get_attr("out_type")
output = op_func(sorted_inputs, values, out_type)
return wrap(_unflatten_first_dim(
output, pfor_input.pfor.loop_len_vector), True)
@RegisterPFor("MatrixBandPart")
def _convert_matrix_band_part(pfor_input):
t = pfor_input.stacked_input(0)
num_lower = pfor_input.unstacked_input(1)
num_upper = pfor_input.unstacked_input(2)
return wrap(array_ops.matrix_band_part(
t, num_lower=num_lower, num_upper=num_upper), True)
@RegisterPFor("MatrixDiagPartV2")
def _convert_matrix_diag_part_v2(pfor_input):
t = pfor_input.stacked_input(0)
return wrap(array_ops.matrix_diag_part(t), True)
@RegisterPFor("MatrixSetDiag")
@RegisterPFor("MatrixSetDiagV2")
def _convert_matrix_set_diag(pfor_input):
pfor_input.stack_inputs()
t = pfor_input.stacked_input(0)
diag = pfor_input.stacked_input(1)
return wrap(array_ops.matrix_set_diag(t, diag), True)
@RegisterPFor("OneHot")
def _convert_one_hot(pfor_input):
indices = pfor_input.stacked_input(0)
depth = pfor_input.unstacked_input(1)
on_value = pfor_input.unstacked_input(2)
off_value = pfor_input.unstacked_input(3)
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
return wrap(
array_ops.one_hot(indices, depth, on_value, off_value, axis), True)
@RegisterPFor("Slice")
def _convert_slice(pfor_input):
t = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
size = pfor_input.unstacked_input(2)
begin = array_ops.concat([[0], begin], axis=0)
size = array_ops.concat([[-1], size], axis=0)
return wrap(array_ops.slice(t, begin, size), True)
@RegisterPFor("Tile")
def _convert_tile(pfor_input):
t = pfor_input.stacked_input(0)
multiples = pfor_input.unstacked_input(1)
multiples = array_ops.concat([[1], multiples], 0)
return wrap(array_ops.tile(t, multiples), True)
@RegisterPFor("Pack")
def _convert_pack(pfor_input):
pfor_input.stack_inputs()
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
return wrap(
array_ops.stack([x.t for x in pfor_input.inputs], axis=axis), True)
@RegisterPFor("Unpack")
def _convert_unpack(pfor_input):
value = pfor_input.stacked_input(0)
axis = pfor_input.get_attr("axis")
if axis >= 0:
axis += 1
num = pfor_input.get_attr("num")
return [wrap(x, True) for x in array_ops.unstack(value, axis=axis, num=num)]
@RegisterPFor("Pad")
def _convert_pad(pfor_input):
t = pfor_input.stacked_input(0)
paddings = pfor_input.unstacked_input(1)
paddings = array_ops.concat([[[0, 0]], paddings], 0)
return wrap(array_ops.pad(t, paddings, mode="CONSTANT"), True)
@RegisterPFor("Split")
def _convert_split(pfor_input):
split_dim = pfor_input.unstacked_input(0)
t = pfor_input.stacked_input(1)
num_split = pfor_input.get_attr("num_split")
split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
return [wrap(x, True) for x in array_ops.split(t, num_split, axis=split_dim)]
@RegisterPFor("SplitV")
def _convert_split_v(pfor_input):
t = pfor_input.stacked_input(0)
splits = pfor_input.unstacked_input(1)
split_dim = pfor_input.unstacked_input(2)
split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
return [wrap(x, True) for x in array_ops.split(t, splits, axis=split_dim)]
@RegisterPFor("Squeeze")
def _convert_squeeze(pfor_input):
t = pfor_input.stacked_input(0)
squeeze_dims = pfor_input.get_attr("squeeze_dims")
squeeze_dims = [i + 1 if i >= 0 else i for i in squeeze_dims]
return wrap(array_ops.squeeze(t, axis=squeeze_dims), True)
@RegisterPFor("Transpose")
def _convert_transpose(pfor_input):
t = pfor_input.stacked_input(0)
perm = pfor_input.unstacked_input(1)
new_perm = array_ops.concat([[0], perm + 1], axis=0)
return wrap(array_ops.transpose(t, new_perm), True)
@RegisterPFor("ZerosLike")
def _convert_zeroslike(pfor_input):
t = pfor_input.stacked_input(0)
shape = array_ops.shape(t)[1:]
return wrap(array_ops.zeros(shape, dtype=t.dtype), False)
@RegisterPFor("Gather")
@RegisterPFor("GatherV2")
def _convert_gather(pfor_input):
param, param_stacked, _ = pfor_input.input(0)
indices, indices_stacked, _ = pfor_input.input(1)
op_type = pfor_input.op_type
if op_type == "Gather":
validate_indices = pfor_input.get_attr("validate_indices")
axis = 0
else:
validate_indices = None
axis = pfor_input.unstacked_input(2)
axis_value = tensor_util.constant_value(axis)
if axis_value is not None:
axis = axis_value
if indices_stacked and not param_stacked:
if indices == pfor_input.pfor.all_indices and axis == 0:
param_shape0 = param.shape.dims[0].value
indices_shape0 = indices.shape.dims[0].value
if param_shape0 is not None and indices_shape0 == param_shape0:
# Note that with loops and conditionals, indices may not be contiguous.
# However they will be sorted and unique. So if the shape matches, then
# it must be picking up all the rows of param.
return wrap(param, True)
# TODO(agarwal): use array_ops.slice here.
output = array_ops.gather(
param, indices, validate_indices=validate_indices, axis=axis)
if axis != 0:
axis = control_flow_ops.cond(
axis < 0, lambda: axis + array_ops.rank(param), lambda: axis)
order = array_ops.concat(
[[axis],
math_ops.range(axis),
math_ops.range(axis + 1, array_ops.rank(output))],
axis=0)
output = control_flow_ops.cond(
math_ops.equal(axis, 0), lambda: output,
lambda: array_ops.transpose(output, order))
return wrap(output, True)
if param_stacked:
loop_len_vector = pfor_input.pfor.loop_len_vector
pfor_input.stack_inputs(stack_indices=[1])
indices = pfor_input.stacked_input(1)
param_flat = _flatten_first_two_dims(param)
# Recompute indices to handle stacked param.
indices_offset = math_ops.range(
loop_len_vector[0]) * array_ops.shape(param)[1]
# Reshape indices_offset to allow broadcast addition
ones = array_ops.ones([array_ops.rank(indices) - 1], dtype=dtypes.int32)
new_shape = array_ops.concat([loop_len_vector, ones], axis=0)
indices_offset = array_ops.reshape(indices_offset, new_shape)
indices += indices_offset
# TODO(agarwal): handle axis != 0. May need to transpose param or
# array_ops.gather_nd.
if isinstance(axis, ops.Tensor):
axis_value = tensor_util.constant_value(axis)
else:
try:
axis_value = int(axis)
except TypeError:
axis_value = None
msg = ("Gather, where indices and param are both loop dependent, currently "
"requires axis=0")
if axis_value is not None and axis_value != 0:
raise ValueError("Error while converting %s. %s. Got axis=%d" %
(pfor_input.op, msg, axis))
with ops.control_dependencies(
[check_ops.assert_equal(axis, 0, message=msg)]):
output = array_ops.gather(param_flat, indices)
return wrap(output, True)
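# Worked example for the stacked-param case above (hypothetical shapes): if
# param has stacked shape [S, M, ...] and the (stacked) indices have shape
# [S, k], param is flattened to [S * M, ...] and the indices of iteration i
# are shifted by i * M. The plain gather on the flattened param then only
# picks rows from iteration i's own slab, producing an output of shape
# [S, k, ...].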
@RegisterPFor("ConcatV2")
def _convert_concatv2(pfor_input):
n = pfor_input.num_inputs
pfor_input.stack_inputs(stack_indices=range(n - 1))
axis = pfor_input.unstacked_input(n - 1)
axis += math_ops.cast(axis >= 0, axis.dtype)
return wrap(
array_ops.concat([x.t for x in pfor_input.inputs[:n - 1]], axis=axis),
True)
@RegisterPFor("StridedSlice")
def _convert_strided_slice(pfor_input):
inp = pfor_input.stacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice(
inp,
begin,
end,
strides,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
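# Worked example of the mask handling above (hypothetical values): the stacked
# input has one extra leading (pfor) dimension, so every mask is shifted left
# by one bit to keep referring to the original dimensions. begin_mask and
# end_mask additionally get bit 0 set so that the full range of the new
# leading dimension is kept, e.g. begin_mask = 0b10 becomes 0b101. The
# StridedSliceGrad converter below applies the same adjustment.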
@RegisterPFor("StridedSliceGrad")
def _convert_strided_slice_grad(pfor_input):
shape = pfor_input.unstacked_input(0)
begin = pfor_input.unstacked_input(1)
end = pfor_input.unstacked_input(2)
strides = pfor_input.unstacked_input(3)
dy = pfor_input.stacked_input(4)
begin_mask = pfor_input.get_attr("begin_mask")
end_mask = pfor_input.get_attr("end_mask")
ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
new_axis_mask = pfor_input.get_attr("new_axis_mask")
shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
begin = array_ops.concat([[0], begin], axis=0)
end = array_ops.concat([[0], end], axis=0)
strides = array_ops.concat([[1], strides], axis=0)
begin_mask = begin_mask << 1 | 1
end_mask = end_mask << 1 | 1
ellipsis_mask <<= 1
new_axis_mask <<= 1
shrink_axis_mask <<= 1
return wrap(
array_ops.strided_slice_grad(
shape,
begin,
end,
strides,
dy,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask), True)
# math_ops
@RegisterPFor("MatMul")
def _convert_matmul(pfor_input):
# TODO(agarwal): Check if tiling is faster than two transposes.
a, a_stacked, _ = pfor_input.input(0)
b, b_stacked, _ = pfor_input.input(1)
tr_a = pfor_input.get_attr("transpose_a")
tr_b = pfor_input.get_attr("transpose_b")
if a_stacked and b_stacked:
output = wrap(math_ops.matmul(a, b, adjoint_a=tr_a, adjoint_b=tr_b), True)
return output
elif a_stacked:
if tr_a:
a = array_ops.transpose(a, [0, 2, 1])
if a.shape.is_fully_defined():
x, y, z = a.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(a), 3)
]
a = array_ops.reshape(a, [x * y, z])
prod = math_ops.matmul(a, b, transpose_b=tr_b)
return wrap(array_ops.reshape(prod, [x, y, -1]), True)
else:
assert b_stacked
if tr_b:
perm = [2, 0, 1]
b = array_ops.transpose(b, perm)
else:
# As an optimization, if one of the first two dimensions is 1, then we can
# reshape instead of transpose.
# TODO(agarwal): This check can be done inside Transpose kernel.
b_shape = array_ops.shape(b)
min_dim = math_ops.minimum(b_shape[0], b_shape[1])
perm = control_flow_ops.cond(
math_ops.equal(min_dim, 1), lambda: [0, 1, 2], lambda: [1, 0, 2])
new_shape = array_ops.stack([b_shape[1], b_shape[0], b_shape[2]])
b = array_ops.transpose(b, perm)
b = array_ops.reshape(b, new_shape)
if b.shape.is_fully_defined():
x, y, z = b.shape
else:
x, y, z = [
array_ops.reshape(i, [])
for i in array_ops.split(array_ops.shape(b), 3)
]
b = array_ops.reshape(b, [x, y * z])
prod = math_ops.matmul(a, b, transpose_a=tr_a)
prod = array_ops.reshape(prod, [-1, y, z])
prod = array_ops.transpose(prod, [1, 0, 2])
return wrap(prod, True)
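# Worked example for the stacked-`a` branch above (hypothetical shapes): with
# `a` stacked as [S, M, K] (after the optional pre-transpose for tr_a) and `b`
# unstacked as [K, P], `a` is reshaped to [S * M, K] so that a single MatMul
# produces [S * M, P], which is reshaped back to [S, M, P]. This avoids a
# batched matmul when only one operand is loop dependent.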
# TODO(rmlarsen): Use the converter of BatchMatMulV2 once compatibility window
# is met.
@RegisterPFor("BatchMatMul")
def _convert_batch_mat_mul(pfor_input):
# TODO(agarwal): There may be a more efficient way to do this instead of
# stacking the inputs.
pfor_input.stack_inputs()
x = pfor_input.stacked_input(0)
y = pfor_input.stacked_input(1)
adj_x = pfor_input.get_attr("adj_x")
adj_y = pfor_input.get_attr("adj_y")
x = _flatten_first_two_dims(x)
y = _flatten_first_two_dims(y)
output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector)
return wrap(output, True)
@RegisterPFor("BatchMatMulV2")
def _convert_batch_mat_mul_v2(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
adj_x = pfor_input.get_attr("adj_x")
adj_y = pfor_input.get_attr("adj_y")
output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
return wrap(output, True)
@RegisterPForWithArgs("Sum", math_ops.reduce_sum)
@RegisterPForWithArgs("Prod", math_ops.reduce_prod)
@RegisterPForWithArgs("Max", math_ops.reduce_max)
@RegisterPForWithArgs("Min", math_ops.reduce_min)
@RegisterPForWithArgs("Mean", math_ops.reduce_mean)
@RegisterPForWithArgs("All", math_ops.reduce_all)
@RegisterPForWithArgs("Any", math_ops.reduce_any)
def _convert_reduction(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
indices = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
indices += math_ops.cast(indices >= 0, dtypes.int32)
keep_dims = pfor_input.get_attr("keep_dims")
return wrap(op_func(t, indices, keepdims=keep_dims), True)
@RegisterPForWithArgs("Cumsum", math_ops.cumsum)
@RegisterPForWithArgs("Cumprod", math_ops.cumprod)
def _convert_cumfoo(pfor_input, _, op_func):
t = pfor_input.stacked_input(0)
axis = pfor_input.unstacked_input(1)
# Shift positive indices by one to account for the extra dimension.
axis += math_ops.cast(axis >= 0, dtypes.int32)
exclusive = pfor_input.get_attr("exclusive")
reverse = pfor_input.get_attr("reverse")
return wrap(op_func(t, axis, exclusive=exclusive, reverse=reverse), True)
@RegisterPFor("BiasAdd")
def _convert_biasadd(pfor_input):
t, t_stacked, _ = pfor_input.input(0)
bias, bias_stacked, _ = pfor_input.input(1)
data_format = pfor_input.get_attr("data_format").decode()
if bias_stacked:
# BiasAdd only supports 1-D biases, so cast bias to match value and use Add.
pfor_input.expanddim_inputs_for_broadcast()
t, _, _ = pfor_input.input(0)
bias = math_ops.cast(pfor_input.stacked_input(1), t.dtype)
if compat.as_bytes(data_format) == b"NCHW":
b_shape = array_ops.shape(bias)
new_b_shape = array_ops.concat(
[b_shape[:-3], b_shape[-1:], b_shape[-3:-1]], axis=0)
bias = array_ops.reshape(bias, new_b_shape)
return wrap(math_ops.add(t, bias), True)
else:
assert t_stacked, "At least one input to BiasAdd should be loop variant."
if compat.as_bytes(data_format) == b"NCHW":
shape = array_ops.shape(t)
flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0)
t = array_ops.reshape(t, flattened_shape)
t = nn_ops.bias_add(t, bias, data_format="NCHW")
t = array_ops.reshape(t, shape)
return wrap(t, True)
return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True)
@RegisterPFor("UnsortedSegmentSum")
def _convert_unsortedsegmentsum(pfor_input):
data, data_stacked, _ = pfor_input.input(0)
# TODO(agarwal): handle unstacked?
segment_ids = pfor_input.stacked_input(1)
# TODO(agarwal): handle stacked?
num_segments = pfor_input.unstacked_input(2)
if not data_stacked:
data = _stack(data, pfor_input.pfor.loop_len_vector).t
segment_shape = array_ops.shape(segment_ids)
n = segment_shape[0]
ones = array_ops.ones_like(segment_shape)[1:]
segment_offset = num_segments * math_ops.range(n)
segment_offset = array_ops.reshape(segment_offset,
array_ops.concat([[n], ones], axis=0))
segment_ids += segment_offset
num_segments = math_ops.cast(num_segments, dtypes.int64) * math_ops.cast(
n, dtypes.int64)
output = math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
new_output_shape = array_ops.concat(
[[n, -1], array_ops.shape(output)[1:]], axis=0)
output = array_ops.reshape(output, new_output_shape)
return wrap(output, True)
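# Worked example for the converter above (hypothetical values): with S pfor
# iterations and num_segments = 5, the segment ids of iteration i are shifted
# into the range [5 * i, 5 * (i + 1)), a single UnsortedSegmentSum is run with
# 5 * S segments, and the result of shape [5 * S, ...] is reshaped back to
# [S, 5, ...] so that each iteration gets its own block of segments.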
@RegisterPFor("Cast")
def _convert_cast(pfor_input):
inp = pfor_input.stacked_input(0)
dtype = pfor_input.get_attr("DstT")
return wrap(math_ops.cast(inp, dtype), True)
@RegisterPForWithArgs("Abs", math_ops.abs)
@RegisterPForWithArgs("Acos", math_ops.acos)
@RegisterPForWithArgs("Acosh", math_ops.acosh)
@RegisterPForWithArgs("Add", math_ops.add)
@RegisterPForWithArgs("AddV2", math_ops.add_v2)
@RegisterPForWithArgs("Angle", math_ops.angle)
@RegisterPForWithArgs("Asin", math_ops.asin)
@RegisterPForWithArgs("Asinh", math_ops.asinh)
@RegisterPForWithArgs("Atan", math_ops.atan)
@RegisterPForWithArgs("Atan2", math_ops.atan2)
@RegisterPForWithArgs("Atanh", math_ops.atanh)
@RegisterPForWithArgs("BesselI0e", math_ops.bessel_i0e)
@RegisterPForWithArgs("BesselI1e", math_ops.bessel_i1e)
@RegisterPForWithArgs("BitwiseAnd", bitwise_ops.bitwise_and)
@RegisterPForWithArgs("BitwiseOr", bitwise_ops.bitwise_or)
@RegisterPForWithArgs("BitwiseXor", bitwise_ops.bitwise_xor)
@RegisterPForWithArgs("Ceil", math_ops.ceil)
@RegisterPForWithArgs("Complex", math_ops.complex)
@RegisterPForWithArgs("ComplexAbs", math_ops.complex_abs)
@RegisterPForWithArgs("Conj", math_ops.conj)
@RegisterPForWithArgs("Cos", math_ops.cos)
@RegisterPForWithArgs("Cosh", math_ops.cosh)
@RegisterPForWithArgs("Digamma", math_ops.digamma)
@RegisterPForWithArgs("Div", math_ops.div)
@RegisterPForWithArgs("DivNoNan", math_ops.div_no_nan)
@RegisterPForWithArgs("Elu", nn_ops.elu)
@RegisterPForWithArgs("Equal", math_ops.equal)
@RegisterPForWithArgs("Erf", math_ops.erf)
@RegisterPForWithArgs("Erfc", math_ops.erfc)
@RegisterPForWithArgs("Exp", math_ops.exp)
@RegisterPForWithArgs("Expm1", math_ops.expm1)
@RegisterPForWithArgs("Floor", math_ops.floor)
@RegisterPForWithArgs("FloorDiv", math_ops.floor_div)
@RegisterPForWithArgs("FloorMod", math_ops.floor_mod)
@RegisterPForWithArgs("Greater", math_ops.greater)
@RegisterPForWithArgs("GreaterEqual", math_ops.greater_equal)
@RegisterPForWithArgs("Igamma", math_ops.igamma)
@RegisterPForWithArgs("IgammaGradA", math_ops.igamma_grad_a)
@RegisterPForWithArgs("Igammac", math_ops.igammac)
@RegisterPForWithArgs("Imag", math_ops.imag)
@RegisterPForWithArgs("Inv", math_ops.inv)
@RegisterPForWithArgs("Invert", bitwise_ops.invert)
@RegisterPForWithArgs("IsFinite", math_ops.is_finite)
@RegisterPForWithArgs("IsInf", math_ops.is_inf)
@RegisterPForWithArgs("IsNan", math_ops.is_nan)
@RegisterPForWithArgs("LeftShift", bitwise_ops.left_shift)
@RegisterPForWithArgs("Less", math_ops.less)
@RegisterPForWithArgs("LessEqual", math_ops.less_equal)
@RegisterPForWithArgs("Lgamma", math_ops.lgamma)
@RegisterPForWithArgs("Log", math_ops.log)
@RegisterPForWithArgs("Log1p", math_ops.log1p)
@RegisterPForWithArgs("LogicalAnd", math_ops.logical_and)
@RegisterPForWithArgs("LogicalNot", math_ops.logical_not)
@RegisterPForWithArgs("LogicalOr", math_ops.logical_or)
@RegisterPForWithArgs("LogicalXor", math_ops.logical_xor)
@RegisterPForWithArgs("Maximum", math_ops.maximum)
@RegisterPForWithArgs("Minimum", math_ops.minimum)
@RegisterPForWithArgs("Mod", math_ops.mod)
@RegisterPForWithArgs("Mul", math_ops.multiply)
@RegisterPForWithArgs("MulNoNan", math_ops.mul_no_nan)
@RegisterPForWithArgs("Neg", math_ops.negative)
@RegisterPForWithArgs("NotEqual", math_ops.not_equal)
@RegisterPForWithArgs("Polygamma", math_ops.polygamma)
@RegisterPForWithArgs("Pow", math_ops.pow)
@RegisterPForWithArgs("Real", math_ops.real)
@RegisterPForWithArgs("RealDiv", math_ops.divide)
@RegisterPForWithArgs("Reciprocal", math_ops.reciprocal)
@RegisterPForWithArgs("Relu", nn_ops.relu)
@RegisterPForWithArgs("Relu6", nn_ops.relu6)
@RegisterPForWithArgs("RightShift", bitwise_ops.right_shift)
@RegisterPForWithArgs("Rint", math_ops.rint)
@RegisterPForWithArgs("Round", math_ops.round)
@RegisterPForWithArgs("Rsqrt", math_ops.rsqrt)
@RegisterPForWithArgs("Selu", nn_ops.selu)
@RegisterPForWithArgs("Sigmoid", math_ops.sigmoid)
@RegisterPForWithArgs("Sign", math_ops.sign)
@RegisterPForWithArgs("Sin", math_ops.sin)
@RegisterPForWithArgs("Sinh", math_ops.sinh)
@RegisterPForWithArgs("Softplus", nn_ops.softplus)
@RegisterPForWithArgs("Softsign", nn_ops.softsign)
@RegisterPForWithArgs("Sqrt", math_ops.sqrt)
@RegisterPForWithArgs("Square", math_ops.square)
@RegisterPForWithArgs("SquaredDifference", math_ops.squared_difference)
@RegisterPForWithArgs("Sub", math_ops.subtract)
@RegisterPForWithArgs("Tan", math_ops.tan)
@RegisterPForWithArgs("Tanh", math_ops.tanh)
@RegisterPForWithArgs("TruncateDiv", math_ops.truncate_div)
@RegisterPForWithArgs("TruncateMod", math_ops.truncate_mod)
@RegisterPForWithArgs("Xdivy", math_ops.xdivy)
@RegisterPForWithArgs("Xlogy", math_ops.xlogy)
@RegisterPForWithArgs("Zeta", math_ops.zeta)
def _convert_cwise(pfor_input, op_type, op_func):
# Note that ops handled here do not have attributes except those listed below
# and hence don't need extra arguments passed to the cwise_op call below.
for attr in pfor_input.op.node_def.attr.keys():
assert attr in [u"T", u"Tout", u"_xla_compile_id"], (op_type, attr)
pfor_input.expanddim_inputs_for_broadcast()
return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("ApproximateEqual")
def _convert_approximate_equal(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
x = pfor_input.input(0)[0]
y = pfor_input.input(1)[0]
tolerance = pfor_input.get_attr("tolerance")
return wrap(math_ops.approximate_equal(x, y, tolerance=tolerance), True)
@RegisterPFor("Shape")
def _convert_shape(pfor_input):
out_type = pfor_input.get_attr("out_type")
return wrap(
array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)[1:],
False)
@RegisterPFor("ShapeN")
def _convert_shape_n(pfor_input):
out_type = pfor_input.get_attr("out_type")
shapes = [
array_ops.shape(x, out_type=out_type)[1:]
if stacked else array_ops.shape(x) for x, stacked, _ in pfor_input.inputs
]
return [wrap(x, False) for x in shapes]
@RegisterPFor("Size")
def _convert_size(pfor_input):
out_type = pfor_input.get_attr("out_type")
n = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type)
return wrap(
array_ops.size(pfor_input.stacked_input(0), out_type=out_type) // n,
False)
@RegisterPFor("Rank")
def _convert_rank(pfor_input):
return wrap(array_ops.rank(pfor_input.stacked_input(0)) - 1, False)
@RegisterPFor("AddN")
def _convert_addn(pfor_input):
# AddN does not support broadcasting.
pfor_input.stack_inputs()
return wrap(math_ops.add_n([x.t for x in pfor_input.inputs]), True)
@RegisterPFor("BiasAddGrad")
def _convert_biasaddgrad(pfor_input):
grad = pfor_input.stacked_input(0)
fmt = pfor_input.get_attr("data_format")
if fmt == b"NCHW":
output = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False)
else:
grad_shape = array_ops.shape(grad)
last_dim_shape = grad_shape[-1]
first_dim_shape = grad_shape[0]
output = array_ops.reshape(grad, [first_dim_shape, -1, last_dim_shape])
output = math_ops.reduce_sum(output, axis=[1], keepdims=False)
return wrap(output, True)
# Some required ops are not exposed under the tf namespace. Hence relying on
# _create_op to create them.
@RegisterPForWithArgs("EluGrad")
@RegisterPForWithArgs("Relu6Grad")
@RegisterPForWithArgs("ReluGrad")
@RegisterPForWithArgs("SeluGrad")
@RegisterPForWithArgs("SigmoidGrad")
@RegisterPForWithArgs("SoftplusGrad")
@RegisterPForWithArgs("SoftsignGrad")
@RegisterPForWithArgs("TanhGrad")
@RegisterPForWithArgs("SqrtGrad")
@RegisterPForWithArgs("RsqrtGrad")
@RegisterPForWithArgs("ReciprocalGrad")
def _convert_grads(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
# TODO(agarwal): Looks like these ops don't support broadcasting. Hence we
# have to use tiling here.
pfor_input.stack_inputs()
outputs = _create_op(
op_type, [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("Select")
def _convert_select(pfor_input):
pfor_input.stack_inputs()
cond = pfor_input.stacked_input(0)
t = pfor_input.stacked_input(1)
e = pfor_input.stacked_input(2)
cond_rank = array_ops.rank(cond)
cond, t, e = control_flow_ops.cond(
cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]),
lambda: [cond, t, e])
outputs = _create_op(
pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
n = pfor_input.pfor.loop_len_vector
out = control_flow_ops.cond(cond_rank > 1,
lambda: _unflatten_first_dim(outputs[0], n),
lambda: outputs[0])
return [wrap(out, True) for x in outputs]
@RegisterPFor("SelectV2")
def _convert_selectv2(pfor_input):
pfor_input.expanddim_inputs_for_broadcast()
cond = pfor_input.input(0)[0]
t = pfor_input.input(1)[0]
e = pfor_input.input(2)[0]
out = array_ops.where_v2(cond, t, e)
return wrap(out, True)
# random_ops
def _transpose_dim_to_front(x, dim):
rank = array_ops.rank(x)
return array_ops.transpose(
x,
perm=array_ops.concat([
[dim],
math_ops.range(0, dim),
math_ops.range(dim + 1, rank)], axis=0))
@RegisterPForWithArgs("RandomUniform")
@RegisterPForWithArgs("RandomUniformInt")
@RegisterPForWithArgs("RandomStandardNormal")
@RegisterPForWithArgs("TruncatedNormal")
def _convert_random(pfor_input, op_type, *args, **kw_args):
del args
del kw_args
inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)]
# inputs[0] is "shape"
inputs[0] = array_ops.concat(
[pfor_input.pfor.loop_len_vector, inputs[0]], axis=0)
logging.warning(
"Note that %s inside pfor op may not give same output as "
"inside a sequential loop.", op_type)
outputs = _create_op(
op_type,
inputs, [x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
@RegisterPFor("RandomGamma")
@RegisterPFor("RandomPoissonV2")
def _convert_random_with_param(pfor_input):
shape = pfor_input.unstacked_input(0)
# param is lam (Poisson rate) or alpha (Gamma shape).
param, param_stacked, _ = pfor_input.input(1)
logging.warning(
"Note that %s inside pfor op may not give same output as "
"inside a sequential loop.", pfor_input.op_type)
if param_stacked:
samples = _create_op(
pfor_input.op_type,
inputs=[shape, param],
op_dtypes=[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
loop_dim = array_ops.shape(shape)[0]
stacked_samples = _transpose_dim_to_front(samples, loop_dim)
else:
shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
stacked_samples = _create_op(
pfor_input.op_type,
inputs=[shape, param],
op_dtypes=[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs[0]
return wrap(stacked_samples, True)
@RegisterPFor("Multinomial")
def _convert_multinomial(pfor_input):
logits, logits_stacked, _ = pfor_input.input(0)
num_samples = pfor_input.unstacked_input(1)
seed = pfor_input.get_attr("seed")
seed2 = pfor_input.get_attr("seed2")
output_dtype = pfor_input.get_attr("output_dtype")
logging.warning(
"Note that Multinomial inside pfor op may not give same output as "
"inside a sequential loop.")
n = pfor_input.pfor.loop_len_vector[0]
if logits_stacked:
flattened_logits = _flatten_first_two_dims(logits)
samples = gen_random_ops.multinomial(
flattened_logits,
num_samples,
seed=seed, seed2=seed2, output_dtype=output_dtype)
stacked_samples = _unflatten_first_dim(samples, [n])
else:
samples = gen_random_ops.multinomial(
logits, num_samples * n,
seed=seed, seed2=seed2, output_dtype=output_dtype)
stacked_samples = array_ops.transpose(
array_ops.reshape(samples, [-1, n, num_samples]), [1, 0, 2])
return wrap(stacked_samples, True)
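# Worked example for the unstacked-logits branch above (hypothetical shapes):
# with logits of shape [B, C], n pfor iterations, and num_samples draws per
# iteration, a single Multinomial call draws n * num_samples samples per row,
# giving shape [B, n * num_samples]. Reshaping to [B, n, num_samples] and
# transposing to [n, B, num_samples] then assigns each iteration its own
# independent block of samples.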
# linalg_ops
@RegisterPFor("Cholesky")
def _convert_cholesky(pfor_input):
t = pfor_input.stacked_input(0)
return wrap(linalg_ops.cholesky(t), True)
@RegisterPFor("LogMatrixDeterminant")
def _convert_log_matrix_determinant(pfor_input):
# Input must have shape [N, M, M], so we need to flatten.
t = _flatten_first_two_dims(pfor_input.stacked_input(0))
sign, log_abs_det = linalg_ops.log_matrix_determinant(t)
return [wrap(_unflatten_first_dim(x, pfor_input.pfor.loop_len_vector), True)
for x in (sign, log_abs_det)]
@RegisterPFor("MatrixTriangularSolve")
def _convert_matrix_triangular_solve(pfor_input):
pfor_input.stack_inputs()
matrix = pfor_input.stacked_input(0)
rhs = pfor_input.stacked_input(1)
lower = pfor_input.get_attr("lower")
adjoint = pfor_input.get_attr("adjoint")
output = linalg_ops.matrix_triangular_solve(
matrix, rhs, lower=lower, adjoint=adjoint)
return wrap(output, True)
# logging_ops
@RegisterPFor("Assert")
def _convert_assert(pfor_input):
cond, cond_stacked, _ = pfor_input.input(0)
if cond_stacked:
cond = math_ops.reduce_all(cond)
data_list = [x.t for x in pfor_input.inputs][1:]
return _create_op("Assert", [cond] + data_list, [],
attrs=pfor_input.op.node_def.attr)
@RegisterPFor("Print")
def _convert_print(pfor_input):
# Note that we don't stack all the inputs. Hence unstacked values are printed
# once here vs multiple times in a while_loop.
pfor_input.stack_inputs([0])
outputs = _create_op(
"Print", [x.t for x in pfor_input.inputs],
[x.dtype for x in pfor_input.outputs],
attrs=pfor_input.op.node_def.attr).outputs
return [wrap(x, True) for x in outputs]
# data_flow_ops
# TensorArray conversion is tricky since we don't support arrays of
# TensorArrays. For converting them, we consider two distinct cases:
#
# 1. The array is constructed outside the pfor call, and read/written inside the
# loop.
# This is an easier case since we don't need to make an array of TensorArrays.
# A correctness requirement is that these parallel iterations shouldn't attempt
# to write to the same location. Hence at conversion time we disallow indices to
# be loop-invariant as that would guarantee a collision. Even if the indices are
# not loop-invariant, they could still conflict, which will trigger runtime errors.
#
# 2. The array is constructed and used entirely inside each pfor iteration.
# For simplicity, here we require that the indices used for write/scatter are
# "unstacked". Otherwise it becomes hard to merge the TensorArrays created in
# different pfor iterations. We consider two sub_cases:
#
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array, i.e. the i_th row of the j_th entry of the converted
# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
# In this case we don't increase the dimensions to avoid redundant tiling. Each
# iteration is trying to write the same value. So we convert that to a single
# write.
#
# Here are some tricks used to implement the above:
# - TensorArrayV3 constructor encodes the element shape as an attr. Instead of
# trying to trace whether future writes are stacked or unstacked in order to set
# this attr, we set it to correspond to unknown shape.
# - We use the "flow" output of the different ops to track whether the array
# elements are stacked or unstacked. If a stacked write/scatter is done, we make
# the flow stacked as well.
# - We use some heuristic traversal of the graph to track whether the
# TensorArray handle was created inside or outside the pfor loop.
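# Illustrative sketch (hypothetical user code, assuming the `pfor(loop_fn, n)`
# entry point of this package; `some_per_iteration_value` is a stand-in) of
# the two cases described above:
#
#   # Case 1: handle created outside pfor; each iteration writes to a
#   # distinct, loop-dependent index i.
#   ta = tensor_array_ops.TensorArray(dtypes.float32, size=n)
#   def loop_fn(i):
#     return ta.write(i, some_per_iteration_value(i)).read(i)
#
#   # Case 2: handle created inside the loop body; write indices must be loop
#   # invariant (the constant 0 here), while the written values may be
#   # stacked.
#   def loop_fn(i):
#     local_ta = tensor_array_ops.TensorArray(dtypes.float32, size=1)
#     return local_ta.write(0, some_per_iteration_value(i)).read(0)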
@RegisterPFor("TensorArrayV3")
def _convert_tensor_array_v3(pfor_input):
size = pfor_input.unstacked_input(0)
dtype = pfor_input.get_attr("dtype")
dynamic_size = pfor_input.get_attr("dynamic_size")
clear_after_read = pfor_input.get_attr("clear_after_read")
identical_element_shapes = pfor_input.get_attr("identical_element_shapes")
tensor_array_name = pfor_input.get_attr("tensor_array_name")
handle, flow = data_flow_ops.tensor_array_v3(
size,
dtype=dtype,
# We don't set element shape since we don't know if writes are stacked or
# not yet.
element_shape=None,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
identical_element_shapes=identical_element_shapes,
tensor_array_name=tensor_array_name)
# Note we keep flow unstacked for now since we don't know if writes will be
# stacked or not.
return wrap(handle, False), wrap(flow, False)
@RegisterPFor("TensorArraySizeV3")
def _convert_tensor_array_size_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
size = data_flow_ops.tensor_array_size_v3(handle, flow)
return wrap(size, False)
def _handle_inside_pfor(pfor_input, handle):
"""Returns True if handle was created inside the pfor loop."""
# We use a heuristic to find the original TensorArray creation op.
# The logic should handle the common cases (except cond based subgraphs).
# In theory the user could perform different operations on the handle (like
# Reshape, stack multiple handles, etc) which could break this logic.
# TODO(agarwal): handle Switch/Merge.
while handle.op.type in ("Enter", "Identity"):
handle = handle.op.inputs[0]
if handle.op.type not in [
"TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape"]:
raise ValueError("Unable to find source for handle %s" % handle)
else:
return pfor_input.pfor.op_is_inside_loop(handle.op)
def _unstack_flow(value):
# TODO(agarwal): consider looking if this is a Tile op then get its input.
# This may avoid running the Tile operations.
return array_ops.gather(value, 0)
@RegisterPFor("TensorArrayReadV3")
def _convert_tensor_array_read_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
dtype = pfor_input.get_attr("dtype")
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside_pfor:
# Note that if we are inside a control flow construct inside the pfor, and
# only some of the iterations are doing the read (i.e.
# `all_indices_partitioned` is True), then the read operation should only
# return values for the currently active pfor iterations (`all_indices`
# below). Hence, whenever the returned value is stacked (i.e. `flow` is
# stacked), we may need to do an extra gather after reading the values. Also
# note that if `is_inside_pfor` is false, then values in the tensor array are
# unstacked. So the check is only needed in this branch.
all_indices = pfor_input.pfor.all_indices
all_indices_partitioned = pfor_input.pfor.all_indices_partitioned
# Note: flow_stacked indicates if values in the TensorArray are stacked or
# not.
if index_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayReadV3 was called on a TensorArray whose"
" values are not loop-invariant, and the read indices were also"
" not loop invariant. This is currently unsupported.")
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
return wrap(value, True)
value = data_flow_ops.tensor_array_read_v3(
handle, index, flow, dtype=dtype)
if flow_stacked and all_indices_partitioned:
value = array_ops.gather(value, all_indices)
return wrap(value, flow_stacked)
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on index_stacked.
if index_stacked:
value = data_flow_ops.tensor_array_gather_v3(
handle, index, flow, dtype=dtype)
else:
value = data_flow_ops.tensor_array_read_v3(
handle, index, flow, dtype=dtype)
return wrap(value, index_stacked)
@RegisterPFor("TensorArrayWriteV3")
def _convert_tensor_array_write_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
index, index_stacked, _ = pfor_input.input(1)
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if value_stacked and pfor_input.pfor.all_indices_partitioned:
# Looks like we are inside a control flow construct in a pfor where not all
# iterations are currently active. We don't allow that since it could lead to
# different indices having different shapes, which would be hard to merge later.
raise ValueError("Writing non loop invariant values to TensorArray from "
"inside a while_loop/cond not supported.")
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if index_stacked:
raise ValueError("Need indices for %s to be loop invariant" % handle)
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return wrap(flow_out, False)
else:
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
# TODO(agarwal): Note that if flow is unstacked and value is stacked, then
# this may or may not be a safe situation. flow is unstacked both for a
# freshly created TensorArray, as well as after unstacked values are
# written to it. If it is the latter, then we cannot write a stacked value
# now since that may cause runtime errors due to different shapes in the
# array. At the moment we are not able to handle this gracefully and
# distinguish between the two cases. That would require some heuristic
# traversal of the graph to figure out whether all the writes are
# unstacked or not.
flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
else:
if not index_stacked:
raise ValueError("Need indices for %s to be not loop invariant" % handle)
# Note that even when index_stacked is true, actual values in index may
# still not be unique. However that will cause runtime error when executing
# the scatter operation below.
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
def _transpose_first_two_dims(value):
# TODO(agarwal): optimize if one of the dims == 1.
value_shape = array_ops.shape(value)
v0 = value_shape[0]
v1 = value_shape[1]
value = array_ops.reshape(value, [v0, v1, -1])
value = array_ops.transpose(value, [1, 0, 2])
new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0)
return array_ops.reshape(value, new_shape)
@RegisterPFor("TensorArrayGatherV3")
def _convert_tensor_array_gather_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
flow, flow_stacked, _ = pfor_input.input(2)
if flow_stacked:
flow = _unstack_flow(flow)
dtype = pfor_input.get_attr("dtype")
# TODO(agarwal): support element_shape attr?
n = pfor_input.pfor.loop_len_vector
value = data_flow_ops.tensor_array_gather_v3(
handle, indices, flow, dtype=dtype)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
# flow_stacked indicates if values in the TensorArray are stacked or not.
if indices_stacked:
if flow_stacked:
raise ValueError(
"It looks like TensorArrayGatherV3 was called on a TensorArray "
"whose values are not loop-invariant, and the indices were also "
"not loop invariant. This is currently unsupported.")
else:
value = _unflatten_first_dim(value, n)
return wrap(value, True)
else:
if flow_stacked:
# Since elements in this array are stacked and `value` was produced by
# gather, its first two dims are "gathered elements" and "stack
# dimension". Our semantics require these two to be flipped.
value = _transpose_first_two_dims(value)
return wrap(value, flow_stacked)
else:
# Values in the TensorArray should be unstacked (since different iterations
# couldn't write to the same location). So whether output is stacked or not
# depends on indices_stacked.
if indices_stacked:
value = _unflatten_first_dim(value, n)
return wrap(value, indices_stacked)
@RegisterPFor("TensorArrayScatterV3")
def _convert_tensor_array_scatter_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
indices, indices_stacked, _ = pfor_input.input(1)
indices = array_ops.reshape(indices, [-1])
value, value_stacked, _ = pfor_input.input(2)
flow, flow_stacked, _ = pfor_input.input(3)
if flow_stacked:
flow = _unstack_flow(flow)
is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
if is_inside:
if indices_stacked:
raise ValueError("Need indices for %s to be loop invariant" % handle)
# Note that flow_stacked indicates if existing values in the array are
# stacked or not.
if not flow_stacked and not value_stacked:
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return wrap(flow_out, False)
if not value_stacked:
# TODO(agarwal): tile in the second dimension directly instead of
# transposing below.
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _transpose_first_two_dims(value)
# TODO(agarwal): Note that if a previous write was unstacked, flow will be
# unstacked, and a stacked value may be written here which may cause
# runtime error due to different elements having different shape. We do
# not try to prevent that.
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
if not indices_stacked:
raise ValueError("Need indices for %s to be not loop invariant" % handle)
if not value_stacked:
value = _stack(value, pfor_input.pfor.loop_len_vector).t
value = _flatten_first_two_dims(value)
flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
flow)
return _stack(flow_out, pfor_input.pfor.loop_len_vector)
@RegisterPFor("TensorArrayGradV3")
def _convert_tensor_array_grad_v3(pfor_input):
handle = pfor_input.unstacked_input(0)
flow, flow_stacked, _ = pfor_input.input(1)
if flow_stacked:
flow = _unstack_flow(flow)
source = pfor_input.get_attr("source")
# TODO(agarwal): For now, we assume that gradients are stacked if the
# TensorArrayGradV3 call is being done inside the pfor. Getting that wrong
# will give runtime error due to incorrect shape being written to the
# accumulator. It is difficult to know in advance if gradients written will be
# stacked or not. Note that flow being stacked is not indicative of the
# gradient being stacked or not. Revisit this later.
shape_to_prepend = pfor_input.pfor.loop_len_vector
grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape(
handle=handle,
flow_in=flow,
shape_to_prepend=shape_to_prepend,
source=source)
flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t
return [wrap(grad_handle, False), wrap(flow_out, True)]
# StackV2 conversion is tricky since we don't have arrays of StackV2. So similar
# to TensorArrays, we convert them by changing the dimension of the elements
# inside the stack.
#
# We consider two cases:
#
# 1. StackV2 is constructed and used entirely inside the pfor loop.
# We keep a single Stack and perform the push/pop operations of all the
# iterations in lock-step. We also assume that all the iterations perform these
# operations. In case of dynamic control flow, if only some of the iterations
# try to perform a push/pop, then the conversion may not work correctly and may
# cause undefined behavior.
# TODO(agarwal): test StackV2 with dynamic control flow.
#
# 2. StackV2 is constructed outside the pfor loop.
# Performing stack push/pop in a parallel fashion is ill-defined. However given
# that reading stacks created externally is a common operation when computing
# jacobians, we provide some special semantics here as follows.
# - disallow push operations to the stack
# - pop operations are performed in lock step by all iterations, similar to the
# case when the stack is created inside. A single value is popped during the
# lock-step operation and broadcast to all the iterations. Values in the stack
# are assumed to be loop-invariant.
#
# Some other implementation details:
# We use some ad hoc logic to determine whether values in the Stack data
# structure are loop invariant or not. When converting push/pop operations,
# we keep track of
# whether the last conversion used a stacked value or not (see _stack_cache
# below). As a result if an unstacked value is written first, subsequent stacked
# writes are disallowed when they could have been allowed in theory.
# Map from cache key based on StackV2 handle to a bool indicating whether values
# are stacked or not.
# TODO(agarwal): move _stack_cache inside pfor?
_stack_cache = {}
def _stack_cache_key(pfor_input):
"""Create cache key corresponding to a stack handle."""
op_type = pfor_input.op_type
assert op_type in ["StackPushV2", "StackPopV2"], op_type
orig_handle = pfor_input.op.inputs[0]
while orig_handle.op.type in ["Identity", "Enter"]:
orig_handle = orig_handle.op.inputs[0]
assert orig_handle.op.type == "StackV2", orig_handle.op
return ops.get_default_graph(), pfor_input.pfor, orig_handle
def _stack_handle_inside_pfor(handle, pfor_input):
while handle.op.type in ["Identity", "Enter"]:
handle = handle.op.inputs[0]
assert handle.op.type == "StackV2", (
"Unable to find StackV2 op. Got %s" % handle.op)
return pfor_input.pfor.op_is_inside_loop(handle.op)
@RegisterPFor("StackPushV2")
def _convert_stack_push_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
elem, elem_stacked, _ = pfor_input.input(1)
swap_memory = pfor_input.get_attr("swap_memory")
if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input):
raise ValueError("StackPushV2 not allowed on stacks created outside pfor")
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
if stacked is None:
stacked = elem_stacked
_stack_cache[stack_cache_key] = stacked
else:
# If we previously made it unstacked then we can't revert to being stacked.
if not stacked and elem_stacked:
raise ValueError(
"It looks like the stack was previously determined to be loop"
" invariant, but we are now trying to push a loop dependent value"
" to it. This is currently unsupported.")
if stacked and not elem_stacked:
elem = _stack(elem, pfor_input.pfor.loop_len_vector).t
out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory)
return wrap(out, stacked)
# Note that inputs to this converter will be unstacked. However, it should get
# called since it is a stateful op.
@RegisterPFor("StackPopV2")
def _convert_stack_pop_v2(pfor_input):
handle = pfor_input.unstacked_input(0)
stack_cache_key = _stack_cache_key(pfor_input)
stacked = _stack_cache.get(stack_cache_key, None)
# If a StackPushV2 has not been converted yet, we default to unstacked since
# the push could be outside of pfor, or the converter may not be called if the
# inputs are unconverted.
if stacked is None:
stacked = False
_stack_cache[stack_cache_key] = False
elem_type = pfor_input.get_attr("elem_type")
out = data_flow_ops.stack_pop_v2(handle, elem_type)
return wrap(out, stacked)
# parsing_ops
@RegisterPFor("DecodeCSV")
def _convert_decode_csv(pfor_input):
lines = pfor_input.stacked_input(0)
record_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
field_delim = pfor_input.get_attr("field_delim")
use_quote_delim = pfor_input.get_attr("use_quote_delim")
select_cols = pfor_input.get_attr("select_cols")
if not select_cols:
select_cols = None
return [
wrap(t, True) for t in parsing_ops.decode_csv(
lines,
record_defaults,
field_delim=field_delim,
use_quote_delim=use_quote_delim,
select_cols=select_cols)
]
@RegisterPFor("ParseSingleExample")
def _convert_parse_single_example(pfor_input):
serialized = pfor_input.stacked_input(0)
dense_defaults = [
pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
]
sparse_keys = pfor_input.get_attr("sparse_keys")
dense_keys = pfor_input.get_attr("dense_keys")
sparse_types = pfor_input.get_attr("sparse_types")
dense_shapes = pfor_input.get_attr("dense_shapes")
output = gen_parsing_ops.parse_example(
serialized=serialized,
names=[],
dense_defaults=dense_defaults,
sparse_keys=sparse_keys,
dense_keys=dense_keys,
sparse_types=sparse_types,
dense_shapes=dense_shapes)
return [wrap(t, True, True) for t in nest.flatten(output)]
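# A hedged sketch (not part of pfor.py; names below are illustrative only) of
# how the converters above get exercised: `pfor` traces `loop_fn` once, then
# dispatches every op in it (here Gather and DecodeCSV) to its registered
# converter so the whole loop becomes a single vectorized computation.
def _decode_csv_with_pfor_sketch():
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import parsing_ops
  from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops

  lines = constant_op.constant(["1,2.5", "3,4.5", "5,6.5"])
  record_defaults = [constant_op.constant([0]), constant_op.constant([0.0])]

  def loop_fn(i):
    # Each iteration decodes one line; the DecodeCSV converter above rewrites
    # this into a single decode over all three lines.
    line_i = array_ops.gather(lines, i)
    return parsing_ops.decode_csv(line_i, record_defaults)

  # Per-iteration outputs come back stacked along a new leading dimension.
  return pfor_control_flow_ops.pfor(loop_fn, 3)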
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/pfor.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of array kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ArrayTest(PForTestCase):
def test_gather(self):
x = random_ops.random_uniform([3, 3, 3])
def loop_fn(i):
outputs = []
x_i = array_ops.gather(x, i)
for y in [x, x_i]:
axes = [0, 2, -1] if y == x else [0]
for axis in axes:
outputs.append(array_ops.gather(y, 2, axis=axis))
outputs.append(array_ops.gather(y, i, axis=axis))
outputs.append(array_ops.gather(y, [i], axis=axis))
outputs.append(array_ops.gather(y, [i, 2], axis=axis))
outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
return outputs
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 20)
def test_shape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_size(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_rank(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.rank(x_i)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_shape_n(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(
[x_i, x, y, y_i], out_type=dtypes.int64)
self._test_loop_fn(
loop_fn, 3, loop_fn_dtypes=[dtypes.int32] * 4 + [dtypes.int64] * 4)
def test_reshape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_broadcast_to(self):
x = random_ops.random_uniform([3, 2, 1, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.broadcast_to(x1, [2, 2, 3]),
array_ops.broadcast_to(x1, [1, 2, 1, 3]))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_expand_dims(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.expand_dims(
x1, axis=-1), array_ops.expand_dims(
x1, axis=1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_one_hot(self):
indices = random_ops.random_uniform(
[3, 2, 3], minval=0, maxval=4, dtype=dtypes.int32)
def loop_fn(i):
indices_i = array_ops.gather(indices, i)
return (array_ops.one_hot(indices_i, depth=4, on_value=2., off_value=-2.),
array_ops.one_hot(indices_i, depth=4, axis=1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_searchsorted(self):
sorted_inputs = math_ops.cumsum(random_ops.random_uniform([3, 2, 4]),
axis=-1)
values = random_ops.random_uniform([2, 3], minval=-1, maxval=4.5)
def loop_fn(i):
inputs_i = array_ops.gather(sorted_inputs, i)
return [array_ops.searchsorted(inputs_i, values, out_type=dtypes.int32,
side="left"), # creates LowerBound op.
array_ops.searchsorted(inputs_i, values, out_type=dtypes.int64,
side="right")] # creates UpperBound op.
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])
def test_slice(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.slice(x1, begin=(0, 1), size=(2, 1))
self._test_loop_fn(loop_fn, 3)
def test_tile(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [2, 1])
self._test_loop_fn(loop_fn, 3)
def test_tile_loop_dependent(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [i, 1])
with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
pfor_control_flow_ops.pfor(loop_fn, 2)
def test_pack(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.stack([x1, y], axis=-1)
self._test_loop_fn(loop_fn, 1)
def test_unpack(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.unstack(
x_i, 4, axis=-1), array_ops.unstack(
x_i, 3, axis=1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 7)
def test_pad(self):
x = random_ops.random_uniform([3, 2, 3])
padding = constant_op.constant([[1, 2], [3, 4]])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.pad(x1, padding, mode="CONSTANT")
self._test_loop_fn(loop_fn, 3)
def test_split(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 5)
def test_split_v(self):
x = random_ops.random_uniform([3, 6, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.split(x1, [2, 1, 3], axis=0),
array_ops.split(x1, [3], axis=-1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 4)
def test_squeeze(self):
x = random_ops.random_uniform([5, 1, 2, 1])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.squeeze(x1, axis=0),
array_ops.squeeze(x1, axis=-1),
array_ops.squeeze(x1))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_transpose(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.transpose(x1, [2, 1, 0])
self._test_loop_fn(loop_fn, 3)
def test_zeros_like(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
      z = array_ops.zeros_like(x1)
return z, z + x1
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_concat_v2(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.concat(
[x1, x1, y], axis=0), array_ops.concat(
[x1, x1, y], axis=-1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_unary_cwise_ops(self):
for op in [array_ops.identity, array_ops.stop_gradient]:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y = op(x1) + x1
loss = nn.l2_loss(y)
return op(x), y, g.gradient(loss, x1)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_identity_n(self):
x = random_ops.random_uniform([3, 4])
def loop_fn(i):
return array_ops.identity_n([x, array_ops.gather(x, i)])
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_matrix_band_part(self):
x = random_ops.random_uniform([3, 4, 2, 2])
for num_lower, num_upper in ((0, -1), (-1, 0), (1, 1)):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return array_ops.matrix_band_part(
array_ops.gather(x, i),
num_lower=num_lower,
num_upper=num_upper)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_diag(self):
x = random_ops.random_uniform([3, 4, 2])
def loop_fn(i):
return array_ops.matrix_diag(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
def test_matrix_diag_part(self):
x = random_ops.random_uniform([3, 4, 2])
def loop_fn(i):
return array_ops.matrix_diag_part(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
def test_matrix_set_diag(self):
matrices = random_ops.random_uniform([3, 4, 4])
diags = random_ops.random_uniform([3, 4])
def loop_fn(i):
matrix_i = array_ops.gather(matrices, i)
diag_i = array_ops.gather(diags, i)
return (array_ops.matrix_set_diag(matrix_i, diag_i),
array_ops.matrix_set_diag(matrices[0, ...], diag_i),
array_ops.matrix_set_diag(matrix_i, diags[0, ...]))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)
def test_strided_slice(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
g.watch(x)
def loop_fn(i):
with g:
x_i = array_ops.gather(x, i)
y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
loss = nn.l2_loss(y)
return y, g.gradient(loss, x_i)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/array_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of math kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MathTest(PForTestCase):
def _test_unary_cwise_ops(self, ops, is_complex):
for op in ops:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
if is_complex:
y = random_ops.random_uniform([3, 5])
g.watch(y)
x = math_ops.complex(x, y)
# pylint: disable=cell-var-from-loop
output_dtypes = []
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y1 = op(x1)
outputs = [op(x), y1]
if y1.dtype == dtypes.float32:
loss = math_ops.reduce_sum(y1 * y1)
else:
loss = None
if loss is not None:
grad = g.gradient(loss, x1)
if grad is not None:
outputs.append(grad)
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
def test_unary_cwise_complex_ops(self):
complex_ops = [
math_ops.angle,
math_ops.imag,
math_ops.complex_abs,
math_ops.real,
math_ops.conj,
]
self._test_unary_cwise_ops(complex_ops, True)
def test_unary_cwise_real_ops_1(self):
real_ops = [
lambda x: math_ops.acosh(1 + math_ops.square(x)),
math_ops.abs,
math_ops.acos,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.bessel_i0e,
math_ops.bessel_i1e,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.exp,
math_ops.expm1,
math_ops.inv,
math_ops.is_finite,
math_ops.is_inf,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
]
self._test_unary_cwise_ops(real_ops, False)
def test_unary_cwise_real_ops_2(self):
real_ops = [
math_ops.neg,
math_ops.negative,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.sigmoid,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
math_ops.tanh,
nn.elu,
nn.relu,
nn.relu6,
nn.selu,
nn.softplus,
nn.softsign,
]
self._test_unary_cwise_ops(real_ops, False)
def test_unary_cwise_no_grad(self):
for op in [math_ops.ceil,
math_ops.floor,
math_ops.logical_not]:
x = random_ops.random_uniform([3, 5])
if op == math_ops.logical_not:
x = x > 0
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return op(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=x.dtype)
def test_binary_cwise_ops(self):
logical_ops = [
math_ops.logical_and,
math_ops.logical_or,
math_ops.logical_xor
]
# Wrapper functions restricting the range of inputs of zeta and polygamma.
def safe_polygamma(x, y):
return math_ops.polygamma(
math_ops.round(clip_ops.clip_by_value(y, 1, 10)),
x * x + 1)
def safe_zeta(x, y):
return math_ops.zeta(x * x + 1, y * y)
float_ops = [
math_ops.add,
math_ops.add_v2,
math_ops.atan2,
math_ops.complex,
math_ops.div,
math_ops.divide,
math_ops.div_no_nan,
math_ops.equal,
math_ops.floor_mod,
math_ops.greater,
math_ops.greater_equal,
math_ops.igamma,
math_ops.igammac,
math_ops.igamma_grad_a,
math_ops.less,
math_ops.less_equal,
math_ops.maximum,
math_ops.minimum,
math_ops.mod,
math_ops.multiply,
math_ops.not_equal,
math_ops.pow,
math_ops.squared_difference,
math_ops.subtract,
math_ops.truncate_mod,
safe_polygamma,
safe_zeta,
]
    # FloorDiv fails on XLA due to floor's discontinuities exacerbating small
# division differences.
if not test_util.is_xla_enabled():
float_ops += [math_ops.floor_div]
for op in logical_ops + float_ops:
x = random_ops.random_uniform([7, 3, 5])
y = random_ops.random_uniform([3, 5])
if op in logical_ops:
x = x > 0
y = y > 0
output_dtypes = []
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
def test_approximate_equal(self):
x = random_ops.random_uniform([3, 5])
y = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
return math_ops.approximate_equal(x1, y1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.bool])
def test_addn(self):
x = random_ops.random_uniform([2, 3, 5])
y = random_ops.random_uniform([3, 5])
z = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return math_ops.add_n([x1, y, z])
self._test_loop_fn(loop_fn, 2)
def test_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (5, 3) if tr_a else (3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (7, 5) if tr_b else (5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (4, 5, 3) if tr_a else (4, 3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (4, 7, 5) if tr_b else (4, 5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul_broadcast(self):
if not compat.forward_compatible(2019, 4, 25):
self.skipTest("Skipping test for future functionality.")
for broadcast_a in (True, False):
for broadcast_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 3, 5) if broadcast_a else (4, 2, 3, 5)
shape_b = (2, 5, 7) if broadcast_b else (4, 2, 5, 7)
shape_a = (2,) + shape_a if stack_a else shape_a
shape_b = (2,) + shape_b if stack_b else shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for op in [
math_ops.reduce_sum, math_ops.reduce_prod, math_ops.reduce_max,
math_ops.reduce_min, math_ops.reduce_mean,
]:
for axis in ([1], None, [0, 2]):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_boolean_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5]) > 0.5
for op in [math_ops.reduce_any, math_ops.reduce_all]:
for axis in ([1], None, [0, 2]):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2, loop_fn_dtypes=[dtypes.bool])
def test_cum_sum(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumsum(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_cum_prod(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumprod(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_bias_add(self):
for data_format in ("NCHW", "NHWC"):
for stacked_value in (True, False):
x_shape = [3, 4, 5, 6]
if stacked_value:
x_shape = [2] + x_shape
x = random_ops.random_uniform(x_shape)
for stacked_bias in (True, False):
if not (stacked_value or stacked_bias):
continue
with backprop.GradientTape(persistent=True) as g:
bias_dim = -1
if data_format == "NCHW":
bias_dim = 2 if stacked_value else 1
bias_shape = [x_shape[bias_dim]]
if stacked_bias:
bias_shape = [2] + bias_shape
bias = random_ops.random_uniform(bias_shape)
g.watch(bias)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
a = array_ops.gather(x, i) if stacked_value else x
b = array_ops.gather(bias, i) if stacked_bias else bias
y = nn.bias_add(a, b, data_format=data_format)
loss = math_ops.reduce_sum(y * y)
grad = g.gradient(loss, bias)
if stacked_bias:
# If we gather over bias in loop_fn, the gradient will be an
# instance of `IndexedSlices` with attrs `values` and `indices`.
return y, grad.values, grad.indices
else:
return y, grad
# pylint: enable=cell-var-from-loop
out_dtypes = [dtypes.float32, dtypes.float32]
if stacked_bias:
out_dtypes = out_dtypes + [dtypes.int32]
self._test_loop_fn(
loop_fn, 2, loop_fn_dtypes=out_dtypes)
def test_unsorted_segment_sum(self):
t = random_ops.random_uniform([3, 3, 2])
segment_ids = constant_op.constant([[0, 0, 2], [0, 1, 2], [2, 2, 2]])
num_segments = 3
def loop_fn(i):
data = array_ops.gather(t, i)
data_0 = array_ops.gather(t, 0)
seg_ids = array_ops.gather(segment_ids, i)
return (math_ops.unsorted_segment_sum(data, seg_ids, num_segments),
math_ops.unsorted_segment_sum(data_0, seg_ids, num_segments))
self._test_loop_fn(loop_fn, 3, [dtypes.float32] * 2)
def test_cast(self):
x = constant_op.constant([[1], [2]])
y = constant_op.constant([[1.0], [2.0]])
def loop_fn(i):
return (math_ops.cast(array_ops.gather(x, i), dtypes.float32),
math_ops.cast(array_ops.gather(y, i), dtypes.int32))
self._test_loop_fn(
loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
def test_tanh_axpy(self):
a = constant_op.constant(3.)
x = random_ops.random_uniform([4, 5])
y = random_ops.random_uniform([6, 5])
n = x.shape[0]
def loop_fn(i):
return math_ops.tanh(a * array_ops.gather(x, i) + array_ops.gather(y, i))
self._test_loop_fn(loop_fn, n)
def test_select(self):
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
for cond_shape in [2], [2, 3], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_cond_needs_broadcast(self):
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for cond_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where_v2(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_args_need_broadcast(self):
a = random_ops.random_uniform([2, 5])
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for cond_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where_v2(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_cond_fixed(self):
cond = random_ops.random_uniform([3, 5]) > 0.5
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for a_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
a = random_ops.random_uniform(a_shape)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
return array_ops.where_v2(cond, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class LinalgTest(PForTestCase):
def test_cholesky(self):
z = random_ops.random_normal([2, 3, 3])
x = (math_ops.matmul(z, array_ops.matrix_transpose(z)) # Ensure pos. def.
+ linalg_ops.eye(3)) # Ensure well-conditioned.
def loop_fn(i):
return linalg_ops.cholesky(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 2)
def test_log_matrix_determinant(self):
x = random_ops.random_normal([3, 4, 2, 2])
def loop_fn(i):
return linalg_ops.log_matrix_determinant(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
def test_matrix_triangular_solve(self):
for lower in (True, False):
for adjoint in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 4, 3, 3) if stack_a else (4, 3, 3)
shape_b = (2, 4, 3, 5) if stack_b else (4, 3, 5)
x = array_ops.matrix_band_part(
random_ops.random_uniform(shape_a)
+ linalg_ops.eye(3), # Ensure well-conditioned.
*((-1, 0) if lower else (0, -1))) # Ensure triangular.
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return linalg_ops.matrix_triangular_solve(a, b,
lower=lower,
adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/ops/parallel_for/math_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signal reconstruction via overlapped addition of frames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("signal.overlap_and_add")
def overlap_and_add(signal, frame_step, name=None):
"""Reconstructs a signal from a framed representation.
Adds potentially overlapping frames of a signal with shape
`[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.
The resulting tensor has shape `[..., output_size]` where
output_size = (frames - 1) * frame_step + frame_length
Args:
signal: A [..., frames, frame_length] `Tensor`. All dimensions may be
unknown, and rank must be at least 2.
frame_step: An integer or scalar `Tensor` denoting overlap offsets. Must be
less than or equal to `frame_length`.
name: An optional name for the operation.
Returns:
A `Tensor` with shape `[..., output_size]` containing the overlap-added
frames of `signal`'s inner-most two dimensions.
Raises:
ValueError: If `signal`'s rank is less than 2, or `frame_step` is not a
scalar integer.
"""
with ops.name_scope(name, "overlap_and_add", [signal, frame_step]):
signal = ops.convert_to_tensor(signal, name="signal")
signal.shape.with_rank_at_least(2)
frame_step = ops.convert_to_tensor(frame_step, name="frame_step")
frame_step.shape.assert_has_rank(0)
if not frame_step.dtype.is_integer:
raise ValueError("frame_step must be an integer. Got %s" %
frame_step.dtype)
signal_shape = array_ops.shape(signal)
# All dimensions that are not part of the overlap-and-add. Can be empty for
# rank 2 inputs.
outer_dimensions = signal_shape[:-2]
outer_rank = array_ops.size(outer_dimensions)
def full_shape(inner_shape):
return array_ops.concat([outer_dimensions, inner_shape], 0)
frame_length = signal_shape[-1]
frames = signal_shape[-2]
# Compute output length.
output_length = frame_length + frame_step * (frames - 1)
# If frame_length is equal to frame_step, there's no overlap so just
# reshape the tensor.
frame_step_static = tensor_util.constant_value(frame_step)
if (frame_step_static is not None and signal.shape.dims is not None and
frame_step_static == signal.shape.dims[-1].value):
output_shape = full_shape([output_length])
return array_ops.reshape(signal, output_shape, name="fast_path")
# The following code is documented using this example:
#
# frame_step = 2
# signal.shape = (3, 5)
# a b c d e
# f g h i j
# k l m n o
    # Compute the number of segments per frame.
segments = -(-frame_length // frame_step) # Divide and round up.
# Pad the frame_length dimension to a multiple of the frame step.
# Pad the frames dimension by `segments` so that signal.shape = (6, 6)
# a b c d e 0
# f g h i j 0
# k l m n o 0
# 0 0 0 0 0 0
# 0 0 0 0 0 0
# 0 0 0 0 0 0
paddings = [[0, segments], [0, segments * frame_step - frame_length]]
outer_paddings = array_ops.zeros([outer_rank, 2], dtypes.int32)
paddings = array_ops.concat([outer_paddings, paddings], 0)
signal = array_ops.pad(signal, paddings)
    # Reshape so that signal.shape = (6, 3, 2)
# ab cd e0
# fg hi j0
# kl mn o0
# 00 00 00
# 00 00 00
# 00 00 00
shape = full_shape([frames + segments, segments, frame_step])
signal = array_ops.reshape(signal, shape)
# Transpose dimensions so that signal.shape = (3, 6, 2)
# ab fg kl 00 00 00
# cd hi mn 00 00 00
# e0 j0 o0 00 00 00
perm = array_ops.concat(
[math_ops.range(outer_rank), outer_rank + [1, 0, 2]], 0)
signal = array_ops.transpose(signal, perm)
# Reshape so that signal.shape = (18, 2)
# ab fg kl 00 00 00 cd hi mn 00 00 00 e0 j0 o0 00 00 00
shape = full_shape([(frames + segments) * segments, frame_step])
signal = array_ops.reshape(signal, shape)
# Truncate so that signal.shape = (15, 2)
# ab fg kl 00 00 00 cd hi mn 00 00 00 e0 j0 o0
signal = signal[..., :(frames + segments - 1) * segments, :]
# Reshape so that signal.shape = (3, 5, 2)
# ab fg kl 00 00
# 00 cd hi mn 00
# 00 00 e0 j0 o0
shape = full_shape([segments, (frames + segments - 1), frame_step])
signal = array_ops.reshape(signal, shape)
# Now, reduce over the columns, to achieve the desired sum.
signal = math_ops.reduce_sum(signal, -3)
# Flatten the array.
shape = full_shape([(frames + segments - 1) * frame_step])
signal = array_ops.reshape(signal, shape)
# Truncate to final length.
signal = signal[..., :output_length]
return signal
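# A minimal NumPy sketch (not part of this module; names are illustrative only)
# of what the code above computes: each frame is added into the output at an
# offset of frame_step samples from the previous frame, producing
# (frames - 1) * frame_step + frame_length output samples.
import numpy as np

def _overlap_and_add_reference_sketch(frames, frame_step):
  num_frames, frame_length = frames.shape
  output = np.zeros((num_frames - 1) * frame_step + frame_length, frames.dtype)
  for k in range(num_frames):
    output[k * frame_step:k * frame_step + frame_length] += frames[k]
  return output

# With the (3, 5) example above and frame_step = 2, the 9 output samples are
# [a, b, c+f, d+g, e+h+k, i+l, j+m, n, o].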
|
tensorflow-master
|
tensorflow/python/ops/signal/reconstruction_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Signal processing operations.
See the [tf.signal](https://tensorflow.org/api_guides/python/contrib.signal)
guide.
@@frame
@@hamming_window
@@hann_window
@@inverse_stft
@@inverse_stft_window_fn
@@mfccs_from_log_mel_spectrograms
@@linear_to_mel_weight_matrix
@@overlap_and_add
@@stft
[hamming]: https://en.wikipedia.org/wiki/Window_function#Hamming_window
[hann]: https://en.wikipedia.org/wiki/Window_function#Hann_window
[mel]: https://en.wikipedia.org/wiki/Mel_scale
[mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.ops.signal.dct_ops import dct
from tensorflow.python.ops.signal.fft_ops import fft
from tensorflow.python.ops.signal.fft_ops import fft2d
from tensorflow.python.ops.signal.fft_ops import fft3d
from tensorflow.python.ops.signal.fft_ops import fftshift
from tensorflow.python.ops.signal.fft_ops import rfft
from tensorflow.python.ops.signal.fft_ops import rfft2d
from tensorflow.python.ops.signal.fft_ops import rfft3d
from tensorflow.python.ops.signal.dct_ops import idct
from tensorflow.python.ops.signal.fft_ops import ifft
from tensorflow.python.ops.signal.fft_ops import ifft2d
from tensorflow.python.ops.signal.fft_ops import ifft3d
from tensorflow.python.ops.signal.fft_ops import ifftshift
from tensorflow.python.ops.signal.fft_ops import irfft
from tensorflow.python.ops.signal.fft_ops import irfft2d
from tensorflow.python.ops.signal.fft_ops import irfft3d
from tensorflow.python.ops.signal.mel_ops import linear_to_mel_weight_matrix
from tensorflow.python.ops.signal.mfcc_ops import mfccs_from_log_mel_spectrograms
from tensorflow.python.ops.signal.reconstruction_ops import overlap_and_add
from tensorflow.python.ops.signal.shape_ops import frame
from tensorflow.python.ops.signal.spectral_ops import inverse_stft
from tensorflow.python.ops.signal.spectral_ops import inverse_stft_window_fn
from tensorflow.python.ops.signal.spectral_ops import stft
from tensorflow.python.ops.signal.window_ops import hamming_window
from tensorflow.python.ops.signal.window_ops import hann_window
# pylint: enable=unused-import
|
tensorflow-master
|
tensorflow/python/ops/signal/signal.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Spectral operations (e.g. Short-time Fourier Transform)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.ops.signal import reconstruction_ops
from tensorflow.python.ops.signal import shape_ops
from tensorflow.python.ops.signal import window_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('signal.stft')
def stft(signals, frame_length, frame_step, fft_length=None,
window_fn=window_ops.hann_window,
pad_end=False, name=None):
"""Computes the [Short-time Fourier Transform][stft] of `signals`.
Implemented with GPU-compatible ops and supports gradients.
Args:
signals: A `[..., samples]` `float32` `Tensor` of real-valued signals.
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT to apply.
If not provided, uses the smallest power of 2 enclosing `frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
pad_end: Whether to pad the end of `signals` with zeros when the provided
frame length and step produces a frame that lies partially past its end.
name: An optional name for the operation.
Returns:
A `[..., frames, fft_unique_bins]` `Tensor` of `complex64` STFT values where
`fft_unique_bins` is `fft_length // 2 + 1` (the unique components of the
FFT).
Raises:
ValueError: If `signals` is not at least rank 1, `frame_length` is
not scalar, or `frame_step` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'stft', [signals, frame_length,
frame_step]):
signals = ops.convert_to_tensor(signals, name='signals')
signals.shape.with_rank_at_least(1)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
framed_signals = shape_ops.frame(
signals, frame_length, frame_step, pad_end=pad_end)
# Optionally window the framed signals.
if window_fn is not None:
window = window_fn(frame_length, dtype=framed_signals.dtype)
framed_signals *= window
# fft_ops.rfft produces the (fft_length/2 + 1) unique components of the
# FFT of the real windowed signals in framed_signals.
return fft_ops.rfft(framed_signals, [fft_length])
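# A minimal NumPy sketch (not part of this module; names are illustrative only)
# of the computation above: frame the signal, window each frame, and take an
# RFFT of every frame. Note that np.hanning is a symmetric window while
# window_ops.hann_window defaults to a periodic one, so values differ slightly.
def _stft_reference_sketch(signal, frame_length, frame_step, fft_length):
  """Reference STFT of a 1-D `signal`, assuming pad_end=False."""
  num_frames = 1 + (len(signal) - frame_length) // frame_step
  frames = np.stack([signal[k * frame_step:k * frame_step + frame_length]
                     for k in range(num_frames)])
  return np.fft.rfft(frames * np.hanning(frame_length), n=fft_length, axis=-1)

# e.g. _stft_reference_sketch(np.random.randn(1000), 256, 128, 256) has shape
# (num_frames, 256 // 2 + 1) = (6, 129), matching stft's output layout.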
@tf_export('signal.inverse_stft_window_fn')
def inverse_stft_window_fn(frame_step,
forward_window_fn=window_ops.hann_window,
name=None):
"""Generates a window function that can be used in `inverse_stft`.
Constructs a window that is equal to the forward window with a further
pointwise amplitude correction. `inverse_stft_window_fn` is equivalent to
`forward_window_fn` in the case where it would produce an exact inverse.
See examples in `inverse_stft` documentation for usage.
Args:
frame_step: An integer scalar `Tensor`. The number of samples to step.
forward_window_fn: window_fn used in the forward transform, `stft`.
name: An optional name for the operation.
Returns:
A callable that takes a window length and a `dtype` keyword argument and
returns a `[window_length]` `Tensor` of samples in the provided datatype.
The returned window is suitable for reconstructing original waveform in
inverse_stft.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
def inverse_stft_window_fn_inner(frame_length, dtype):
"""Computes a window that can be used in `inverse_stft`.
Args:
frame_length: An integer scalar `Tensor`. The window length in samples.
dtype: Data type of waveform passed to `stft`.
Returns:
A window suitable for reconstructing original waveform in `inverse_stft`.
Raises:
ValueError: If `frame_length` is not scalar, `forward_window_fn` is not a
callable that takes a window length and a `dtype` keyword argument and
      returns a `[window_length]` `Tensor` of samples in the provided datatype,
      or `frame_step` is not scalar.
"""
with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
# Use equation 7 from Griffin + Lim.
forward_window = forward_window_fn(frame_length, dtype=dtype)
denom = math_ops.square(forward_window)
overlaps = -(-frame_length // frame_step) # Ceiling division.
denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
denom = array_ops.reshape(denom, [overlaps, frame_step])
denom = math_ops.reduce_sum(denom, 0, keepdims=True)
denom = array_ops.tile(denom, [overlaps, 1])
denom = array_ops.reshape(denom, [overlaps * frame_step])
return forward_window / denom[:frame_length]
return inverse_stft_window_fn_inner
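# A minimal NumPy sketch (not part of this module; names are illustrative only)
# of the normalization above: at each sample offset within a frame, the
# denominator is the sum of the squared forward window over every frame that
# overlaps that offset (equation 7 of Griffin & Lim).
def _inverse_window_reference_sketch(forward_window, frame_step):
  frame_length = len(forward_window)
  overlaps = -(-frame_length // frame_step)  # ceiling division
  denom = np.square(forward_window)
  denom = np.pad(denom, (0, overlaps * frame_step - frame_length))
  denom = denom.reshape(overlaps, frame_step).sum(axis=0)
  denom = np.tile(denom, overlaps)[:frame_length]
  return forward_window / denom

# e.g. _inverse_window_reference_sketch(np.hanning(8), frame_step=4) mirrors
# what inverse_stft_window_fn(4)(8, dtype) computes, modulo the
# symmetric-vs-periodic window difference noted in the stft sketch above.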
@tf_export('signal.inverse_stft')
def inverse_stft(stfts,
frame_length,
frame_step,
fft_length=None,
window_fn=window_ops.hann_window,
name=None):
"""Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.
  To reconstruct an original waveform, a complementary window function should
be used in inverse_stft. Such a window function can be constructed with
tf.signal.inverse_stft_window_fn.
Example:
```python
frame_length = 400
frame_step = 160
waveform = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.signal.stft(waveform, frame_length, frame_step)
inverse_stft = tf.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.signal.inverse_stft_window_fn(frame_step))
```
  If a custom `window_fn` is used in `stft`, it must be passed to
inverse_stft_window_fn:
```python
frame_length = 400
frame_step = 160
  window_fn = functools.partial(tf.signal.hamming_window, periodic=True)
waveform = tf.compat.v1.placeholder(dtype=tf.float32, shape=[1000])
stft = tf.signal.stft(
waveform, frame_length, frame_step, window_fn=window_fn)
inverse_stft = tf.signal.inverse_stft(
stft, frame_length, frame_step,
window_fn=tf.signal.inverse_stft_window_fn(
frame_step, forward_window_fn=window_fn))
```
Implemented with GPU-compatible ops and supports gradients.
Args:
stfts: A `complex64` `[..., frames, fft_unique_bins]` `Tensor` of STFT bins
representing a batch of `fft_length`-point STFTs where `fft_unique_bins`
is `fft_length // 2 + 1`
frame_length: An integer scalar `Tensor`. The window length in samples.
frame_step: An integer scalar `Tensor`. The number of samples to step.
fft_length: An integer scalar `Tensor`. The size of the FFT that produced
`stfts`. If not provided, uses the smallest power of 2 enclosing
`frame_length`.
window_fn: A callable that takes a window length and a `dtype` keyword
argument and returns a `[window_length]` `Tensor` of samples in the
provided datatype. If set to `None`, no windowing is used.
name: An optional name for the operation.
Returns:
A `[..., samples]` `Tensor` of `float32` signals representing the inverse
STFT for each input STFT in `stfts`.
Raises:
ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,
`frame_step` is not scalar, or `fft_length` is not scalar.
[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
"""
with ops.name_scope(name, 'inverse_stft', [stfts]):
stfts = ops.convert_to_tensor(stfts, name='stfts')
stfts.shape.with_rank_at_least(2)
frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
frame_length.shape.assert_has_rank(0)
frame_step = ops.convert_to_tensor(frame_step, name='frame_step')
frame_step.shape.assert_has_rank(0)
if fft_length is None:
fft_length = _enclosing_power_of_two(frame_length)
else:
fft_length = ops.convert_to_tensor(fft_length, name='fft_length')
fft_length.shape.assert_has_rank(0)
real_frames = fft_ops.irfft(stfts, [fft_length])
# frame_length may be larger or smaller than fft_length, so we pad or
# truncate real_frames to frame_length.
frame_length_static = tensor_util.constant_value(frame_length)
# If we don't know the shape of real_frames's inner dimension, pad and
# truncate to frame_length.
if (frame_length_static is None or real_frames.shape.ndims is None or
real_frames.shape.as_list()[-1] is None):
real_frames = real_frames[..., :frame_length]
real_frames_rank = array_ops.rank(real_frames)
real_frames_shape = array_ops.shape(real_frames)
paddings = array_ops.concat(
[array_ops.zeros([real_frames_rank - 1, 2],
dtype=frame_length.dtype),
[[0, math_ops.maximum(0, frame_length - real_frames_shape[-1])]]], 0)
real_frames = array_ops.pad(real_frames, paddings)
# We know real_frames's last dimension and frame_length statically. If they
# are different, then pad or truncate real_frames to frame_length.
elif real_frames.shape.as_list()[-1] > frame_length_static:
real_frames = real_frames[..., :frame_length_static]
elif real_frames.shape.as_list()[-1] < frame_length_static:
pad_amount = frame_length_static - real_frames.shape.as_list()[-1]
real_frames = array_ops.pad(real_frames,
[[0, 0]] * (real_frames.shape.ndims - 1) +
[[0, pad_amount]])
# The above code pads the inner dimension of real_frames to frame_length,
# but it does so in a way that may not be shape-inference friendly.
# Restore shape information if we are able to.
if frame_length_static is not None and real_frames.shape.ndims is not None:
real_frames.set_shape([None] * (real_frames.shape.ndims - 1) +
[frame_length_static])
# Optionally window and overlap-add the inner 2 dimensions of real_frames
# into a single [samples] dimension.
if window_fn is not None:
window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)
real_frames *= window
return reconstruction_ops.overlap_and_add(real_frames, frame_step)
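# A quick NumPy illustration (not part of this module; illustrative only) of
# the pad-or-truncate step above: irfft yields fft_length samples per frame,
# which are trimmed or zero-padded to frame_length before windowing and
# overlap-add.
_frames_sketch = np.zeros((4, 512))          # e.g. fft_length = 512
_frames_sketch = _frames_sketch[..., :400]   # frame_length = 400 < fft_length
assert _frames_sketch.shape == (4, 400)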
def _enclosing_power_of_two(value):
"""Return 2**N for integer N such that 2**N >= value."""
value_static = tensor_util.constant_value(value)
if value_static is not None:
return constant_op.constant(
int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
return math_ops.cast(
math_ops.pow(
2.0,
math_ops.ceil(
math_ops.log(math_ops.cast(value, dtypes.float32)) /
math_ops.log(2.0))), value.dtype)
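# A quick check (not part of this module; illustrative only): for a
# frame_length of 400 samples the enclosing power of two is 512, so stft and
# inverse_stft default to a 512-point FFT when fft_length is omitted.
assert int(2 ** np.ceil(np.log2(400))) == 512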
|
tensorflow-master
|
tensorflow/python/ops/signal/spectral_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mel-Frequency Cepstral Coefficients (MFCCs) ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.signal import dct_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('signal.mfccs_from_log_mel_spectrograms')
def mfccs_from_log_mel_spectrograms(log_mel_spectrograms, name=None):
"""Computes [MFCCs][mfcc] of `log_mel_spectrograms`.
Implemented with GPU-compatible ops and supports gradients.
[Mel-Frequency Cepstral Coefficient (MFCC)][mfcc] calculation consists of
taking the DCT-II of a log-magnitude mel-scale spectrogram. [HTK][htk]'s MFCCs
use a particular scaling of the DCT-II which is almost orthogonal
normalization. We follow this convention.
All `num_mel_bins` MFCCs are returned and it is up to the caller to select
a subset of the MFCCs based on their application. For example, it is typical
to only use the first few for speech recognition, as this results in
an approximately pitch-invariant representation of the signal.
For example:
```python
sample_rate = 16000.0
# A Tensor of [batch_size, num_samples] mono PCM samples in the range [-1, 1].
pcm = tf.compat.v1.placeholder(tf.float32, [None, None])
# A 1024-point STFT with frames of 64 ms and 75% overlap.
stfts = tf.signal.stft(pcm, frame_length=1024, frame_step=256,
fft_length=1024)
spectrograms = tf.abs(stfts)
# Warp the linear scale spectrograms into the mel-scale.
num_spectrogram_bins = stfts.shape[-1].value
lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrograms, linear_to_mel_weight_matrix, 1)
mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
# Compute MFCCs from log_mel_spectrograms and take the first 13.
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(
log_mel_spectrograms)[..., :13]
```
Args:
log_mel_spectrograms: A `[..., num_mel_bins]` `float32` `Tensor` of
log-magnitude mel-scale spectrograms.
name: An optional name for the operation.
Returns:
A `[..., num_mel_bins]` `float32` `Tensor` of the MFCCs of
`log_mel_spectrograms`.
Raises:
ValueError: If `num_mel_bins` is not positive.
[mfcc]: https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
[htk]: https://en.wikipedia.org/wiki/HTK_(software)
"""
with ops.name_scope(name, 'mfccs_from_log_mel_spectrograms',
[log_mel_spectrograms]):
# Compute the DCT-II of the resulting log-magnitude mel-scale spectrogram.
# The DCT used in HTK scales every basis vector by sqrt(2/N), which is the
# scaling required for an "orthogonal" DCT-II *except* in the 0th bin, where
# the true orthogonal DCT (as implemented by scipy) scales by sqrt(1/N). For
# this reason, we don't apply orthogonal normalization and scale the DCT by
# `0.5 * sqrt(2/N)` manually.
log_mel_spectrograms = ops.convert_to_tensor(log_mel_spectrograms,
dtype=dtypes.float32)
if (log_mel_spectrograms.shape.ndims and
log_mel_spectrograms.shape.dims[-1].value is not None):
num_mel_bins = log_mel_spectrograms.shape.dims[-1].value
if num_mel_bins == 0:
raise ValueError('num_mel_bins must be positive. Got: %s' %
log_mel_spectrograms)
else:
num_mel_bins = array_ops.shape(log_mel_spectrograms)[-1]
dct2 = dct_ops.dct(log_mel_spectrograms, type=2)
return dct2 * math_ops.rsqrt(
math_ops.cast(num_mel_bins, dtypes.float32) * 2.0)
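# A minimal NumPy sketch (not part of this module; names are illustrative only)
# of the scaling described above, assuming dct_ops.dct(..., type=2) computes
# the unnormalized DCT-II (the scipy norm=None convention).
import numpy as np

def _mfcc_reference_sketch(log_mel_spectrograms):
  n = log_mel_spectrograms.shape[-1]
  k = np.arange(n)[:, np.newaxis]
  m = np.arange(n)[np.newaxis, :]
  basis = np.cos(np.pi * k * (2.0 * m + 1.0) / (2.0 * n))
  dct2 = 2.0 * log_mel_spectrograms @ basis.T  # unnormalized DCT-II
  return dct2 / np.sqrt(2.0 * n)               # i.e. dct2 * 0.5 * sqrt(2 / n)

# e.g. _mfcc_reference_sketch(np.random.randn(4, 80))[..., :13] gives the first
# 13 MFCCs for a batch of 4 log-mel frames with 80 mel bins each.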
|
tensorflow-master
|
tensorflow/python/ops/signal/mfcc_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast-Fourier Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.framework import tensor_util as _tensor_util
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.util.tf_export import tf_export
def _infer_fft_length_for_rfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
return _array_ops.shape(input_tensor)[-fft_rank:]
# Otherwise, return a constant.
return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
return _array_ops.stack(fft_length)
# Otherwise, return a constant.
fft_length = fft_shape.as_list()
if fft_length:
fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
return _ops.convert_to_tensor(fft_length, _dtypes.int32)
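# A quick check (not part of this module; illustrative only) of the rule above:
# an RFFT keeps fft_length // 2 + 1 unique bins, so an IRFFT input with inner
# dimension d corresponds to an FFT of length 2 * (d - 1).
_num_bins = np.fft.rfft(np.zeros(256)).shape[-1]  # 129 unique bins
assert _num_bins == 256 // 2 + 1
assert 2 * (_num_bins - 1) == 256                 # the inferred fft_length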
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
"""Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
fft_shape = _tensor_util.constant_value_as_shape(fft_length)
# Edge case: skip padding empty tensors.
if (input_tensor.shape.ndims is not None and
any(dim.value == 0 for dim in input_tensor.shape.dims)):
return input_tensor
# If we know the shapes ahead of time, we can either skip or pre-compute the
# appropriate paddings. Otherwise, fall back to computing paddings in
# TensorFlow.
if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
# Slice the last FFT-rank dimensions from input_tensor's shape.
input_fft_shape = input_tensor.shape[-fft_shape.ndims:]
if input_fft_shape.is_fully_defined():
# In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
if is_reverse:
fft_shape = fft_shape[:-1].concatenate(
fft_shape.dims[-1].value // 2 + 1)
paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
for fft_dim, input_dim in zip(
fft_shape.dims, input_fft_shape.dims)]
if any(pad > 0 for _, pad in paddings):
outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
fft_shape.ndims), 0)
return _array_ops.pad(input_tensor, outer_paddings + paddings)
return input_tensor
# If we can't determine the paddings ahead of time, then we have to pad. If
# the paddings end up as zero, tf.pad has a special-case that does no work.
input_rank = _array_ops.rank(input_tensor)
input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
# In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
if is_reverse:
fft_length = _array_ops.concat([fft_length[:-1],
fft_length[-1:] // 2 + 1], 0)
fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
axis=1)
return _array_ops.pad(input_tensor, paddings)
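# Minimal usage sketch (the helper name `_example_pad_for_rfft` is
# hypothetical and not part of this module): with static shapes the zero
# padding is computed in Python, so a [2, 5] input padded to fft_length [8]
# becomes a single tf.pad with paddings [[0, 0], [0, 3]].
def _example_pad_for_rfft():
  frames = _array_ops.ones([2, 5], _dtypes.float32)
  fft_length = _ops.convert_to_tensor([8], _dtypes.int32)
  return _maybe_pad_for_rfft(frames, fft_rank=1, fft_length=fft_length)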
def _rfft_wrapper(fft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
def _rfft(input_tensor, fft_length=None, name=None):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.float32)
input_tensor.shape.with_rank_at_least(fft_rank)
if fft_length is None:
fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)
return fft_fn(input_tensor, fft_length, name)
_rfft.__doc__ = fft_fn.__doc__
return _rfft
def _irfft_wrapper(ifft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""
def _irfft(input_tensor, fft_length=None, name=None):
"""Wrapper irfft* that infers fft_length argument."""
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64)
input_tensor.shape.with_rank_at_least(fft_rank)
if fft_length is None:
fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length,
is_reverse=True)
return ifft_fn(input_tensor, fft_length, name)
_irfft.__doc__ = ifft_fn.__doc__
return _irfft
# FFT/IFFT 1/2/3D are exported via
# third_party/tensorflow/core/api_def/python_api/
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft")
tf_export("signal.rfft", v1=["signal.rfft", "spectral.rfft"])(rfft)
irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft")
tf_export("signal.irfft", v1=["signal.irfft", "spectral.irfft"])(irfft)
rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d")
tf_export("signal.rfft2d", v1=["signal.rfft2d", "spectral.rfft2d"])(rfft2d)
irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d")
tf_export("signal.irfft2d", v1=["signal.irfft2d", "spectral.irfft2d"])(irfft2d)
rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d")
tf_export("signal.rfft3d", v1=["signal.rfft3d", "spectral.rfft3d"])(rfft3d)
irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d")
tf_export("signal.irfft3d", v1=["signal.irfft3d", "spectral.irfft3d"])(irfft3d)
def _fft_size_for_grad(grad, rank):
return _math_ops.reduce_prod(_array_ops.shape(grad)[-rank:])
@_ops.RegisterGradient("FFT")
def _fft_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype)
return ifft(grad) * size
@_ops.RegisterGradient("IFFT")
def _ifft_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype.real_dtype),
grad.dtype)
return fft(grad) * rsize
@_ops.RegisterGradient("FFT2D")
def _fft2d_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype)
return ifft2d(grad) * size
@_ops.RegisterGradient("IFFT2D")
def _ifft2d_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype.real_dtype),
grad.dtype)
return fft2d(grad) * rsize
@_ops.RegisterGradient("FFT3D")
def _fft3d_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype)
return ifft3d(grad) * size
@_ops.RegisterGradient("IFFT3D")
def _ifft3d_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype.real_dtype),
grad.dtype)
return fft3d(grad) * rsize
def _rfft_grad_helper(rank, irfft_fn):
"""Returns a gradient function for an RFFT of the provided rank."""
# Can't happen because we don't register a gradient for RFFT3D.
assert rank in (1, 2), "Gradient for RFFT3D is not implemented."
def _grad(op, grad):
"""A gradient function for RFFT with the provided `rank` and `irfft_fn`."""
fft_length = op.inputs[1]
input_shape = _array_ops.shape(op.inputs[0])
is_even = _math_ops.cast(1 - (fft_length[-1] % 2), _dtypes.complex64)
def _tile_for_broadcasting(matrix, t):
expanded = _array_ops.reshape(
matrix,
_array_ops.concat([
_array_ops.ones([_array_ops.rank(t) - 2], _dtypes.int32),
_array_ops.shape(matrix)
], 0))
return _array_ops.tile(
expanded, _array_ops.concat([_array_ops.shape(t)[:-2], [1, 1]], 0))
def _mask_matrix(length):
"""Computes t_n = exp(sqrt(-1) * pi * n^2 / line_len)."""
# TODO(rjryan): Speed up computation of twiddle factors using the
# following recurrence relation and cache them across invocations of RFFT.
#
# t_n = exp(sqrt(-1) * pi * n^2 / line_len)
# for n = 0, 1,..., line_len-1.
# For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2
a = _array_ops.tile(
_array_ops.expand_dims(_math_ops.range(length), 0), (length, 1))
b = _array_ops.transpose(a, [1, 0])
return _math_ops.exp(
-2j * np.pi * _math_ops.cast(a * b, _dtypes.complex64) /
_math_ops.cast(length, _dtypes.complex64))
def _ymask(length):
"""A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`."""
return _math_ops.cast(1 - 2 * (_math_ops.range(length) % 2),
_dtypes.complex64)
y0 = grad[..., 0:1]
if rank == 1:
ym = grad[..., -1:]
extra_terms = y0 + is_even * ym * _ymask(input_shape[-1])
elif rank == 2:
# Create a mask matrix for y0 and ym.
base_mask = _mask_matrix(input_shape[-2])
# Tile base_mask to match y0 in shape so that we can batch-matmul the
# inner 2 dimensions.
tiled_mask = _tile_for_broadcasting(base_mask, y0)
y0_term = _math_ops.matmul(tiled_mask, _math_ops.conj(y0))
extra_terms = y0_term
ym = grad[..., -1:]
ym_term = _math_ops.matmul(tiled_mask, _math_ops.conj(ym))
inner_dim = input_shape[-1]
ym_term = _array_ops.tile(
ym_term,
_array_ops.concat([
_array_ops.ones([_array_ops.rank(grad) - 1], _dtypes.int32),
[inner_dim]
], 0)) * _ymask(inner_dim)
extra_terms += is_even * ym_term
# The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
# factor, plus some additional terms to make up for the components dropped
# due to Hermitian symmetry.
input_size = _math_ops.cast(
_fft_size_for_grad(op.inputs[0], rank), _dtypes.float32)
the_irfft = irfft_fn(grad, fft_length)
return 0.5 * (the_irfft * input_size + _math_ops.real(extra_terms)), None
return _grad
def _irfft_grad_helper(rank, rfft_fn):
"""Returns a gradient function for an IRFFT of the provided rank."""
# Can't happen because we don't register a gradient for IRFFT3D.
assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."
def _grad(op, grad):
"""A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
# Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
# and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
# graph we special-case the situation where the FFT length and last
# dimension of the input are known at graph construction time.
fft_length = op.inputs[1]
is_odd = _math_ops.mod(fft_length[-1], 2)
input_last_dimension = _array_ops.shape(op.inputs[0])[-1]
mask = _array_ops.concat(
[[1.0], 2.0 * _array_ops.ones([input_last_dimension - 2 + is_odd]),
_array_ops.ones([1 - is_odd])], 0)
rsize = _math_ops.reciprocal(_math_ops.cast(
_fft_size_for_grad(grad, rank), _dtypes.float32))
# The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
# factor and a mask. The mask scales the gradient for the Hermitian
# symmetric components of the RFFT by a factor of two, since these
# components are de-duplicated in the RFFT.
the_rfft = rfft_fn(grad, fft_length)
return the_rfft * _math_ops.cast(rsize * mask, _dtypes.complex64), None
return _grad
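# Minimal sketch of the mask construction above (the helper name
# `_example_irfft_grad_mask` is hypothetical and not part of this module):
# for fft_length 8 the RFFT has 5 bins and the mask is [1., 2., 2., 2., 1.];
# for fft_length 7 it has 4 bins and the mask is [1., 2., 2., 2.].
def _example_irfft_grad_mask(fft_len, num_bins):
  is_odd = fft_len % 2
  return _array_ops.concat(
      [[1.0], 2.0 * _array_ops.ones([num_bins - 2 + is_odd]),
       _array_ops.ones([1 - is_odd])], 0)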
@tf_export("signal.fftshift")
def fftshift(x, axes=None, name=None):
"""Shift the zero-frequency component to the center of the spectrum.
This function swaps half-spaces for all axes listed (defaults to all).
Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
@compatibility(numpy)
Equivalent to numpy.fft.fftshift.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html
@end_compatibility
For example:
```python
x = tf.signal.fftshift([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])
x.numpy() # array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
```
Args:
x: `Tensor`, input tensor.
    axes: `int` or shape `tuple`, optional. Axes over which to shift. Defaults
      to `None`, which shifts all axes.
name: An optional name for the operation.
Returns:
A `Tensor`, The shifted tensor.
"""
with _ops.name_scope(name, "fftshift") as name:
x = _ops.convert_to_tensor(x)
if axes is None:
axes = tuple(range(x.shape.ndims))
shift = [int(dim // 2) for dim in x.shape]
elif isinstance(axes, int):
shift = int(x.shape[axes] // 2)
else:
shift = [int((x.shape[ax]) // 2) for ax in axes]
return manip_ops.roll(x, shift, axes, name)
@tf_export("signal.ifftshift")
def ifftshift(x, axes=None, name=None):
"""The inverse of fftshift.
Although identical for even-length x,
the functions differ by one sample for odd-length x.
@compatibility(numpy)
Equivalent to numpy.fft.ifftshift.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftshift.html
@end_compatibility
For example:
```python
x = tf.signal.ifftshift([[ 0., 1., 2.],[ 3., 4., -4.],[-3., -2., -1.]])
x.numpy() # array([[ 4., -4., 3.],[-2., -1., -3.],[ 1., 2., 0.]])
```
Args:
x: `Tensor`, input tensor.
    axes: `int` or shape `tuple`, optional. Axes over which to calculate.
      Defaults to `None`, which shifts all axes.
name: An optional name for the operation.
Returns:
A `Tensor`, The shifted tensor.
"""
with _ops.name_scope(name, "ifftshift") as name:
x = _ops.convert_to_tensor(x)
if axes is None:
axes = tuple(range(x.shape.ndims))
shift = [-int(dim // 2) for dim in x.shape]
elif isinstance(axes, int):
shift = -int(x.shape[axes] // 2)
else:
shift = [-int(x.shape[ax] // 2) for ax in axes]
return manip_ops.roll(x, shift, axes, name)
_ops.RegisterGradient("RFFT")(_rfft_grad_helper(1, irfft))
_ops.RegisterGradient("IRFFT")(_irfft_grad_helper(1, rfft))
_ops.RegisterGradient("RFFT2D")(_rfft_grad_helper(2, irfft2d))
_ops.RegisterGradient("IRFFT2D")(_irfft_grad_helper(2, rfft2d))
|
tensorflow-master
|
tensorflow/python/ops/signal/fft_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Discrete Cosine Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math as _math
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.util.tf_export import tf_export
def _validate_dct_arguments(input_tensor, dct_type, n, axis, norm):
"""Checks that DCT/IDCT arguments are compatible and well formed."""
if axis != -1:
raise NotImplementedError("axis must be -1. Got: %s" % axis)
if n is not None and n < 1:
raise ValueError("n should be a positive integer or None")
if dct_type not in (1, 2, 3):
raise ValueError("Only Types I, II and III (I)DCT are supported.")
if dct_type == 1:
if norm == "ortho":
raise ValueError("Normalization is not supported for the Type-I DCT.")
if input_tensor.shape[-1] is not None and input_tensor.shape[-1] < 2:
raise ValueError(
"Type-I DCT requires the dimension to be greater than one.")
if norm not in (None, "ortho"):
raise ValueError(
"Unknown normalization. Expected None or 'ortho', got: %s" % norm)
# TODO(rjryan): Implement `axis` parameter.
@tf_export("signal.dct", v1=["signal.dct", "spectral.dct"])
def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin
"""Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.
Currently only Types I, II and III are supported.
Type I is implemented using a length `2N` padded `tf.signal.rfft`.
Type II is implemented using a length `2N` padded `tf.signal.rfft`, as
described here: [Type 2 DCT using 2N FFT padded (Makhoul)](https://dsp.stackexchange.com/a/10606).
Type III is a fairly straightforward inverse of Type II
(i.e. using a length `2N` padded `tf.signal.irfft`).
@compatibility(scipy)
Equivalent to [scipy.fftpack.dct](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html)
for Type-I, Type-II and Type-III DCT.
@end_compatibility
Args:
input: A `[..., samples]` `float32` `Tensor` containing the signals to
take the DCT of.
type: The DCT type to perform. Must be 1, 2 or 3.
n: The length of the transform. If length is less than sequence length,
only the first n elements of the sequence are considered for the DCT.
If n is greater than the sequence length, zeros are padded and then
the DCT is computed as usual.
axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
norm: The normalization to apply. `None` for no normalization or `'ortho'`
for orthonormal normalization.
name: An optional name for the operation.
Returns:
A `[..., samples]` `float32` `Tensor` containing the DCT of `input`.
Raises:
    ValueError: If `type` is not `1`, `2` or `3`, `axis` is not `-1`, `n` is
      neither `None` nor a positive integer, or `norm` is not `None` or
      `'ortho'`.
ValueError: If `type` is `1` and `norm` is `ortho`.
[dct]: https://en.wikipedia.org/wiki/Discrete_cosine_transform
"""
_validate_dct_arguments(input, type, n, axis, norm)
with _ops.name_scope(name, "dct", [input]):
# We use the RFFT to compute the DCT and TensorFlow only supports float32
# for FFTs at the moment.
input = _ops.convert_to_tensor(input, dtype=_dtypes.float32)
seq_len = (
tensor_shape.dimension_value(input.shape[-1]) or
_array_ops.shape(input)[-1])
if n is not None:
if n <= seq_len:
input = input[..., 0:n]
else:
rank = len(input.shape)
padding = [[0, 0] for i in range(rank)]
padding[rank - 1][1] = n - seq_len
padding = _ops.convert_to_tensor(padding, dtype=_dtypes.int32)
input = _array_ops.pad(input, paddings=padding)
axis_dim = (tensor_shape.dimension_value(input.shape[-1])
or _array_ops.shape(input)[-1])
axis_dim_float = _math_ops.cast(axis_dim, _dtypes.float32)
if type == 1:
dct1_input = _array_ops.concat([input, input[..., -2:0:-1]], axis=-1)
dct1 = _math_ops.real(fft_ops.rfft(dct1_input))
return dct1
if type == 2:
scale = 2.0 * _math_ops.exp(
_math_ops.complex(
0.0, -_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
axis_dim_float))
# TODO(rjryan): Benchmark performance and memory usage of the various
# approaches to computing a DCT via the RFFT.
dct2 = _math_ops.real(
fft_ops.rfft(
input, fft_length=[2 * axis_dim])[..., :axis_dim] * scale)
if norm == "ortho":
n1 = 0.5 * _math_ops.rsqrt(axis_dim_float)
n2 = n1 * _math_ops.sqrt(2.0)
# Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
weights = _array_ops.pad(
_array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
constant_values=n2)
dct2 *= weights
return dct2
elif type == 3:
if norm == "ortho":
n1 = _math_ops.sqrt(axis_dim_float)
n2 = n1 * _math_ops.sqrt(0.5)
# Use tf.pad to make a vector of [n1, n2, n2, n2, ...].
weights = _array_ops.pad(
_array_ops.expand_dims(n1, 0), [[0, axis_dim - 1]],
constant_values=n2)
input *= weights
else:
input *= axis_dim_float
scale = 2.0 * _math_ops.exp(
_math_ops.complex(
0.0,
_math_ops.range(axis_dim_float) * _math.pi * 0.5 /
axis_dim_float))
dct3 = _math_ops.real(
fft_ops.irfft(
scale * _math_ops.complex(input, 0.0),
fft_length=[2 * axis_dim]))[..., :axis_dim]
return dct3
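# Minimal comparison sketch (assumes scipy is installed and eager execution or
# a session for evaluation; the helper name `_example_compare_dct_with_scipy`
# is hypothetical and not part of this module): the RFFT-based Type-II DCT
# above agrees with scipy.fftpack.dct to float32 precision.
def _example_compare_dct_with_scipy():
  import numpy as np  # Local imports keep the sketch self-contained.
  from scipy import fftpack
  signals = np.random.rand(3, 16).astype(np.float32)
  tf_result = dct(signals, type=2, norm="ortho")
  scipy_result = fftpack.dct(signals, type=2, norm="ortho", axis=-1)
  return tf_result, scipy_result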
# TODO(rjryan): Implement `n` and `axis` parameters.
@tf_export("signal.idct", v1=["signal.idct", "spectral.idct"])
def idct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disable=redefined-builtin
"""Computes the 1D [Inverse Discrete Cosine Transform (DCT)][idct] of `input`.
Currently only Types I, II and III are supported. Type III is the inverse of
Type II, and vice versa.
Note that you must re-normalize by 1/(2n) to obtain an inverse if `norm` is
not `'ortho'`. That is:
`signal == idct(dct(signal)) * 0.5 / signal.shape[-1]`.
When `norm='ortho'`, we have:
`signal == idct(dct(signal, norm='ortho'), norm='ortho')`.
@compatibility(scipy)
Equivalent to [scipy.fftpack.idct](https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.idct.html)
for Type-I, Type-II and Type-III DCT.
@end_compatibility
Args:
input: A `[..., samples]` `float32` `Tensor` containing the signals to take
the DCT of.
type: The IDCT type to perform. Must be 1, 2 or 3.
n: For future expansion. The length of the transform. Must be `None`.
axis: For future expansion. The axis to compute the DCT along. Must be `-1`.
norm: The normalization to apply. `None` for no normalization or `'ortho'`
for orthonormal normalization.
name: An optional name for the operation.
Returns:
A `[..., samples]` `float32` `Tensor` containing the IDCT of `input`.
Raises:
    ValueError: If `type` is not `1`, `2` or `3`, `n` is not `None`, `axis` is
      not `-1`, or `norm` is not `None` or `'ortho'`.
[idct]:
https://en.wikipedia.org/wiki/Discrete_cosine_transform#Inverse_transforms
"""
_validate_dct_arguments(input, type, n, axis, norm)
inverse_type = {1: 1, 2: 3, 3: 2}[type]
return dct(input, type=inverse_type, n=n, axis=axis, norm=norm, name=name)
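# Minimal sketch of the normalization note in the docstring above (the helper
# name `_example_idct_round_trip` is hypothetical and not part of this
# module): with norm=None the dct/idct pair scales the signal by 2 * N, so
# dividing by 2 * N recovers the input; with norm='ortho' the pair is an
# exact inverse.
def _example_idct_round_trip():
  signal = _array_ops.ones([4, 32], _dtypes.float32)
  n = 32
  unnormalized = idct(dct(signal)) * 0.5 / n                    # ~= signal
  orthonormal = idct(dct(signal, norm="ortho"), norm="ortho")   # ~= signal
  return unnormalized, orthonormal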
|
tensorflow-master
|
tensorflow/python/ops/signal/dct_ops.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility ops shared across tf.contrib.signal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fractions
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
def gcd(a, b, name=None):
"""Returns the greatest common divisor via Euclid's algorithm.
Args:
a: The dividend. A scalar integer `Tensor`.
b: The divisor. A scalar integer `Tensor`.
name: An optional name for the operation.
Returns:
A scalar `Tensor` representing the greatest common divisor between `a` and
`b`.
Raises:
ValueError: If `a` or `b` are not scalar integers.
"""
with ops.name_scope(name, 'gcd', [a, b]):
a = ops.convert_to_tensor(a)
b = ops.convert_to_tensor(b)
a.shape.assert_has_rank(0)
b.shape.assert_has_rank(0)
if not a.dtype.is_integer:
raise ValueError('a must be an integer type. Got: %s' % a.dtype)
if not b.dtype.is_integer:
raise ValueError('b must be an integer type. Got: %s' % b.dtype)
# TPU requires static shape inference. GCD is used for subframe size
# computation, so we should prefer static computation where possible.
const_a = tensor_util.constant_value(a)
const_b = tensor_util.constant_value(b)
if const_a is not None and const_b is not None:
return ops.convert_to_tensor(fractions.gcd(const_a, const_b))
cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
body = lambda a, b: [b, math_ops.mod(a, b)]
a, b = control_flow_ops.while_loop(cond, body, [a, b], back_prop=False)
return a
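# Minimal usage sketch (the helper name `_example_gcd_usage` is hypothetical
# and not part of this module): with constant inputs the result is folded to
# a Python-computed constant, which keeps downstream shape computations
# static (useful for TPU); non-constant tensors fall back to the while_loop
# form of Euclid's algorithm.
def _example_gcd_usage():
  frame_length = ops.convert_to_tensor(400)
  frame_step = ops.convert_to_tensor(160)
  return gcd(frame_length, frame_step)  # A constant tensor holding 80.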
|
tensorflow-master
|
tensorflow/python/ops/signal/util_ops.py
|