# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import chi2 as chi2_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class Chi2Test(test.TestCase):
def testChi2LogPDF(self):
with self.cached_session():
batch_size = 6
df = constant_op.constant([2.0] * batch_size, dtype=np.float64)
df_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)
chi2 = chi2_lib.Chi2(df=df)
expected_log_pdf = stats.chi2.logpdf(x, df_v)
log_pdf = chi2.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
self.assertAllClose(log_pdf.eval(), expected_log_pdf)
pdf = chi2.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
def testChi2CDF(self):
with self.cached_session():
batch_size = 6
df = constant_op.constant([2.0] * batch_size, dtype=np.float64)
df_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)
chi2 = chi2_lib.Chi2(df=df)
expected_cdf = stats.chi2.cdf(x, df_v)
cdf = chi2.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testChi2Mean(self):
with self.cached_session():
df_v = np.array([1., 3, 5], dtype=np.float64)
expected_mean = stats.chi2.mean(df_v)
chi2 = chi2_lib.Chi2(df=df_v)
self.assertEqual(chi2.mean().get_shape(), (3,))
self.assertAllClose(chi2.mean().eval(), expected_mean)
def testChi2Variance(self):
with self.cached_session():
df_v = np.array([1., 3, 5], np.float64)
expected_variances = stats.chi2.var(df_v)
chi2 = chi2_lib.Chi2(df=df_v)
self.assertEqual(chi2.variance().get_shape(), (3,))
self.assertAllClose(chi2.variance().eval(), expected_variances)
def testChi2Entropy(self):
with self.cached_session():
df_v = np.array([1., 3, 5], dtype=np.float64)
expected_entropy = stats.chi2.entropy(df_v)
chi2 = chi2_lib.Chi2(df=df_v)
self.assertEqual(chi2.entropy().get_shape(), (3,))
self.assertAllClose(chi2.entropy().eval(), expected_entropy)
def testChi2WithAbsDf(self):
with self.cached_session():
df_v = np.array([-1.3, -3.2, 5], dtype=np.float64)
chi2 = chi2_lib.Chi2WithAbsDf(df=df_v)
self.assertAllClose(
math_ops.floor(math_ops.abs(df_v)).eval(),
chi2.df.eval())
if __name__ == "__main__":
test.main()
# repo: tensorflow-r1.15.5-nv23.03 | path: tensorflow/contrib/distributions/python/kernel_tests/chi2_test.py
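For reference, a minimal scipy-only sketch of the identity the checks above lean on: Chi2(df) is the Gamma special case with concentration df/2 and rate 1/2 (which, to the best of my reading, is also how the contrib `Chi2` is implemented), so the scipy baselines can be reproduced either way.

import numpy as np
from scipy import stats

df = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)

# Chi2(df) == Gamma(concentration=df/2, rate=1/2); scipy parameterizes Gamma by
# `scale`, which is the reciprocal of the rate.
log_pdf_chi2 = stats.chi2.logpdf(x, df)
log_pdf_gamma = stats.gamma.logpdf(x, a=df / 2., scale=2.)
np.testing.assert_allclose(log_pdf_chi2, log_pdf_gamma)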
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateStudentsT Distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from scipy import special
from tensorflow.contrib.distributions.python.ops.vector_student_t import _VectorStudentT
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class _FakeVectorStudentT(object):
"""Fake scipy implementation for Multivariate Student's t-distribution.
Technically we don't need to test the `Vector Student's t-distribution` since
its composed of only unit-tested parts. However this _FakeVectorStudentT
serves as something like an end-to-end test of the
`TransformedDistribution + Affine` API.
Other `Vector*` implementations need only test new code. That we don't need
to test every Vector* distribution is good because there aren't SciPy
analogs and reimplementing everything in NumPy sort of defeats the point of
having the `TransformedDistribution + Affine` API.
"""
def __init__(self, df, loc, scale_tril):
self._df = np.asarray(df)
self._loc = np.asarray(loc)
self._scale_tril = np.asarray(scale_tril)
def log_prob(self, x):
def _compute(df, loc, scale_tril, x):
k = scale_tril.shape[-1]
ildj = np.sum(np.log(np.abs(np.diag(scale_tril))), axis=-1)
logz = ildj + k * (0.5 * np.log(df) +
0.5 * np.log(np.pi) +
special.gammaln(0.5 * df) -
special.gammaln(0.5 * (df + 1.)))
y = linalg.solve_triangular(scale_tril, np.matrix(x - loc).T,
lower=True, overwrite_b=True)
logs = -0.5 * (df + 1.) * np.sum(np.log1p(y**2. / df), axis=-2)
return logs - logz
if not self._df.shape:
return _compute(self._df, self._loc, self._scale_tril, x)
return np.concatenate([
[_compute(self._df[i], self._loc[i], self._scale_tril[i], x[:, i, :])]
for i in range(len(self._df))]).T
def prob(self, x):
return np.exp(self.log_prob(x))
class VectorStudentTTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testProbStaticScalar(self):
with self.cached_session():
# Scalar batch_shape.
df = np.asarray(3., dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1], dtype=np.float32)
scale_diag = np.asarray([2.], dtype=np.float32)
scale_tril = np.diag(scale_diag)
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
x = 2. * self._rng.rand(4, 1).astype(np.float32) - 1.
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbStatic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2, 3], dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
with self.cached_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbDynamic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2, 3], dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
with self.cached_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl, scale_diag=scale_diag_pl,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
def testProbScalarBaseDistributionNonScalarTransform(self):
# Scalar batch_shape.
df = np.asarray(2., dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=np.tile(df, reps=len(scale_diag)),
loc=loc,
scale_tril=scale_tril)
with self.cached_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbScalarBaseDistributionNonScalarTransformDynamic(self):
# Scalar batch_shape.
df = np.asarray(2., dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=np.tile(df, reps=len(scale_diag)),
loc=loc,
scale_tril=scale_tril)
with self.cached_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl, scale_diag=scale_diag_pl,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
def testProbNonScalarBaseDistributionScalarTransform(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2., 3.], dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1, 2, 3], dtype=np.float32)
scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
scale_tril = np.diag(scale_diag)
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df,
loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
reps=[len(df), 1, 1]))
with self.cached_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbNonScalarBaseDistributionScalarTransformDynamic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2., 3.], dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1, 2, 3], dtype=np.float32)
scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
scale_tril = np.diag(scale_diag)
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df,
loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
reps=[len(df), 1, 1]))
with self.cached_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl, scale_diag=scale_diag_pl,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
if __name__ == "__main__":
test.main()
# repo: tensorflow-r1.15.5-nv23.03 | path: tensorflow/contrib/distributions/python/kernel_tests/vector_student_t_test.py
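A small self-contained check (scipy only, diagonal scale, illustrative values) of the per-component change of variables that `_FakeVectorStudentT._compute` above encodes.

import numpy as np
from scipy import special, stats

df = 3.
loc = np.array([1., 2.])
scale = np.array([2., 0.5])   # a diagonal scale_tril, written as its diagonal
x = np.array([0.3, -1.2])

y = (x - loc) / scale
# Independent Student's t components plus the affine log-det-Jacobian correction.
log_prob = np.sum(stats.t.logpdf(y, df) - np.log(scale))

# The same quantity, arranged the way `_compute` arranges it.
k = len(scale)
logz = np.sum(np.log(scale)) + k * (0.5 * np.log(df) + 0.5 * np.log(np.pi) +
                                    special.gammaln(0.5 * df) -
                                    special.gammaln(0.5 * (df + 1.)))
logs = -0.5 * (df + 1.) * np.sum(np.log1p(y ** 2 / df))
np.testing.assert_allclose(log_prob, logs - logz)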
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PoissonLogNormalQuadratureCompoundTest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import poisson_lognormal
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class _PoissonLogNormalQuadratureCompoundTest(
test_util.DiscreteScalarDistributionTestHelpers):
"""Tests the PoissonLogNormalQuadratureCompoundTest distribution."""
def testSampleProbConsistent(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
-2.,
shape=[] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
1.1,
shape=[] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, pln, batch_size=1, rtol=0.1)
def testMeanVariance(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
0.,
shape=[] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
1.,
shape=[] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_mean_variance(
sess.run, pln, rtol=0.02)
def testSampleProbConsistentBroadcastScalar(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
[0., -0.5],
shape=[2] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
1.,
shape=[] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, pln, batch_size=2, rtol=0.1, atol=0.01)
def testMeanVarianceBroadcastScalar(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
[0., -0.5],
shape=[2] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
1.,
shape=[] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_mean_variance(
sess.run, pln, rtol=0.1, atol=0.01)
def testSampleProbConsistentBroadcastBoth(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
[[0.], [-0.5]],
shape=[2, 1] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
[[1., 0.9]],
shape=[1, 2] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, pln, batch_size=4, rtol=0.1, atol=0.08)
def testMeanVarianceBroadcastBoth(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
[[0.], [-0.5]],
shape=[2, 1] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
[[1., 0.9]],
shape=[1, 2] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_mean_variance(
sess.run, pln, rtol=0.1, atol=0.01)
class PoissonLogNormalQuadratureCompoundStaticShapeTest(
_PoissonLogNormalQuadratureCompoundTest, test.TestCase):
@property
def static_shape(self):
return True
class PoissonLogNormalQuadratureCompoundDynamicShapeTest(
_PoissonLogNormalQuadratureCompoundTest, test.TestCase):
@property
def static_shape(self):
return False
if __name__ == "__main__":
test.main()
# repo: tensorflow-r1.15.5-nv23.03 | path: tensorflow/contrib/distributions/python/kernel_tests/poisson_lognormal_test.py
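For orientation, a numpy-only sketch of the exact mixture these quadrature tests approximate, assuming `loc`/`scale` parameterize the underlying normal of the mixing lognormal: by the law of total variance the compound's mean is E[rate] and its variance is E[rate] + Var[rate].

import numpy as np

rng = np.random.RandomState(0)
loc, scale = 0., 1.
num_samples = 200000

# Exact mixture: rate ~ LogNormal(loc, scale), then count ~ Poisson(rate).
rate = np.exp(rng.normal(loc, scale, size=num_samples))
counts = rng.poisson(rate)

mean = np.exp(loc + 0.5 * scale ** 2)                                        # E[rate]
variance = mean + (np.exp(scale ** 2) - 1.) * np.exp(2. * loc + scale ** 2)  # + Var[rate]

np.testing.assert_allclose(counts.mean(), mean, rtol=0.1)
np.testing.assert_allclose(counts.var(), variance, rtol=0.1)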
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
class MultivariateNormalDiagPlusLowRankTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testDiagBroadcastBothBatchAndEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [1], event_shape: []
identity_multiplier = np.array([5.])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 5, 0],
[0, 4 + 5]],
[[5 + 5, 0],
[0, 6 + 5]]]),
dist.scale.to_dense().eval())
def testDiagBroadcastBothBatchAndEvent2(self):
# This test differs from `testDiagBroadcastBothBatchAndEvent` in that it
# broadcasts the batch_shapes of both the `scale_diag` and
# `scale_identity_multiplier` args.
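# Broadcasting the [3] batch shape of `scale_diag` against the [3, 1] batch shape
# of `scale_identity_multiplier` yields batch_shape [3, 3], so with event_shape [2]
# the dense scale asserted below has shape [3, 3, 2, 2].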
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3, 1], event_shape: []
identity_multiplier = np.array([[5.], [4], [3]])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllEqual(
[3, 3, 2, 2],
dist.scale.to_dense().get_shape())
def testDiagBroadcastOnlyEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 4, 0],
[0, 4 + 4]],
[[5 + 3, 0],
[0, 6 + 3]]]), # shape: [3, 2, 2]
dist.scale.to_dense().eval())
def testDiagBroadcastMultiplierAndLoc(self):
# batch_shape: [], event_shape: [3]
loc = np.array([1., 0, -1])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[5, 0, 0],
[0, 5, 0],
[0, 0, 5]],
[[4, 0, 0],
[0, 4, 0],
[0, 0, 4]],
[[3, 0, 0],
[0, 3, 0],
[0, 0, 3]]]),
dist.scale.to_dense().eval())
def testMean(self):
mu = [-1.0, 1.0]
diag_large = [1.0, 5.0]
v = [[2.0], [3.0]]
diag_small = [3.0]
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testSample(self):
# TODO(jvdillon): This test should be the basis of a new test fixture which
# is applied to every distribution. When we make this fixture, we'll also
# separate the analytical and sample-based tests, as well as the tests for
# each function. For now, we group everything so we can recycle one batch of
# samples (thus saving resources).
mu = np.array([-1., 1, 0.5], dtype=np.float32)
diag_large = np.array([1., 0.5, 0.75], dtype=np.float32)
diag_small = np.array([-1.1, 1.2], dtype=np.float32)
v = np.array([[0.7, 0.8],
[0.9, 1],
[0.5, 0.6]], dtype=np.float32) # shape: [k, r] = [3, 2]
true_mean = mu
true_scale = np.diag(diag_large) + np.matmul(np.matmul(
v, np.diag(diag_small)), v.T)
true_covariance = np.matmul(true_scale, true_scale.T)
true_variance = np.diag(true_covariance)
true_stddev = np.sqrt(true_variance)
with self.cached_session() as sess:
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
# The following distributions will test the KL divergence calculation.
mvn_identity = ds.MultivariateNormalDiag(
loc=np.array([1., 2, 0.25], dtype=np.float32),
validate_args=True)
mvn_scaled = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_identity_multiplier=2.2,
validate_args=True)
mvn_diag = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_diag=np.array([0.5, 1.5, 1.], dtype=np.float32),
validate_args=True)
mvn_chol = ds.MultivariateNormalTriL(
loc=np.array([1., 2, -1], dtype=np.float32),
scale_tril=np.array([[6., 0, 0],
[2, 5, 0],
[1, 3, 4]], dtype=np.float32) / 10.,
validate_args=True)
scale = dist.scale.to_dense()
n = int(30e3)
samps = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(samps, 0)
x = samps - sample_mean
sample_covariance = math_ops.matmul(x, x, transpose_a=True) / n
sample_kl_identity = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity = ds.kl_divergence(dist, mvn_identity)
sample_kl_scaled = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled = ds.kl_divergence(dist, mvn_scaled)
sample_kl_diag = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag = ds.kl_divergence(dist, mvn_diag)
sample_kl_chol = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol = ds.kl_divergence(dist, mvn_chol)
n = int(10e3)
baseline = ds.MultivariateNormalDiag(
loc=np.array([-1., 0.25, 1.25], dtype=np.float32),
scale_diag=np.array([1.5, 0.5, 1.], dtype=np.float32),
validate_args=True)
samps = baseline.sample(n, seed=0)
sample_kl_identity_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity_diag_baseline = ds.kl_divergence(
baseline, mvn_identity)
sample_kl_scaled_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled_diag_baseline = ds.kl_divergence(
baseline, mvn_scaled)
sample_kl_diag_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag_diag_baseline = ds.kl_divergence(baseline, mvn_diag)
sample_kl_chol_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol_diag_baseline = ds.kl_divergence(baseline, mvn_chol)
[
sample_mean_,
analytical_mean_,
sample_covariance_,
analytical_covariance_,
analytical_variance_,
analytical_stddev_,
scale_,
sample_kl_identity_, analytical_kl_identity_,
sample_kl_scaled_, analytical_kl_scaled_,
sample_kl_diag_, analytical_kl_diag_,
sample_kl_chol_, analytical_kl_chol_,
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
sample_kl_scaled_diag_baseline_, analytical_kl_scaled_diag_baseline_,
sample_kl_diag_diag_baseline_, analytical_kl_diag_diag_baseline_,
sample_kl_chol_diag_baseline_, analytical_kl_chol_diag_baseline_,
] = sess.run([
sample_mean,
dist.mean(),
sample_covariance,
dist.covariance(),
dist.variance(),
dist.stddev(),
scale,
sample_kl_identity, analytical_kl_identity,
sample_kl_scaled, analytical_kl_scaled,
sample_kl_diag, analytical_kl_diag,
sample_kl_chol, analytical_kl_chol,
sample_kl_identity_diag_baseline,
analytical_kl_identity_diag_baseline,
sample_kl_scaled_diag_baseline, analytical_kl_scaled_diag_baseline,
sample_kl_diag_diag_baseline, analytical_kl_diag_diag_baseline,
sample_kl_chol_diag_baseline, analytical_kl_chol_diag_baseline,
])
sample_variance_ = np.diag(sample_covariance_)
sample_stddev_ = np.sqrt(sample_variance_)
logging.vlog(2, "true_mean:\n{} ".format(true_mean))
logging.vlog(2, "sample_mean:\n{}".format(sample_mean_))
logging.vlog(2, "analytical_mean:\n{}".format(analytical_mean_))
logging.vlog(2, "true_covariance:\n{}".format(true_covariance))
logging.vlog(2, "sample_covariance:\n{}".format(sample_covariance_))
logging.vlog(2, "analytical_covariance:\n{}".format(
analytical_covariance_))
logging.vlog(2, "true_variance:\n{}".format(true_variance))
logging.vlog(2, "sample_variance:\n{}".format(sample_variance_))
logging.vlog(2, "analytical_variance:\n{}".format(analytical_variance_))
logging.vlog(2, "true_stddev:\n{}".format(true_stddev))
logging.vlog(2, "sample_stddev:\n{}".format(sample_stddev_))
logging.vlog(2, "analytical_stddev:\n{}".format(analytical_stddev_))
logging.vlog(2, "true_scale:\n{}".format(true_scale))
logging.vlog(2, "scale:\n{}".format(scale_))
logging.vlog(2, "kl_identity: analytical:{} sample:{}".format(
analytical_kl_identity_, sample_kl_identity_))
logging.vlog(2, "kl_scaled: analytical:{} sample:{}".format(
analytical_kl_scaled_, sample_kl_scaled_))
logging.vlog(2, "kl_diag: analytical:{} sample:{}".format(
analytical_kl_diag_, sample_kl_diag_))
logging.vlog(2, "kl_chol: analytical:{} sample:{}".format(
analytical_kl_chol_, sample_kl_chol_))
logging.vlog(
2, "kl_identity_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_identity_diag_baseline_,
sample_kl_identity_diag_baseline_))
logging.vlog(
2, "kl_scaled_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_scaled_diag_baseline_,
sample_kl_scaled_diag_baseline_))
logging.vlog(2, "kl_diag_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_diag_diag_baseline_,
sample_kl_diag_diag_baseline_))
logging.vlog(2, "kl_chol_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_chol_diag_baseline_,
sample_kl_chol_diag_baseline_))
self.assertAllClose(true_mean, sample_mean_,
atol=0., rtol=0.02)
self.assertAllClose(true_mean, analytical_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(true_covariance, sample_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_covariance, analytical_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_variance, sample_variance_,
atol=0., rtol=0.02)
self.assertAllClose(true_variance, analytical_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_stddev, sample_stddev_,
atol=0., rtol=0.02)
self.assertAllClose(true_stddev, analytical_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(true_scale, scale_,
atol=0., rtol=1e-6)
self.assertAllClose(sample_kl_identity_, analytical_kl_identity_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_scaled_, analytical_kl_scaled_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_diag_, analytical_kl_diag_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_chol_, analytical_kl_chol_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_scaled_diag_baseline_,
analytical_kl_scaled_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_diag_diag_baseline_,
analytical_kl_diag_diag_baseline_,
atol=0., rtol=0.04)
self.assertAllClose(
sample_kl_chol_diag_baseline_,
analytical_kl_chol_diag_baseline_,
atol=0., rtol=0.02)
def testImplicitLargeDiag(self):
mu = np.array([[1., 2, 3],
[11, 22, 33]]) # shape: [b, k] = [2, 3]
u = np.array([[[1., 2],
[3, 4],
[5, 6]],
[[0.5, 0.75],
[1, 0.25],
[1.5, 1.25]]]) # shape: [b, k, r] = [2, 3, 2]
m = np.array([[0.1, 0.2],
[0.4, 0.5]]) # shape: [b, r] = [2, 2]
scale = np.stack([
np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
np.transpose(u[0])),
np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
np.transpose(u[1])),
])
cov = np.stack([np.matmul(scale[0], scale[0].T),
np.matmul(scale[1], scale[1].T)])
logging.vlog(2, "expected_cov:\n{}".format(cov))
with self.cached_session():
mvn = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_perturb_factor=u,
scale_perturb_diag=m)
self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
if __name__ == "__main__":
test.main()
# repo: tensorflow-r1.15.5-nv23.03 | path: tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py
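The `sample_kl_*` versus `analytical_kl_*` comparisons in `testSample` above amount to checking a Monte-Carlo KL estimate against the textbook closed form for Gaussians. A scipy-only sketch of that pattern, loosely mirroring the `baseline` versus `mvn_identity` pair (the helper name below is illustrative, not part of the test module):

import numpy as np
from scipy import stats

def mvn_kl(mu0, cov0, mu1, cov1):
  # Closed-form KL(N(mu0, cov0) || N(mu1, cov1)) for full-rank covariances.
  k = len(mu0)
  cov1_inv = np.linalg.inv(cov1)
  diff = mu1 - mu0
  return 0.5 * (np.trace(cov1_inv.dot(cov0)) + diff.dot(cov1_inv).dot(diff) - k +
                np.log(np.linalg.det(cov1) / np.linalg.det(cov0)))

rng = np.random.RandomState(0)
mu0 = np.array([-1., 0.25, 1.25])
cov0 = np.diag(np.array([1.5, 0.5, 1.]) ** 2)
mu1 = np.array([1., 2., 0.25])
cov1 = np.eye(3)

# Monte-Carlo estimate of the same KL, mirroring the `sample_kl_*` quantities above.
samples = rng.multivariate_normal(mu0, cov0, size=100000)
sample_kl = np.mean(stats.multivariate_normal.logpdf(samples, mu0, cov0) -
                    stats.multivariate_normal.logpdf(samples, mu1, cov1))
np.testing.assert_allclose(sample_kl, mvn_kl(mu0, cov0, mu1, cov1), rtol=0.02)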
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import batch_reshape as batch_reshape_lib
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_lib
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.contrib.distributions.python.ops import wishart as wishart_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class _BatchReshapeTest(object):
def make_wishart(self, dims, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = self.dtype([
[[1., 0.5],
[0.5, 1.]],
[[0.5, 0.25],
[0.25, 0.75]],
])
scale = np.reshape(np.concatenate([scale, scale], axis=0),
old_batch_shape + [dims, dims])
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
wishart = wishart_lib.WishartFull(df=5, scale=scale_ph)
reshape_wishart = batch_reshape_lib.BatchReshape(
distribution=wishart,
batch_shape=new_batch_shape_ph,
validate_args=True)
return wishart, reshape_wishart
def test_matrix_variate_sample_and_log_prob(self):
dims = 2
new_batch_shape = [4]
old_batch_shape = [2, 2]
wishart, reshape_wishart = self.make_wishart(
dims, new_batch_shape, old_batch_shape)
batch_shape = reshape_wishart.batch_shape_tensor()
event_shape = reshape_wishart.event_shape_tensor()
expected_sample_shape = [3, 1] + new_batch_shape + [dims, dims]
x = wishart.sample([3, 1], seed=42)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_wishart.sample([3, 1], seed=42)
expected_log_prob_shape = [3, 1] + new_batch_shape
expected_log_prob = array_ops.reshape(
wishart.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_wishart.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([dims, dims], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_wishart.batch_shape)
self.assertAllEqual([dims, dims], reshape_wishart.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_matrix_variate_stats(self):
dims = 2
new_batch_shape = [4]
old_batch_shape = [2, 2]
wishart, reshape_wishart = self.make_wishart(
dims, new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_matrix_stat_shape = new_batch_shape + [dims, dims]
expected_entropy = array_ops.reshape(
wishart.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_wishart.entropy()
expected_mean = array_ops.reshape(
wishart.mean(), expected_matrix_stat_shape)
actual_mean = reshape_wishart.mean()
expected_mode = array_ops.reshape(
wishart.mode(), expected_matrix_stat_shape)
actual_mode = reshape_wishart.mode()
expected_stddev = array_ops.reshape(
wishart.stddev(), expected_matrix_stat_shape)
actual_stddev = reshape_wishart.stddev()
expected_variance = array_ops.reshape(
wishart.variance(), expected_matrix_stat_shape)
actual_variance = reshape_wishart.variance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_variance.shape)
def make_normal(self, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = self.dtype(0.5 + np.arange(
np.prod(old_batch_shape)).reshape(old_batch_shape))
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
normal = normal_lib.Normal(loc=self.dtype(0), scale=scale_ph)
reshape_normal = batch_reshape_lib.BatchReshape(
distribution=normal,
batch_shape=new_batch_shape_ph,
validate_args=True)
return normal, reshape_normal
def test_scalar_variate_sample_and_log_prob(self):
new_batch_shape = [2, 2]
old_batch_shape = [4]
normal, reshape_normal = self.make_normal(
new_batch_shape, old_batch_shape)
batch_shape = reshape_normal.batch_shape_tensor()
event_shape = reshape_normal.event_shape_tensor()
expected_sample_shape = new_batch_shape
x = normal.sample(seed=52)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_normal.sample(seed=52)
expected_log_prob_shape = new_batch_shape
expected_log_prob = array_ops.reshape(
normal.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_normal.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_normal.batch_shape)
self.assertAllEqual([], reshape_normal.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_scalar_variate_stats(self):
new_batch_shape = [2, 2]
old_batch_shape = [4]
normal, reshape_normal = self.make_normal(new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_entropy = array_ops.reshape(
normal.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_normal.entropy()
expected_mean = array_ops.reshape(
normal.mean(), expected_scalar_stat_shape)
actual_mean = reshape_normal.mean()
expected_mode = array_ops.reshape(
normal.mode(), expected_scalar_stat_shape)
actual_mode = reshape_normal.mode()
expected_stddev = array_ops.reshape(
normal.stddev(), expected_scalar_stat_shape)
actual_stddev = reshape_normal.stddev()
expected_variance = array_ops.reshape(
normal.variance(), expected_scalar_stat_shape)
actual_variance = reshape_normal.variance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_variance.shape)
def make_mvn(self, dims, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
reshape_mvn = batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
return mvn, reshape_mvn
def test_vector_variate_sample_and_log_prob(self):
dims = 3
new_batch_shape = [2, 1]
old_batch_shape = [2]
mvn, reshape_mvn = self.make_mvn(
dims, new_batch_shape, old_batch_shape)
batch_shape = reshape_mvn.batch_shape_tensor()
event_shape = reshape_mvn.event_shape_tensor()
expected_sample_shape = [3] + new_batch_shape + [dims]
x = mvn.sample(3, seed=62)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_mvn.sample(3, seed=62)
expected_log_prob_shape = [3] + new_batch_shape
expected_log_prob = array_ops.reshape(
mvn.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_mvn.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([dims], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_mvn.batch_shape)
self.assertAllEqual([dims], reshape_mvn.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_vector_variate_stats(self):
dims = 3
new_batch_shape = [2, 1]
old_batch_shape = [2]
mvn, reshape_mvn = self.make_mvn(
dims, new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_entropy = array_ops.reshape(
mvn.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_mvn.entropy()
expected_vector_stat_shape = new_batch_shape + [dims]
expected_mean = array_ops.reshape(
mvn.mean(), expected_vector_stat_shape)
actual_mean = reshape_mvn.mean()
expected_mode = array_ops.reshape(
mvn.mode(), expected_vector_stat_shape)
actual_mode = reshape_mvn.mode()
expected_stddev = array_ops.reshape(
mvn.stddev(), expected_vector_stat_shape)
actual_stddev = reshape_mvn.stddev()
expected_variance = array_ops.reshape(
mvn.variance(), expected_vector_stat_shape)
actual_variance = reshape_mvn.variance()
expected_matrix_stat_shape = new_batch_shape + [dims, dims]
expected_covariance = array_ops.reshape(
mvn.covariance(), expected_matrix_stat_shape)
actual_covariance = reshape_mvn.covariance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
expected_covariance_, actual_covariance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
expected_covariance, actual_covariance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_covariance_, actual_covariance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_variance.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_covariance.shape)
def test_bad_reshape_size(self):
dims = 2
new_batch_shape = [2, 3]
old_batch_shape = [2] # 2 != 2*3
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(
ValueError, (r"`batch_shape` size \(6\) must match "
r"`distribution\.batch_shape` size \(2\)")):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r"Shape sizes do not match."):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_non_positive_shape(self):
dims = 2
old_batch_shape = [4]
if self.is_static_shape:
# Unknown first dimension does not trigger size check. Note that
# any dimension < 0 is treated statically as unknown.
new_batch_shape = [-1, 0]
else:
new_batch_shape = [-2, -2] # -2 * -2 = 4, same size as the old shape.
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(ValueError, r".*must be >=-1.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r".*must be >=-1.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_non_vector_shape(self):
dims = 2
new_batch_shape = 2
old_batch_shape = [2]
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(ValueError, r".*must be a vector.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r".*must be a vector.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_broadcasting_explicitly_unsupported(self):
old_batch_shape = [4]
new_batch_shape = [1, 4, 1]
rate_ = self.dtype([1, 10, 2, 20])
rate = array_ops.placeholder_with_default(
rate_,
shape=old_batch_shape if self.is_static_shape else None)
poisson_4 = poisson_lib.Poisson(rate)
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
poisson_141_reshaped = batch_reshape_lib.BatchReshape(
poisson_4, new_batch_shape_ph, validate_args=True)
x_4 = self.dtype([2, 12, 3, 23])
x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)
if self.is_static_shape:
with self.assertRaisesRegexp(NotImplementedError,
"too few batch and event dims"):
poisson_141_reshaped.log_prob(x_4)
with self.assertRaisesRegexp(NotImplementedError,
"unexpected batch and event shape"):
poisson_141_reshaped.log_prob(x_114)
return
with self.assertRaisesOpError("too few batch and event dims"):
with self.cached_session():
poisson_141_reshaped.log_prob(x_4).eval()
with self.assertRaisesOpError("unexpected batch and event shape"):
with self.cached_session():
poisson_141_reshaped.log_prob(x_114).eval()
class BatchReshapeStaticTest(_BatchReshapeTest, test.TestCase):
dtype = np.float32
is_static_shape = True
class BatchReshapeDynamicTest(_BatchReshapeTest, test.TestCase):
dtype = np.float64
is_static_shape = False
if __name__ == "__main__":
test.main()
# repo: tensorflow-r1.15.5-nv23.03 | path: tensorflow/contrib/distributions/python/kernel_tests/batch_reshape_test.py
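In scipy terms, the property the BatchReshape tests above exercise is simply that evaluating the base distribution and reshaping the resulting batch equals reshaping the parameters up front; a tiny sketch with illustrative values:

import numpy as np
from scipy import stats

scale = 0.5 + np.arange(4.)                    # old batch_shape [4], as in make_normal
x = np.random.RandomState(52).randn(4)

log_prob_old = stats.norm.logpdf(x, loc=0., scale=scale)                # shape [4]
log_prob_new = stats.norm.logpdf(x.reshape(2, 2), loc=0.,
                                 scale=scale.reshape(2, 2))             # shape [2, 2]
np.testing.assert_allclose(log_prob_old.reshape(2, 2), log_prob_new)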
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SeedStream class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import seed_stream
from tensorflow.python.platform import test
class SeedStreamTest(test.TestCase):
def assertAllUnique(self, items):
self.assertEqual(len(items), len(set(items)))
def testNonRepetition(self):
# The probability of repetitions in a short stream from a correct
# PRNG is negligible; this test catches bugs that prevent state
# updates.
strm = seed_stream.SeedStream(seed=4, salt="salt")
output = [strm() for _ in range(50)]
self.assertEqual(sorted(output), sorted(list(set(output))))
def testReproducibility(self):
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(seed=4, salt="salt")
strm3 = seed_stream.SeedStream(seed=4, salt="salt")
outputs = [strm1() for _ in range(50)]
self.assertEqual(outputs, [strm2() for _ in range(50)])
self.assertEqual(outputs, [strm3() for _ in range(50)])
def testSeededDistinctness(self):
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(seed=5, salt="salt")
self.assertAllUnique(
[strm1() for _ in range(50)] + [strm2() for _ in range(50)])
def testSaltedDistinctness(self):
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(seed=4, salt="another salt")
self.assertAllUnique(
[strm1() for _ in range(50)] + [strm2() for _ in range(50)])
def testNestingRobustness(self):
# SeedStreams started from generated seeds should not collide with
# the master or with each other, even if the salts are the same.
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(strm1(), salt="salt")
strm3 = seed_stream.SeedStream(strm1(), salt="salt")
outputs = [strm1() for _ in range(50)]
self.assertAllUnique(
outputs + [strm2() for _ in range(50)] + [strm3() for _ in range(50)])
def testInitFromOtherSeedStream(self):
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(strm1, salt="salt")
strm3 = seed_stream.SeedStream(strm1, salt="another salt")
out1 = [strm1() for _ in range(50)]
out2 = [strm2() for _ in range(50)]
out3 = [strm3() for _ in range(50)]
self.assertAllEqual(out1, out2)
self.assertAllUnique(out1 + out3)
if __name__ == "__main__":
test.main()
# repo: tensorflow-r1.15.5-nv23.03 | path: tensorflow/contrib/distributions/python/kernel_tests/seed_stream_test.py
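The properties checked above (reproducibility in (seed, salt), distinctness across seeds and salts, state advancing on every call, and accepting another stream as the seed) can be illustrated by a minimal hash-based stream. This sketch is not the contrib implementation, just a stand-in with the same observable behaviour:

import hashlib

class TinySeedStream(object):
  def __init__(self, seed, salt):
    # As the tests above expect, another stream may be passed in place of a seed.
    if isinstance(seed, TinySeedStream):
      seed = seed._seed
    self._seed = seed
    self._salt = salt
    self._counter = 0

  def __call__(self):
    # Advance internal state and derive the next seed from (seed, salt, counter).
    self._counter += 1
    message = "{}|{}|{}".format(self._seed, self._salt, self._counter)
    return int(hashlib.sha256(message.encode("utf-8")).hexdigest(), 16) % (2 ** 31)

stream_a = TinySeedStream(seed=4, salt="salt")
stream_b = TinySeedStream(seed=4, salt="salt")
assert [stream_a() for _ in range(5)] == [stream_b() for _ in range(5)]
assert TinySeedStream(seed=5, salt="salt")() != TinySeedStream(seed=4, salt="salt")()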
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cauchy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import cauchy as cauchy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class CauchyTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.cached_session():
param_shapes = cauchy_lib.Cauchy.param_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, loc_shape.eval())
self.assertAllEqual(expected, scale_shape.eval())
loc = array_ops.zeros(loc_shape)
scale = array_ops.ones(scale_shape)
self.assertAllEqual(expected,
array_ops.shape(
cauchy_lib.Cauchy(loc, scale).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = cauchy_lib.Cauchy.param_static_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, loc_shape)
self.assertEqual(expected, scale_shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testCauchyLogPDF(self):
with self.cached_session():
batch_size = 6
loc = constant_op.constant([3.0] * batch_size)
scale = constant_op.constant([np.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.eval().shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testCauchyLogPDFMultidimensional(self):
with self.cached_session():
batch_size = 6
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant(
[[np.sqrt(10.0), np.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf_values.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testCauchyCDF(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).cdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
def testCauchySurvivalFunction(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).sf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0)
def testCauchyLogCDF(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.log_cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).logcdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
loc = variables.Variable(dtype(0.0))
scale = variables.Variable(dtype(1.0))
dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
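# These points reach far into both tails (and the center); the test checks that
# the cdf/sf/prob values and their gradients stay finite there instead of
# under- or overflowing.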
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [loc, scale])
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testCauchyLogSurvivalFunction(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.log_survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).logsf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
def testCauchyEntropy(self):
with self.cached_session():
loc = np.array([1.0, 1.0, 1.0])
scale = np.array([[1.0, 2.0, 3.0]])
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
entropy = cauchy.entropy()
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), entropy.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
entropy.eval().shape)
self.assertAllEqual(cauchy.batch_shape, entropy.shape)
self.assertAllEqual(cauchy.batch_shape, entropy.eval().shape)
if not stats:
return
expected_entropy = stats.cauchy(loc, scale[0]).entropy().reshape((1, 3))
self.assertAllClose(expected_entropy, entropy.eval())
def testCauchyMode(self):
with self.cached_session():
# loc will be broadcast to [7., 7., 7.] to match scale's shape.
loc = [7.]
scale = [11., 12., 13.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mode().shape)
self.assertAllEqual([7., 7, 7], cauchy.mode().eval())
def testCauchyMean(self):
with self.cached_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
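# The Cauchy distribution has no well-defined mean; with the default
# allow_nan_stats=True, mean() reports NaN rather than raising.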
self.assertAllEqual((3,), cauchy.mean().shape)
self.assertAllEqual([np.nan] * 3, cauchy.mean().eval())
def testCauchyNanMean(self):
with self.cached_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.mean().eval()
def testCauchyQuantile(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
p = np.linspace(0.000001, 0.999999, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = cauchy.quantile(p)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.eval().shape)
self.assertAllEqual(cauchy.batch_shape, x.shape)
self.assertAllEqual(cauchy.batch_shape, x.eval().shape)
if not stats:
return
expected_x = stats.cauchy(loc, scale).ppf(p)
self.assertAllClose(expected_x, x.eval(), atol=0.)
def testCauchyVariance(self):
with self.cached_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.variance().shape)
self.assertAllEqual([np.nan] * 3, cauchy.variance().eval())
def testCauchyNanVariance(self):
with self.cached_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.variance().eval()
def testCauchyStandardDeviation(self):
with self.cached_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.stddev().shape)
self.assertAllEqual([np.nan] * 3, cauchy.stddev().eval())
def testCauchyNanStandardDeviation(self):
with self.cached_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.stddev().eval()
def testCauchySample(self):
with self.cached_session():
loc = constant_op.constant(3.0)
scale = constant_op.constant(1.0)
loc_v = 3.0
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(np.median(sample_values), loc_v, atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchySampleMultiDimensional(self):
with self.cached_session():
batch_size = 2
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant([[0.5, 1.0]] * batch_size)
loc_v = [3.0, -3.0]
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(samples.shape, (100000, batch_size, 2))
self.assertAllClose(
np.median(sample_values[:, 0, 0]), loc_v[0], atol=1e-1)
self.assertAllClose(
np.median(sample_values[:, 0, 1]), loc_v[1], atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchyNegativeScaleFails(self):
with self.cached_session():
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"Condition x > 0 did not hold"):
_ = cauchy_lib.Cauchy(loc=[1.], scale=[-5.], validate_args=True)
# Error detected statically; no need for _.mode().eval()
def testCauchyShape(self):
with self.cached_session():
loc = constant_op.constant([-3.0] * 5)
scale = constant_op.constant(11.0)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertEqual(cauchy.batch_shape_tensor().eval(), [5])
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertEqual(cauchy.event_shape, tensor_shape.TensorShape([]))
def testCauchyShapeWithPlaceholders(self):
loc = array_ops.placeholder(dtype=dtypes.float32)
scale = array_ops.placeholder(dtype=dtypes.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
with self.cached_session() as sess:
# batch_shape should be an unknown TensorShape when loc and scale are placeholders.
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(cauchy.event_shape, ())
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertAllEqual(
sess.run(
cauchy.batch_shape_tensor(),
feed_dict={
loc: 5.0,
scale: [1.0, 2.0]
}), [2])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
ds = distributions
class MultivariateNormalDiagTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
ds.MultivariateNormalDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).get_shape())
def testDistWithBatchShapeOneThenTransformedThroughSoftplus(self):
# This complex combination of events resulted in a loss of static shape
# information when tensor_util.constant_value(self._needs_rotation) was
# being used incorrectly (resulting in always rotating).
# Batch shape = [1], event shape = [3]
mu = array_ops.zeros((1, 3))
diag = array_ops.ones((1, 3))
with self.cached_session():
base_dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
dist = ds.TransformedDistribution(
base_dist, validate_args=True, bijector=bijectors.Softplus())
samps = dist.sample(5) # Shape [5, 1, 3].
self.assertAllEqual([5, 1], dist.log_prob(samps).get_shape())
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], dist.mean().eval())
def testEntropy(self):
mu = [-1., 1]
diag = [-1., 5]
diag_mat = np.diag(diag)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=diag_mat**2)
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllClose(scipy_mvn.entropy(), dist.entropy().eval(), atol=1e-4)
def testSample(self):
mu = [-1., 1]
diag = [1., -2]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
samps = dist.sample(int(1e3), seed=0).eval()
cov_mat = array_ops.matrix_diag(diag).eval()**2
self.assertAllClose(mu, samps.mean(axis=0), atol=0., rtol=0.05)
self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.05, rtol=0.05)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
dist.sample().eval()
def testSampleWithBroadcastScale(self):
# mu corresponds to a 2-batch of 3-variate normals
mu = np.zeros([2, 3])
# diag corresponds to no batches of 3-variate normals
diag = np.ones([3])
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.get_shape())
self.assertAllClose(mu, mean.eval())
n = int(1e3)
samps = dist.sample(n, seed=0).eval()
cov_mat = array_ops.matrix_diag(diag).eval()**2
sample_cov = np.matmul(
samps.transpose([1, 2, 0]), samps.transpose([1, 0, 2])) / n
self.assertAllClose(mu, samps.mean(axis=0), atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov, atol=0.10, rtol=0.05)
def testCovariance(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.diag(np.ones([3], dtype=np.float32)),
mvn.covariance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllEqual([2], mvn.batch_shape)
self.assertAllEqual([3], mvn.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0], [0, 3, 0], [0, 0, 3]],
[[2, 0, 0], [0, 2, 0], [0, 0, 2]]])**2.,
mvn.covariance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllEqual([2], mvn.batch_shape)
self.assertAllEqual([3], mvn.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0], [0, 2, 0], [0, 0, 1]],
[[4, 0, 0], [0, 5, 0], [0, 0, 6]]])**2.,
mvn.covariance().eval())
def testVariance(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(np.ones([3], dtype=np.float32), mvn.variance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3], [2, 2, 2]])**2.,
mvn.variance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1], [4, 5, 6]])**2.,
mvn.variance().eval())
def testStddev(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(np.ones([3], dtype=np.float32), mvn.stddev().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3], [2, 2, 2]]),
mvn.stddev().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1], [4, 5, 6]]),
mvn.stddev().eval())
def testMultivariateNormalDiagWithSoftplusScale(self):
mu = [-1.0, 1.0]
diag = [-1.0, -2.0]
with self.cached_session():
dist = ds.MultivariateNormalDiagWithSoftplusScale(
mu, diag, validate_args=True)
samps = dist.sample(1000, seed=0).eval()
cov_mat = array_ops.matrix_diag(nn_ops.softplus(diag)).eval()**2
self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
def testMultivariateNormalDiagNegLogLikelihood(self):
num_draws = 50
dims = 3
with self.cached_session() as sess:
x_pl = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, dims], name="x")
mu_var = variable_scope.get_variable(
name="mu",
shape=[dims],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(1.))
sess.run([variables.global_variables_initializer()])
mvn = ds.MultivariateNormalDiag(
loc=mu_var,
scale_diag=array_ops.ones(shape=[dims], dtype=dtypes.float32))
# Typically you'd use `mvn.log_prob(x_pl)` which is always at least as
# numerically stable as `tf.math.log(mvn.prob(x_pl))`. However in this
# test we're testing a bug specific to `prob` and not `log_prob`;
# http://stackoverflow.com/q/45109305. (The underlying issue was not
# related to `Distributions` but that `reduce_prod` didn't correctly
# handle negative indexes.)
neg_log_likelihood = -math_ops.reduce_sum(math_ops.log(mvn.prob(x_pl)))
grad_neg_log_likelihood = gradients_impl.gradients(
neg_log_likelihood, variables.trainable_variables())
x = np.zeros([num_draws, dims], dtype=np.float32)
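# With x == 0 and mu == 1, d/dmu[-log N(x; mu, 1)] = mu - x = 1 per draw and
# dimension, so the summed gradient should be num_draws in every coordinate.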
grad_neg_log_likelihood_ = sess.run(
grad_neg_log_likelihood, feed_dict={x_pl: x})
self.assertEqual(1, len(grad_neg_log_likelihood_))
self.assertAllClose(
grad_neg_log_likelihood_[0],
np.tile(num_draws, dims),
rtol=1e-6,
atol=0.)
def testDynamicBatchShape(self):
mvn = ds.MultivariateNormalDiag(
loc=array_ops.placeholder(dtypes.float32, shape=[None, None, 2]),
scale_diag=array_ops.placeholder(dtypes.float32, shape=[None, None, 2]))
self.assertListEqual(mvn.batch_shape.as_list(), [None, None])
self.assertListEqual(mvn.event_shape.as_list(), [2])
def testDynamicEventShape(self):
mvn = ds.MultivariateNormalDiag(
loc=array_ops.placeholder(dtypes.float32, shape=[2, 3, None]),
scale_diag=array_ops.placeholder(dtypes.float32, shape=[2, 3, None]))
self.assertListEqual(mvn.batch_shape.as_list(), [2, 3])
self.assertListEqual(mvn.event_shape.as_list(), [None])
def testKLDivIdenticalGradientDefined(self):
dims = 3
with self.cached_session() as sess:
loc = array_ops.zeros([dims], dtype=dtypes.float32)
mvn = ds.MultivariateNormalDiag(
loc=loc, scale_diag=np.ones([dims], dtype=np.float32))
g = gradients_impl.gradients(ds.kl_divergence(mvn, mvn), loc)
g_ = sess.run(g)
self.assertAllEqual(np.ones_like(g_, dtype=np.bool), np.isfinite(g_))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import autoregressive as autoregressive_lib
from tensorflow.contrib.distributions.python.ops import independent as independent_lib
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import MaskedAutoregressiveFlow
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.platform import test
class AutoregressiveTest(test_util.VectorDistributionTestHelpers, test.TestCase):
"""Tests the Autoregressive distribution."""
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_scale_tril(self, event_size):
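# fill_triangular packs the n = event_size * (event_size + 1) // 2 entries of
# 0.25 * p (values in (-0.25, 0.25)) into a lower-triangular matrix.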
n = np.int32(event_size * (event_size + 1) // 2)
p = 2. * self._rng.random_sample(n).astype(np.float32) - 1.
return distribution_util.fill_triangular(0.25 * p)
def _normal_fn(self, affine_bijector):
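# Conditional distribution fn for Autoregressive: given the current sample, the
# next-iteration distribution is an Independent Normal whose scale is
# exp(affine_bijector.forward(samples)).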
def _fn(samples):
scale = math_ops.exp(affine_bijector.forward(samples))
return independent_lib.Independent(
normal_lib.Normal(loc=0., scale=scale, validate_args=True),
reinterpreted_batch_ndims=1)
return _fn
def testSampleAndLogProbConsistency(self):
batch_shape = []
event_size = 2
with self.cached_session() as sess:
batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
sample0 = array_ops.zeros(batch_event_shape)
affine = Affine(scale_tril=self._random_scale_tril(event_size))
ar = autoregressive_lib.Autoregressive(
self._normal_fn(affine), sample0, validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, ar, radius=1., center=0., rtol=0.01)
def testCompareToBijector(self):
"""Demonstrates equivalence between TD, Bijector approach and AR dist."""
sample_shape = np.int32([4, 5])
batch_shape = np.int32([])
event_size = np.int32(2)
with self.cached_session() as sess:
batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
sample0 = array_ops.zeros(batch_event_shape)
affine = Affine(scale_tril=self._random_scale_tril(event_size))
ar = autoregressive_lib.Autoregressive(
self._normal_fn(affine), sample0, validate_args=True)
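# shift_and_log_scale_fn returns [None, affine.forward(x)], i.e. no shift and
# the same exp(affine.forward(.)) scaling used by _normal_fn above, so the
# TransformedDistribution built from this bijector should match `ar`.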
ar_flow = MaskedAutoregressiveFlow(
is_constant_jacobian=True,
shift_and_log_scale_fn=lambda x: [None, affine.forward(x)],
validate_args=True)
td = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=ar_flow,
event_shape=[event_size],
batch_shape=batch_shape,
validate_args=True)
x_shape = np.concatenate(
[sample_shape, batch_shape, [event_size]], axis=0)
x = 2. * self._rng.random_sample(x_shape).astype(np.float32) - 1.
td_log_prob_, ar_log_prob_ = sess.run([td.log_prob(x), ar.log_prob(x)])
self.assertAllClose(td_log_prob_, ar_log_prob_, atol=0., rtol=1e-6)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/autoregressive_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mixture
from tensorflow.contrib.distributions.python.ops import mixture_same_family
from tensorflow.contrib.distributions.python.ops import mvn_diag
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.linalg import linear_operator_diag
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _powerset(x):
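# Returns an iterator over all subsets of x (as tuples), including the empty set.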
s = list(x)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
def _matrix_diag(d):
"""Batch version of np.diag."""
orig_shape = d.shape
d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))
diag_list = []
for i in range(d.shape[0]):
diag_list.append(np.diag(d[i, ...]))
return np.reshape(diag_list, orig_shape + (d.shape[-1],))
def _make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None):
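# NumPy reference for the dense lower-triangular scale matrix that
# distribution_util.make_tril_scale is expected to produce in the tests below.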
if scale_tril is not None:
scale_tril = np.tril(scale_tril)
if scale_diag is not None:
scale_tril += _matrix_diag(np.array(scale_diag, dtype=np.float32))
if scale_identity_multiplier is not None:
scale_tril += (
scale_identity_multiplier * _matrix_diag(np.ones(
[scale_tril.shape[-1]], dtype=np.float32)))
return scale_tril
return _make_diag_scale(
loc, scale_diag, scale_identity_multiplier, shape_hint)
def _make_diag_scale(
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None):
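# NumPy reference for the dense diagonal scale matrix expected from
# distribution_util.make_diag_scale; returns None if the shape cannot be inferred.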
if scale_diag is not None:
scale_diag = np.asarray(scale_diag)
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier
return _matrix_diag(scale_diag)
if loc is None and shape_hint is None:
return None
if shape_hint is None:
shape_hint = loc.shape[-1]
if scale_identity_multiplier is None:
scale_identity_multiplier = 1.
return scale_identity_multiplier * np.diag(np.ones(shape_hint))
class MakeTrilScaleTest(test.TestCase):
def _testLegalInputs(
self, loc=None, shape_hint=None, scale_params=None):
for args in _powerset(scale_params.items()):
with self.cached_session():
args = dict(args)
scale_args = dict({
"loc": loc,
"shape_hint": shape_hint}, **args)
expected_scale = _make_tril_scale(**scale_args)
if expected_scale is None:
# Not enough shape information was specified.
with self.assertRaisesRegexp(ValueError, ("is specified.")):
scale = distribution_util.make_tril_scale(**scale_args)
scale.to_dense().eval()
else:
scale = distribution_util.make_tril_scale(**scale_args)
self.assertAllClose(expected_scale, scale.to_dense().eval())
def testLegalInputs(self):
self._testLegalInputs(
loc=np.array([-1., -1.], dtype=np.float32),
shape_hint=2,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.],
"scale_tril": [[1., 0.],
[-3., 3.]],
})
def testLegalInputsMultidimensional(self):
self._testLegalInputs(
loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),
shape_hint=3,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3., 4.], [3., 4., 5.]]],
"scale_tril": [[[[1., 0., 0.],
[-3., 3., 0.],
[1., -2., 1.]],
[[2., 1., 0.],
[-4., 7., 0.],
[1., -1., 1.]]]]
})
def testZeroTriU(self):
with self.cached_session():
scale = distribution_util.make_tril_scale(scale_tril=[[1., 1], [1., 1.]])
self.assertAllClose([[1., 0], [1., 1.]], scale.to_dense().eval())
def testValidateArgs(self):
with self.cached_session():
with self.assertRaisesOpError("diagonal part must be non-zero"):
scale = distribution_util.make_tril_scale(
scale_tril=[[0., 1], [1., 1.]], validate_args=True)
scale.to_dense().eval()
def testAssertPositive(self):
with self.cached_session():
with self.assertRaisesOpError("diagonal part must be positive"):
scale = distribution_util.make_tril_scale(
scale_tril=[[-1., 1], [1., 1.]],
validate_args=True,
assert_positive=True)
scale.to_dense().eval()
class MakeDiagScaleTest(test.TestCase):
def _testLegalInputs(
self, loc=None, shape_hint=None, scale_params=None):
for args in _powerset(scale_params.items()):
with self.cached_session():
args = dict(args)
scale_args = dict({
"loc": loc,
"shape_hint": shape_hint}, **args)
expected_scale = _make_diag_scale(**scale_args)
if expected_scale is None:
# Not enough shape information was specified.
with self.assertRaisesRegexp(ValueError, ("is specified.")):
scale = distribution_util.make_diag_scale(**scale_args)
scale.to_dense().eval()
else:
scale = distribution_util.make_diag_scale(**scale_args)
self.assertAllClose(expected_scale, scale.to_dense().eval())
def testLegalInputs(self):
self._testLegalInputs(
loc=np.array([-1., -1.], dtype=np.float32),
shape_hint=2,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.]
})
def testLegalInputsMultidimensional(self):
self._testLegalInputs(
loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),
shape_hint=3,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3., 4.], [3., 4., 5.]]]
})
def testValidateArgs(self):
with self.cached_session():
with self.assertRaisesOpError("diagonal part must be non-zero"):
scale = distribution_util.make_diag_scale(
scale_diag=[[0., 1], [1., 1.]], validate_args=True)
scale.to_dense().eval()
def testAssertPositive(self):
with self.cached_session():
with self.assertRaisesOpError("diagonal part must be positive"):
scale = distribution_util.make_diag_scale(
scale_diag=[[-1., 1], [1., 1.]],
validate_args=True,
assert_positive=True)
scale.to_dense().eval()
class ShapesFromLocAndScaleTest(test.TestCase):
def test_static_loc_static_scale_non_matching_event_size_raises(self):
loc = constant_op.constant(np.zeros((2, 4)))
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
with self.assertRaisesRegexp(ValueError, "could not be broadcast"):
distribution_util.shapes_from_loc_and_scale(loc, scale)
def test_static_loc_static_scale(self):
loc = constant_op.constant(np.zeros((2, 3)))
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self.assertEqual(tensor_shape.TensorShape([5, 2]), batch_shape)
self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
def test_static_loc_dynamic_scale(self):
loc = constant_op.constant(np.zeros((2, 3)))
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.cached_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 1, 3))})
self.assertAllEqual([5, 2], batch_shape)
self.assertAllEqual([3], event_shape)
def test_dynamic_loc_static_scale(self):
loc = array_ops.placeholder(dtypes.float64)
diag = constant_op.constant(np.ones((5, 2, 3)))
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.cached_session():
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
# batch_shape depends on both args, and so is dynamic. Since loc did not
# have static shape, we inferred event shape entirely from scale, and this
# is available statically.
self.assertAllEqual(
[5, 2], batch_shape.eval(feed_dict={loc: np.zeros((2, 3))}))
self.assertAllEqual([3], event_shape)
def test_dynamic_loc_dynamic_scale(self):
loc = array_ops.placeholder(dtypes.float64)
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.cached_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 2, 3)), loc: np.zeros((2, 3))})
self.assertAllEqual([5, 2], batch_shape)
self.assertAllEqual([3], event_shape)
def test_none_loc_static_scale(self):
loc = None
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self.assertEqual(tensor_shape.TensorShape([5, 1]), batch_shape)
self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
def test_none_loc_dynamic_scale(self):
loc = None
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.cached_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 1, 3))})
self.assertAllEqual([5, 1], batch_shape)
self.assertAllEqual([3], event_shape)
class GetBroadcastShapeTest(test.TestCase):
def test_all_static_shapes_work(self):
x = array_ops.ones((2, 1, 3))
y = array_ops.ones((1, 5, 3))
z = array_ops.ones(())
self.assertAllEqual([2, 5, 3],
distribution_util.get_broadcast_shape(x, y, z))
def test_with_some_dynamic_shapes_works(self):
x = array_ops.ones((2, 1, 3))
y = array_ops.placeholder(x.dtype)
z = array_ops.ones(())
with self.cached_session() as sess:
bcast_shape = sess.run(
distribution_util.get_broadcast_shape(x, y, z),
feed_dict={y: np.ones((1, 5, 3)).astype(np.float32)})
self.assertAllEqual([2, 5, 3], bcast_shape)
class TridiagTest(test.TestCase):
def testWorksCorrectlyNoBatches(self):
with self.cached_session():
self.assertAllEqual(
[[4., 8., 0., 0.],
[1., 5., 9., 0.],
[0., 2., 6., 10.],
[0., 0., 3, 7.]],
distribution_util.tridiag(
[1., 2., 3.],
[4., 5., 6., 7.],
[8., 9., 10.]).eval())
def testWorksCorrectlyBatches(self):
with self.cached_session():
self.assertAllClose(
[[[4., 8., 0., 0.],
[1., 5., 9., 0.],
[0., 2., 6., 10.],
[0., 0., 3, 7.]],
[[0.7, 0.1, 0.0, 0.0],
[0.8, 0.6, 0.2, 0.0],
[0.0, 0.9, 0.5, 0.3],
[0.0, 0.0, 1.0, 0.4]]],
distribution_util.tridiag(
[[1., 2., 3.],
[0.8, 0.9, 1.]],
[[4., 5., 6., 7.],
[0.7, 0.6, 0.5, 0.4]],
[[8., 9., 10.],
[0.1, 0.2, 0.3]]).eval(),
rtol=1e-5, atol=0.)
def testHandlesNone(self):
with self.cached_session():
self.assertAllClose(
[[[4., 0., 0., 0.],
[0., 5., 0., 0.],
[0., 0., 6., 0.],
[0., 0., 0, 7.]],
[[0.7, 0.0, 0.0, 0.0],
[0.0, 0.6, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.0],
[0.0, 0.0, 0.0, 0.4]]],
distribution_util.tridiag(
diag=[[4., 5., 6., 7.],
[0.7, 0.6, 0.5, 0.4]]).eval(),
rtol=1e-5, atol=0.)
class MixtureStddevTest(test.TestCase):
def test_mixture_dev(self):
mixture_weights = np.array([
[1.0/3, 1.0/3, 1.0/3],
[0.750, 0.250, 0.000]
])
component_means = np.array([
[1.0, 1.0, 1.0],
[-5, 0, 1.25]
])
component_devs = np.array([
[1.0, 1.0, 1.0],
[0.01, 2.0, 0.1]
])
# The first case should trivially have a standard deviation of 1.0 because
# all components are identical and have that standard deviation.
# The second case was computed by hand.
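# By the law of total variance for the second row:
#   var = sum_i w_i * (sigma_i**2 + mu_i**2) - (sum_i w_i * mu_i)**2
#       = 0.75 * (0.01**2 + 5**2) + 0.25 * (2**2 + 0**2) - (-3.75)**2
#       = 19.750075 - 14.0625 = 5.687575, and sqrt(5.687575) ~= 2.3848637.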
expected_devs = np.array([
1.0,
2.3848637277
])
weights_tf = array_ops.constant(mixture_weights)
means_tf = array_ops.constant(component_means)
sigmas_tf = array_ops.constant(component_devs)
mix_dev = distribution_util.mixture_stddev(weights_tf,
means_tf,
sigmas_tf)
with self.cached_session() as sess:
actual_devs = sess.run(mix_dev)
self.assertAllClose(actual_devs, expected_devs)
class PadMixtureDimensionsTest(test.TestCase):
def test_pad_mixture_dimensions_mixture(self):
with self.cached_session() as sess:
gm = mixture.Mixture(
cat=categorical.Categorical(probs=[[0.3, 0.7]]),
components=[
normal.Normal(loc=[-1.0], scale=[1.0]),
normal.Normal(loc=[1.0], scale=[0.5])
])
x = array_ops.constant([[1.0, 2.0], [3.0, 4.0]])
x_pad = distribution_util.pad_mixture_dimensions(
x, gm, gm.cat, gm.event_shape.ndims)
x_out, x_pad_out = sess.run([x, x_pad])
self.assertAllEqual(x_pad_out.shape, [2, 2])
self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))
def test_pad_mixture_dimensions_mixture_same_family(self):
with self.cached_session() as sess:
gm = mixture_same_family.MixtureSameFamily(
mixture_distribution=categorical.Categorical(probs=[0.3, 0.7]),
components_distribution=mvn_diag.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1.0, 0.5]))
x = array_ops.constant([[1.0, 2.0], [3.0, 4.0]])
x_pad = distribution_util.pad_mixture_dimensions(
x, gm, gm.mixture_distribution, gm.event_shape.ndims)
x_out, x_pad_out = sess.run([x, x_pad])
self.assertAllEqual(x_pad_out.shape, [2, 2, 1])
self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))
class _PadTest(object):
def testNegAxisCorrectness(self):
x_ = np.float32([[1., 2, 3],
[4, 5, 6]])
value_ = np.float32(0.25)
count_ = np.int32(2)
with self.cached_session() as sess:
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.is_static_shape else None)
value = (constant_op.constant(value_) if self.is_static_shape
else array_ops.placeholder_with_default(value_, shape=None))
count = (constant_op.constant(count_) if self.is_static_shape
else array_ops.placeholder_with_default(count_, shape=None))
x0_front = distribution_util.pad(
x, axis=-2, value=value, count=count, front=True)
x0_back = distribution_util.pad(
x, axis=-2, count=count, back=True)
x0_both = distribution_util.pad(
x, axis=-2, value=value, front=True, back=True)
if self.is_static_shape:
self.assertAllEqual([4, 3], x0_front.shape)
self.assertAllEqual([4, 3], x0_back.shape)
self.assertAllEqual([4, 3], x0_both.shape)
[x0_front_, x0_back_, x0_both_] = sess.run([
x0_front, x0_back, x0_both])
self.assertAllClose(
np.float32([[value_]*3,
[value_]*3,
[1, 2, 3],
[4, 5, 6]]),
x0_front_, atol=0., rtol=1e-6)
self.assertAllClose(
np.float32([[1, 2, 3],
[4, 5, 6],
[0.]*3,
[0.]*3]),
x0_back_, atol=0., rtol=1e-6)
self.assertAllClose(
np.float32([[value_]*3,
[1, 2, 3],
[4, 5, 6],
[value_]*3]),
x0_both_, atol=0., rtol=1e-6)
def testPosAxisCorrectness(self):
x_ = np.float32([[1., 2, 3],
[4, 5, 6]])
value_ = np.float32(0.25)
count_ = np.int32(2)
with self.cached_session() as sess:
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.is_static_shape else None)
value = (constant_op.constant(value_) if self.is_static_shape
else array_ops.placeholder_with_default(value_, shape=None))
count = (constant_op.constant(count_) if self.is_static_shape
else array_ops.placeholder_with_default(count_, shape=None))
x1_front = distribution_util.pad(
x, axis=1, value=value, count=count, front=True)
x1_back = distribution_util.pad(
x, axis=1, count=count, back=True)
x1_both = distribution_util.pad(
x, axis=1, value=value, front=True, back=True)
if self.is_static_shape:
self.assertAllEqual([2, 5], x1_front.shape)
self.assertAllEqual([2, 5], x1_back.shape)
self.assertAllEqual([2, 5], x1_both.shape)
[x1_front_, x1_back_, x1_both_] = sess.run([
x1_front, x1_back, x1_both])
self.assertAllClose(
np.float32([[value_]*2 + [1, 2, 3],
[value_]*2 + [4, 5, 6]]),
x1_front_, atol=0., rtol=1e-6)
self.assertAllClose(
np.float32([[1, 2, 3] + [0.]*2,
[4, 5, 6] + [0.]*2]),
x1_back_, atol=0., rtol=1e-6)
self.assertAllClose(
np.float32([[value_, 1, 2, 3, value_],
[value_, 4, 5, 6, value_]]),
x1_both_, atol=0., rtol=1e-6)
class PadStaticTest(_PadTest, test.TestCase):
@property
def is_static_shape(self):
return True
class PadDynamicTest(_PadTest, test.TestCase):
@property
def is_static_shape(self):
return False
@test_util.run_all_in_graph_and_eager_modes
class TestMoveDimension(test.TestCase):
def test_move_dimension_static_shape(self):
x = random_ops.random_normal(shape=[200, 30, 4, 1, 6])
x_perm = distribution_util.move_dimension(x, 1, 1)
self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 4, 1, 6])
x_perm = distribution_util.move_dimension(x, 0, 3)
self.assertAllEqual(x_perm.shape.as_list(), [30, 4, 1, 200, 6])
x_perm = distribution_util.move_dimension(x, 0, -2)
self.assertAllEqual(x_perm.shape.as_list(), [30, 4, 1, 200, 6])
x_perm = distribution_util.move_dimension(x, 4, 2)
self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 6, 4, 1])
def test_move_dimension_dynamic_shape(self):
x_ = random_ops.random_normal(shape=[200, 30, 4, 1, 6])
x = array_ops.placeholder_with_default(input=x_, shape=None)
x_perm = distribution_util.move_dimension(x, 1, 1)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[200, 30, 4, 1, 6])
x_perm = distribution_util.move_dimension(x, 0, 3)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[30, 4, 1, 200, 6])
x_perm = distribution_util.move_dimension(x, 0, -2)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[30, 4, 1, 200, 6])
x_perm = distribution_util.move_dimension(x, 4, 2)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[200, 30, 6, 4, 1])
x_perm = distribution_util.move_dimension(x, -1, 2)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[200, 30, 6, 4, 1])
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VectorExponentialLinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
ds = distributions
class VectorExponentialDiagTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
ds.VectorExponentialDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).get_shape())
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
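# The mean of VectorExponentialDiag is loc + scale_diag, since a standard
# exponential has mean 1.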
self.assertAllEqual([-1. + 1., 1. - 5.], dist.mean().eval())
def testMode(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], dist.mode().eval())
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1. + 1, -1. - 5], dist.mean().eval())
def testSample(self):
mu = [-2., 1]
diag = [1., -2]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
samps = dist.sample(int(1e4), seed=0).eval()
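# A standard exponential has variance 1, so the covariance is scale_diag**2.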
cov_mat = array_ops.matrix_diag(diag).eval()**2
self.assertAllClose([-2 + 1, 1. - 2], samps.mean(axis=0),
atol=0., rtol=0.05)
self.assertAllClose(cov_mat, np.cov(samps.T),
atol=0.05, rtol=0.05)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
dist.sample().eval()
def testSampleWithBroadcastScale(self):
# mu corresponds to a 2-batch of 3-variate vector exponentials
mu = np.zeros([2, 3])
# diag corresponds to no batches of 3-variate vector exponentials
diag = np.ones([3])
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.get_shape())
self.assertAllClose(mu + diag, mean.eval())
n = int(1e4)
samps = dist.sample(n, seed=0).eval()
samps_centered = samps - samps.mean(axis=0)
cov_mat = array_ops.matrix_diag(diag).eval()**2
sample_cov = np.matmul(samps_centered.transpose([1, 2, 0]),
samps_centered.transpose([1, 0, 2])) / n
self.assertAllClose(mu + diag, samps.mean(axis=0),
atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov,
atol=0.10, rtol=0.05)
def testCovariance(self):
with self.cached_session():
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.diag(np.ones([3], dtype=np.float32)),
vex.covariance().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllEqual([2], vex.batch_shape)
self.assertAllEqual([3], vex.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0],
[0, 3, 0],
[0, 0, 3]],
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]]])**2.,
vex.covariance().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllEqual([2], vex.batch_shape)
self.assertAllEqual([3], vex.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0],
[0, 2, 0],
[0, 0, 1]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]]])**2.,
vex.covariance().eval())
def testVariance(self):
with self.cached_session():
vex = ds.VectorExponentialDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.ones([3], dtype=np.float32),
vex.variance().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3],
[2., 2, 2]])**2.,
vex.variance().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1],
[4., 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1],
[4., 5, 6]])**2.,
vex.variance().eval())
def testStddev(self):
with self.cached_session():
vex = ds.VectorExponentialDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.ones([3], dtype=np.float32),
vex.stddev().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3],
[2., 2, 2]]),
vex.stddev().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1],
[4., 5, 6]]),
vex.stddev().eval())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/vector_exponential_diag_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VectorLaplaceLinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
ds = distributions
class VectorLaplaceDiagTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
ds.VectorLaplaceDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).get_shape())
def testDistWithBatchShapeOneThenTransformedThroughSoftplus(self):
# This complex combination of events resulted in a loss of static shape
# information when tensor_util.constant_value(self._needs_rotation) was
# being used incorrectly (resulting in always rotating).
# Batch shape = [1], event shape = [3]
mu = array_ops.zeros((1, 3))
diag = array_ops.ones((1, 3))
with self.cached_session():
base_dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
dist = ds.TransformedDistribution(
base_dist,
validate_args=True,
bijector=bijectors.Softplus())
samps = dist.sample(5) # Shape [5, 1, 3].
self.assertAllEqual([5, 1], dist.log_prob(samps).get_shape())
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], dist.mean().eval())
def testSample(self):
mu = [-1., 1]
diag = [1., -2]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
samps = dist.sample(int(1e4), seed=0).eval()
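# A standard Laplace has variance 2, so the covariance is 2 * scale_diag**2.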
cov_mat = 2. * array_ops.matrix_diag(diag).eval()**2
self.assertAllClose(mu, samps.mean(axis=0),
atol=0., rtol=0.05)
self.assertAllClose(cov_mat, np.cov(samps.T),
atol=0.05, rtol=0.05)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
dist.sample().eval()
def testSampleWithBroadcastScale(self):
# mu corresponds to a 2-batch of 3-variate vector Laplaces
mu = np.zeros([2, 3])
# diag corresponds to no batches of 3-variate vector Laplaces
diag = np.ones([3])
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.get_shape())
self.assertAllClose(mu, mean.eval())
n = int(1e4)
samps = dist.sample(n, seed=0).eval()
cov_mat = 2. * array_ops.matrix_diag(diag).eval()**2
sample_cov = np.matmul(samps.transpose([1, 2, 0]),
samps.transpose([1, 0, 2])) / n
self.assertAllClose(mu, samps.mean(axis=0),
atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov,
atol=0.10, rtol=0.05)
def testCovariance(self):
with self.cached_session():
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
2. * np.diag(np.ones([3], dtype=np.float32)),
vla.covariance().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllEqual([2], vla.batch_shape)
self.assertAllEqual([3], vla.event_shape)
self.assertAllClose(
2. * np.array([[[3., 0, 0],
[0, 3, 0],
[0, 0, 3]],
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]]])**2.,
vla.covariance().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllEqual([2], vla.batch_shape)
self.assertAllEqual([3], vla.event_shape)
self.assertAllClose(
2. * np.array([[[3., 0, 0],
[0, 2, 0],
[0, 0, 1]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]]])**2.,
vla.covariance().eval())
def testVariance(self):
with self.cached_session():
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
2. * np.ones([3], dtype=np.float32),
vla.variance().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
2. * np.array([[3., 3, 3],
[2, 2, 2]])**2.,
vla.variance().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1],
[4, 5, 6]])
self.assertAllClose(
2. * np.array([[3., 2, 1],
[4, 5, 6]])**2.,
vla.variance().eval())
def testStddev(self):
with self.cached_session():
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.sqrt(2) * np.ones([3], dtype=np.float32),
vla.stddev().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.sqrt(2) * np.array([[3., 3, 3],
[2, 2, 2]]),
vla.stddev().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.sqrt(2) * np.array([[3., 2, 1],
[4, 5, 6]]),
vla.stddev().eval())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/vector_laplace_diag_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
distributions = distributions_lib
rng = np.random.RandomState(123)
class QuantizedDistributionTest(test.TestCase):
def _assert_all_finite(self, array):
self.assertTrue(np.isfinite(array).all())
def testQuantizationOfUniformWithCutoffsHavingNoEffect(self):
with self.cached_session() as sess:
# The Quantized uniform with cutoffs == None divides the real line into:
# R = ...(-1, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# Since this uniform (below) is supported on [0, 3],
# it places 1/3 of its mass in the intervals j = 1, 2, 3.
# Adding a cutoff at y = 0 changes the picture to
# R = ...(-inf, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# So the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
# Adding a cutoff at y = 3 changes the picture to
# R = ...(-1, 0](0, 1](1, 2](2, inf)
# j = ... 0 1 2 3
# and the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
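# For example, with no cutoffs, prob(2.) = P[1 < X <= 2] = 1/3 and
# cdf(2.) = P[X <= 2] = 2/3 for X ~ Uniform(0, 3), matching the checks below.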
for lcut, ucut in [(None, None), (0.0, None), (None, 3.0), (0.0, 3.0),
(-10., 10.)]:
qdist = distributions.QuantizedDistribution(
distribution=distributions.Uniform(low=0.0, high=3.0),
low=lcut,
high=ucut)
# pmf
pmf_n1, pmf_0, pmf_1, pmf_2, pmf_3, pmf_4, pmf_5 = sess.run(
qdist.prob([-1., 0., 1., 2., 3., 4., 5.]))
# uniform had no mass below -1.
self.assertAllClose(0., pmf_n1)
# uniform had no mass below 0.
self.assertAllClose(0., pmf_0)
# uniform put 1/3 of its mass in each of (0, 1], (1, 2], (2, 3],
# which are the intervals j = 1, 2, 3.
self.assertAllClose(1 / 3, pmf_1)
self.assertAllClose(1 / 3, pmf_2)
self.assertAllClose(1 / 3, pmf_3)
# uniform had no mass in (3, 4] or (4, 5], which are j = 4, 5.
self.assertAllClose(0 / 3, pmf_4)
self.assertAllClose(0 / 3, pmf_5)
# cdf
cdf_n1, cdf_0, cdf_1, cdf_2, cdf_2p5, cdf_3, cdf_4, cdf_5 = sess.run(
qdist.cdf([-1., 0., 1., 2., 2.5, 3., 4., 5.]))
self.assertAllClose(0., cdf_n1)
self.assertAllClose(0., cdf_0)
self.assertAllClose(1 / 3, cdf_1)
self.assertAllClose(2 / 3, cdf_2)
# Note fractional values allowed for cdfs of discrete distributions.
# And adding 0.5 makes no difference because the quantized dist has
# mass only on the integers, never in between.
self.assertAllClose(2 / 3, cdf_2p5)
self.assertAllClose(3 / 3, cdf_3)
self.assertAllClose(3 / 3, cdf_4)
self.assertAllClose(3 / 3, cdf_5)
def testQuantizationOfUniformWithCutoffsInTheMiddle(self):
with self.cached_session() as sess:
# The uniform is supported on [-3, 3]
# Consider partitions the real line in intervals
# ...(-3, -2](-2, -1](-1, 0](0, 1](1, 2](2, 3] ...
# Before cutoffs, the uniform puts a mass of 1/6 in each interval written
# above. Because of cutoffs, the qdist considers intervals and indices
# ...(-infty, -1](-1, 0](0, infty) ...
# -1 0 1
qdist = distributions.QuantizedDistribution(
distribution=distributions.Uniform(low=-3., high=3.),
low=-1.0,
high=1.0)
# cdf
cdf_n3, cdf_n2, cdf_n1, cdf_0, cdf_0p5, cdf_1, cdf_10 = sess.run(
qdist.cdf([-3., -2., -1., 0., 0.5, 1.0, 10.0]))
# The cutoff at low = -1 collapses all mass below -1 into index -1, so the
# qdist cdf is 0 at -3 and -2.
self.assertAllClose(0., cdf_n3)
self.assertAllClose(0., cdf_n2)
# Uniform had 1/6 of its mass in each of (-3, -2], and (-2, -1], which
# were collapsed into (-infty, -1], which is now the "-1" interval.
self.assertAllClose(1 / 3, cdf_n1)
# The cdf at 0 accumulates the uniform's mass on (-3, 0], which is 1/2 of
# its total mass.
self.assertAllClose(1 / 2, cdf_0)
# Adding 0.5 makes no difference because the quantized dist has mass on
# the integers, not in between them.
self.assertAllClose(1 / 2, cdf_0p5)
# After applying the cutoffs, all mass lies at or below index j = 1 (the
# interval (0, infty)), so cdf(1) = P[Y <= 1] should equal 1.
self.assertAllClose(1., cdf_1)
# Since no mass of qdist is above 1,
# cdf(10) = P[Y <= 10] = P[Y <= 1] = cdf(1).
self.assertAllClose(1., cdf_10)
def testQuantizationOfBatchOfUniforms(self):
batch_shape = (5, 5)
with self.cached_session():
# The uniforms are supported on [0, 10]. The qdist considers the
# intervals
# ... (0, 1](1, 2]...(9, 10]...
# with the intervals displayed above each holding 1 / 10 of the mass.
# The qdist will be defined with no cutoffs,
uniform = distributions.Uniform(
low=array_ops.zeros(batch_shape, dtype=dtypes.float32),
high=10 * array_ops.ones(batch_shape, dtype=dtypes.float32))
qdist = distributions.QuantizedDistribution(
distribution=uniform, low=None, high=None)
# x is random integers in {-3,...,12}.
x = rng.randint(-3, 13, size=batch_shape).astype(np.float32)
# pmf
# qdist.prob(j) = 1 / 10 for j in {1,...,10}, and 0 otherwise,
expected_pmf = (1 / 10) * np.ones(batch_shape)
expected_pmf[x < 1] = 0.
expected_pmf[x > 10] = 0.
self.assertAllClose(expected_pmf, qdist.prob(x).eval())
# cdf
# qdist.cdf(j)
# = 0 for j < 1
# = j / 10, for j in {1,...,10},
# = 1, for j > 10.
expected_cdf = x.copy() / 10
expected_cdf[x < 1] = 0.
expected_cdf[x > 10] = 1.
self.assertAllClose(expected_cdf, qdist.cdf(x).eval())
def testSamplingFromBatchOfNormals(self):
batch_shape = (2,)
with self.cached_session():
normal = distributions.Normal(
loc=array_ops.zeros(
batch_shape, dtype=dtypes.float32),
scale=array_ops.ones(
batch_shape, dtype=dtypes.float32))
qdist = distributions.QuantizedDistribution(
distribution=normal, low=0., high=None)
samps = qdist.sample(5000, seed=42)
samps_v = samps.eval()
# With low = 0, the interval j=0 is (-infty, 0], which holds 1/2
# of the mass of the normals.
# rtol chosen to be 2x as large as necessary to pass.
self.assertAllClose([0.5, 0.5], (samps_v == 0).mean(axis=0), rtol=0.03)
# The interval j=1 is (0, 1], which is from the mean to one standard
# deviation out. This should contain 0.6827 / 2 of the mass.
self.assertAllClose(
[0.6827 / 2, 0.6827 / 2], (samps_v == 1).mean(axis=0), rtol=0.03)
def testSamplesAgreeWithCdfForSamplesOverLargeRange(self):
# Consider the cdf for distribution X, F(x).
# If U ~ Uniform[0, 1], then Y := F^{-1}(U) is distributed like X since
# P[Y <= y] = P[F^{-1}(U) <= y] = P[U <= F(y)] = F(y).
# If F were a bijection, we would also have that Z = F(X) is Uniform.
#
# Make an exponential with large mean (= 100). This ensures we will get
# quantized values over a large range. This large range allows us to
# pretend that the cdf F is a bijection, and hence F(X) is uniform.
# Note that F cannot be a bijection since it is constant between the
# integers. Hence, F(X) (see below) will not be exactly uniform.
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Exponential(rate=0.01))
# X ~ QuantizedExponential
x = qdist.sample(10000, seed=42)
# Z = F(X), should be Uniform.
z = qdist.cdf(x)
# Compare the CDF of Z to that of a Uniform.
# dist = maximum distance between P[Z <= a] and P[U <= a].
# We ignore the p-value, since of course this distribution is not exactly
# uniform, and with so many sample points we would get a false failure.
dist, _ = stats.kstest(z.eval(), "uniform")
# Since the distribution takes values (approximately) in [0, 100], the
# cdf should have jumps (approximately) every 1/100 of the way up.
# Assert that the jumps are not more than 2/100.
self.assertLess(dist, 0.02)
def testSamplesAgreeWithPdfForSamplesOverSmallRange(self):
# Testing that samples and pdf agree for a small range is important because
# it makes sure the bin edges are consistent.
# Make an exponential with mean 5.
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Exponential(rate=0.2))
# Standard error should be less than 1 / (2 * sqrt(n_samples))
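# (A Bernoulli proportion has stddev sqrt(p * (1 - p) / n_samples), which is
# maximized at p = 0.5, giving the 1 / (2 * sqrt(n_samples)) bound below.)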
n_samples = 10000
stddev_err_bound = 1 / (2 * np.sqrt(n_samples))
samps = qdist.sample((n_samples,), seed=42).eval()
# The smallest value the samples can take on is 1, which corresponds to
# the interval (0, 1]. Recall we use ceiling in the sampling definition.
self.assertLess(0.5, samps.min())
x_vals = np.arange(1, 11).astype(np.float32)
pmf_vals = qdist.prob(x_vals).eval()
for ii in range(10):
self.assertAllClose(
pmf_vals[ii], (samps == x_vals[ii]).mean(), atol=stddev_err_bound)
def testNormalCdfAndSurvivalFunction(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = rng.randn(*batch_shape)
sigma = rng.rand(*batch_shape) + 1.0
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
sp_normal = stats.norm(mu, sigma)
x = rng.randint(-5, 5, size=batch_shape).astype(np.float64)
self.assertAllClose(sp_normal.cdf(x), qdist.cdf(x).eval())
self.assertAllClose(sp_normal.sf(x), qdist.survival_function(x).eval())
def testNormalLogCdfAndLogSurvivalFunction(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = rng.randn(*batch_shape)
sigma = rng.rand(*batch_shape) + 1.0
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
sp_normal = stats.norm(mu, sigma)
x = rng.randint(-10, 10, size=batch_shape).astype(np.float64)
self.assertAllClose(sp_normal.logcdf(x), qdist.log_cdf(x).eval())
self.assertAllClose(
sp_normal.logsf(x), qdist.log_survival_function(x).eval())
def testNormalProbWithCutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=-2.,
high=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(sm_normal.cdf(-2), qdist.prob(-2.).eval(), atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
sm_normal.cdf(-1) - sm_normal.cdf(-2), qdist.prob(-1.).eval(), atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
sm_normal.cdf(0) - sm_normal.cdf(-1), qdist.prob(0.).eval(), atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(1. - sm_normal.cdf(1), qdist.prob(2.).eval(), atol=0)
def testNormalLogProbWithCutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=-2.,
high=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(
np.log(sm_normal.cdf(-2)), qdist.log_prob(-2.).eval(), atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
np.log(sm_normal.cdf(-1) - sm_normal.cdf(-2)),
qdist.log_prob(-1.).eval(),
atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
np.log(sm_normal.cdf(0) - sm_normal.cdf(-1)),
qdist.log_prob(0.).eval(),
atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(
np.log(1. - sm_normal.cdf(1)), qdist.log_prob(2.).eval(), atol=0)
def testLogProbAndGradGivesFiniteResults(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
mu = variables.Variable(0., name="mu", dtype=dtype)
sigma = variables.Variable(1., name="sigma", dtype=dtype)
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
x = np.arange(-100, 100, 2).astype(dtype)
proba = qdist.log_prob(x)
grads = gradients_impl.gradients(proba, [mu, sigma])
with self.session(graph=g):
variables.global_variables_initializer().run()
self._assert_all_finite(proba.eval())
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def testProbAndGradGivesFiniteResultsForCommonEvents(self):
with self.cached_session():
mu = variables.Variable(0.0, name="mu")
sigma = variables.Variable(1.0, name="sigma")
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
x = math_ops.ceil(4 * rng.rand(100).astype(np.float32) - 2)
variables.global_variables_initializer().run()
proba = qdist.prob(x)
self._assert_all_finite(proba.eval())
grads = gradients_impl.gradients(proba, [mu, sigma])
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def testLowerCutoffMustBeBelowUpperCutoffOrWeRaise(self):
with self.cached_session():
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"must be strictly less"):
_ = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=1., # not strictly less than high.
high=1.,
validate_args=True)
# Error detected statically; no need for _.sample().eval()
def testCutoffsMustBeIntegerValuedIfValidateArgsTrue(self):
with self.cached_session():
low = array_ops.placeholder(dtypes.float32)
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=low,
high=10.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Set to True above.
with self.assertRaisesOpError("has non-integer components"):
qdist.sample().eval(feed_dict={low: 1.5})
def testCutoffsCanBeFloatValuedIfValidateArgsFalse(self):
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=0., scale=1., validate_args=False),
low=1.5,
high=10.11)
self.assertFalse(qdist.validate_args) # Default is False.
# Should not raise
qdist.sample().eval()
def testDtypeAndShapeInheritedFromBaseDist(self):
batch_shape = (2, 3)
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=array_ops.zeros(batch_shape),
scale=array_ops.zeros(batch_shape)),
low=1.0,
high=10.0)
self.assertEqual(batch_shape, qdist.batch_shape)
self.assertAllEqual(batch_shape, qdist.batch_shape_tensor().eval())
self.assertEqual((), qdist.event_shape)
self.assertAllEqual((), qdist.event_shape_tensor().eval())
samps = qdist.sample(10, seed=42)
self.assertEqual((10,) + batch_shape, samps.get_shape())
self.assertAllEqual((10,) + batch_shape, samps.eval().shape)
y = rng.randint(0, 5, size=batch_shape).astype(np.float32)
self.assertEqual(batch_shape, qdist.prob(y).get_shape())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sample Stats Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import sample_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class _AutoCorrelationTest(object):
@property
def use_static_shape(self):
raise NotImplementedError("Subclass failed to implement `use_static_shape`")
@property
def dtype(self):
raise NotImplementedError("Subclass failed to implement `dtype`.")
def test_constant_sequence_axis_0_max_lags_none_center_false(self):
x_ = np.array([[0., 0., 0.],
[1., 1., 1.]]).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
input=x_,
shape=x_.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session() as sess:
# Setting normalize = True means we divide by zero.
auto_corr = sample_stats.auto_correlation(
x_ph, axis=1, center=False, normalize=False)
if self.use_static_shape:
self.assertEqual((2, 3), auto_corr.shape)
auto_corr_ = sess.run(auto_corr)
self.assertAllClose(
[[0., 0., 0.],
[1., 1., 1.]], auto_corr_)
def test_constant_sequence_axis_0_max_lags_none_center_true(self):
x_ = np.array([[0., 0., 0.],
[1., 1., 1.]]).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
input=x_,
shape=x_.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session() as sess:
# Setting normalize = True means we divide by zero.
auto_corr = sample_stats.auto_correlation(
x_ph, axis=1, normalize=False, center=True)
if self.use_static_shape:
self.assertEqual((2, 3), auto_corr.shape)
auto_corr_ = sess.run(auto_corr)
self.assertAllClose(
[[0., 0., 0.],
[0., 0., 0.]], auto_corr_)
def check_results_versus_brute_force(
self, x, axis, max_lags, center, normalize):
"""Compute auto-correlation by brute force, then compare to tf result."""
# Brute-force auto-corr -- avoiding fft and transpositions.
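# The quantity computed below is rxx[m] = mean_t(x[t] * conj(x[t + m])), the
# mean running over the axis_len - m overlapping positions, optionally after
# mean-centering, and optionally normalized by rxx[0].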
axis_len = x.shape[axis]
if max_lags is None:
max_lags = axis_len - 1
else:
max_lags = min(axis_len - 1, max_lags)
auto_corr_at_lag = []
if center:
x -= x.mean(axis=axis, keepdims=True)
for m in range(max_lags + 1):
auto_corr_at_lag.append((
np.take(x, indices=range(0, axis_len - m), axis=axis) *
np.conj(np.take(x, indices=range(m, axis_len), axis=axis))
).mean(axis=axis, keepdims=True))
rxx = np.concatenate(auto_corr_at_lag, axis=axis)
if normalize:
rxx /= np.take(rxx, [0], axis=axis)
x_ph = array_ops.placeholder_with_default(
x, shape=x.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
auto_corr = sample_stats.auto_correlation(
x_ph, axis=axis, max_lags=max_lags, center=center,
normalize=normalize)
if self.use_static_shape:
output_shape = list(x.shape)
output_shape[axis] = max_lags + 1
self.assertAllEqual(output_shape, auto_corr.shape)
self.assertAllClose(rxx, auto_corr.eval(), rtol=1e-5, atol=1e-5)
def test_axis_n1_center_false_max_lags_none(self):
x = rng.randn(2, 3, 4).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-1, max_lags=None, center=False, normalize=False)
def test_axis_n2_center_false_max_lags_none(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-2, max_lags=None, center=False, normalize=False)
def test_axis_n1_center_false_max_lags_none_normalize_true(self):
x = rng.randn(2, 3, 4).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-1, max_lags=None, center=False, normalize=True)
def test_axis_n2_center_false_max_lags_none_normalize_true(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-2, max_lags=None, center=False, normalize=True)
def test_axis_0_center_true_max_lags_none(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=0, max_lags=None, center=True, normalize=False)
def test_axis_2_center_true_max_lags_1(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=2, max_lags=1, center=True, normalize=False)
def test_axis_2_center_true_max_lags_100(self):
# There are fewer than 100 elements in axis 2, so we expect to get back an
# array the same size as x, despite having asked for 100 lags.
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=2, max_lags=100, center=True, normalize=False)
def test_long_orthonormal_sequence_has_corr_length_0(self):
l = 10000
x = rng.randn(l).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(l,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=l // 2, center=True, normalize=False)
if self.use_static_shape:
self.assertAllEqual((l // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
# OSS CPU FFT has some accuracy issues and is not the most accurate,
# so this tolerance is a bit loose.
self.assertAllClose(1., rxx_[0], rtol=0.05)
# The maximal error in the rest of the sequence is not great.
self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
# The mean error in the rest is ok, actually 0.008 when I tested it.
self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
def test_step_function_sequence(self):
# x jumps to a new random value every 10 steps, so the correlation length is 10.
x = (rng.randint(-10, 10, size=(1000, 1))
* np.ones((1, 10))).ravel().astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(1000 * 10,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=1000 * 10 // 2, center=True, normalize=False)
if self.use_static_shape:
self.assertAllEqual((1000 * 10 // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
rxx_ /= rxx_[0]
# Expect positive correlation for the first 10 lags, then significantly
# smaller negative.
self.assertGreater(rxx_[:10].min(), 0)
self.assertGreater(rxx_[9], 5 * rxx_[10:20].mean())
# RXX should be decreasing for the first 10 lags.
diff = np.diff(rxx_)
self.assertLess(diff[:10].max(), 0)
def test_normalization(self):
l = 10000
x = 3 * rng.randn(l).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(l,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=l // 2, center=True, normalize=True)
if self.use_static_shape:
self.assertAllEqual((l // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
# Note that RXX[0] = 1, despite the fact that E[X^2] = 9, and this is
# due to normalize=True.
# OSS CPU FFT has some accuracy issues and is not the most accurate,
# so this tolerance is a bit loose.
self.assertAllClose(1., rxx_[0], rtol=0.05)
# The maximal error in the rest of the sequence is not great.
self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
# The mean error in the rest is ok, actually 0.008 when I tested it.
self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
class AutoCorrelationTestStaticShapeFloat32(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.float32
@property
def use_static_shape(self):
return True
class AutoCorrelationTestStaticShapeComplex64(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.complex64
@property
def use_static_shape(self):
return True
class AutoCorrelationTestDynamicShapeFloat32(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.float32
@property
def use_static_shape(self):
return False
class PercentileTestWithLowerInterpolation(test.TestCase):
_interpolation = "lower"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_odd_input_axis_0(self):
x = np.array([[-1., 50., -3.5, 2., -1], [0., 0., 3., 2., 4.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
# Get dim 0 with negative and positive indices.
pct_neg_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[-2])
pct_pos_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct_neg_index.get_shape())
self.assertAllEqual((2,), pct_pos_index.get_shape())
self.assertAllClose(expected_percentile, pct_neg_index.eval())
self.assertAllClose(expected_percentile, pct_pos_index.eval())
def test_two_dim_even_axis_0(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_even_input_and_keep_dims_true(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, keepdims=True, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=q,
interpolation=self._interpolation,
keep_dims=True,
axis=[0])
self.assertAllEqual((1, 2), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_and_keepdims(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_x_static_ndims_but_dynamic_sizes(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.cached_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_four_dimensional_input_and_keepdims_x_static_ndims_dynamic_sz(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.cached_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_with_integer_dtype(self):
x = [1, 5, 3, 2, 4]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertEqual(dtypes.int32, pct.dtype)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
class PercentileTestWithHigherInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = "higher"
class PercentileTestWithNearestInterpolation(test.TestCase):
"""Test separately because np.round and tf.round make different choices."""
_interpolation = "nearest"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_invalid_interpolation_raises(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "interpolation"):
sample_stats.percentile(x, q=0.5, interpolation="bad")
def test_vector_q_raises_static(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "Expected.*ndims"):
sample_stats.percentile(x, q=[0.5])
def test_vector_q_raises_dynamic(self):
x = [1., 5., 3., 2., 4.]
q_ph = array_ops.placeholder(dtypes.float32)
pct = sample_stats.percentile(x, q=q_ph, validate_args=True)
with self.cached_session():
with self.assertRaisesOpError("rank"):
pct.eval(feed_dict={q_ph: [0.5]})
def test_finds_max_of_long_array(self):
# d - 1 == d in float32 and d = 3e7.
# So this test only passes if we use double for the percentile indices.
# If float is used, it fails with InvalidArgumentError about an index out of
# bounds.
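# For example, np.float32(3e7) - np.float32(1) == np.float32(3e7) evaluates
# to True.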
x = math_ops.linspace(0., 3e7, num=int(3e7))
with self.cached_session():
minval = sample_stats.percentile(x, q=0, validate_args=True)
self.assertAllEqual(0, minval.eval())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/sample_stats_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Mixture distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
def _swap_first_last_axes(array):
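# Moves the last (components) axis to the front, so callers can zip over
# per-component values.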
rank = len(array.shape)
transpose = [rank - 1] + list(range(0, rank - 1))
return array.transpose(transpose)
def _mixture_stddev_np(pi_vector, mu_vector, sigma_vector):
"""Computes the standard deviation of a univariate mixture distribution.
Acts upon `np.array`s (not `tf.Tensor`s).
Args:
pi_vector: A `np.array` of mixture weights. Shape `[batch, components]`.
mu_vector: A `np.array` of means. Shape `[batch, components]`
sigma_vector: A `np.array` of stddevs. Shape `[batch, components]`.
Returns:
A `np.array` containing the batch of standard deviations.
"""
pi_vector = np.expand_dims(pi_vector, axis=1)
mean_wa = np.matmul(pi_vector, np.expand_dims(mu_vector, axis=2))
var_wa = np.matmul(pi_vector, np.expand_dims(sigma_vector**2, axis=2))
mid_term = np.matmul(pi_vector, np.expand_dims(mu_vector**2, axis=2))
mixture_variance = (
np.squeeze(var_wa) + np.squeeze(mid_term) - np.squeeze(mean_wa**2))
return np.sqrt(mixture_variance)
@contextlib.contextmanager
def _test_capture_mvndiag_sample_outputs():
"""Use monkey-patching to capture the output of an MVNDiag _call_sample_n."""
data_container = []
true_mvndiag_call_sample_n = (
ds.MultivariateNormalDiag._call_sample_n)
def _capturing_mvndiag_call_sample_n(
self, sample_shape, seed, name, **kwargs):
samples = true_mvndiag_call_sample_n(
self, sample_shape, seed, name, **kwargs)
data_container.append(samples)
return samples
ds.MultivariateNormalDiag._call_sample_n = (
_capturing_mvndiag_call_sample_n)
yield data_container
ds.MultivariateNormalDiag._call_sample_n = (
true_mvndiag_call_sample_n)
@contextlib.contextmanager
def _test_capture_normal_sample_outputs():
"""Use monkey-patching to capture the output of an Normal _call_sample_n."""
data_container = []
true_normal_call_sample_n = ds.Normal._call_sample_n
def _capturing_normal_call_sample_n(self, sample_shape, seed, name, **kwargs):
samples = true_normal_call_sample_n(
self, sample_shape, seed, name, **kwargs)
data_container.append(samples)
return samples
ds.Normal._call_sample_n = _capturing_normal_call_sample_n
yield data_container
ds.Normal._call_sample_n = true_normal_call_sample_n
def make_univariate_mixture(batch_shape, num_components, use_static_graph):
batch_shape = ops.convert_to_tensor(batch_shape, dtypes.int32)
logits = random_ops.random_uniform(
array_ops.concat((batch_shape, [num_components]), axis=0),
-1, 1, dtype=dtypes.float32) - 50.
components = [
ds.Normal(
loc=random_ops.random_normal(batch_shape),
scale=10 * random_ops.random_uniform(batch_shape))
for _ in range(num_components)
]
cat = ds.Categorical(logits, dtype=dtypes.int32)
return ds.Mixture(cat, components, use_static_graph=use_static_graph)
def make_multivariate_mixture(batch_shape, num_components, event_shape,
use_static_graph, batch_shape_tensor=None):
if batch_shape_tensor is None:
batch_shape_tensor = batch_shape
batch_shape_tensor = ops.convert_to_tensor(batch_shape_tensor, dtypes.int32)
logits = random_ops.random_uniform(
array_ops.concat((batch_shape_tensor, [num_components]), 0),
-1, 1, dtype=dtypes.float32) - 50.
logits.set_shape(
tensor_shape.TensorShape(batch_shape).concatenate(num_components))
static_batch_and_event_shape = (
tensor_shape.TensorShape(batch_shape).concatenate(event_shape))
event_shape = ops.convert_to_tensor(event_shape, dtypes.int32)
batch_and_event_shape = array_ops.concat((batch_shape_tensor, event_shape), 0)
def create_component():
loc = random_ops.random_normal(batch_and_event_shape)
scale_diag = 10 * random_ops.random_uniform(batch_and_event_shape)
loc.set_shape(static_batch_and_event_shape)
scale_diag.set_shape(static_batch_and_event_shape)
return ds.MultivariateNormalDiag(
loc=loc, scale_diag=scale_diag)
components = [create_component() for _ in range(num_components)]
cat = ds.Categorical(logits, dtype=dtypes.int32)
return ds.Mixture(cat, components, use_static_graph=use_static_graph)
class MixtureTest(test.TestCase):
use_static_graph = False
def testShapes(self):
with self.cached_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_univariate_mixture(batch_shape, num_components=10,
use_static_graph=self.use_static_graph)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
for event_shape in ([1], [2]):
dist = make_multivariate_mixture(
batch_shape, num_components=10, event_shape=event_shape,
use_static_graph=self.use_static_graph)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual(event_shape, dist.event_shape)
self.assertAllEqual(event_shape, dist.event_shape_tensor().eval())
def testBrokenShapesStatic(self):
with self.assertRaisesWithPredicateMatch(ValueError,
r"cat.num_classes != len"):
ds.Mixture(
ds.Categorical([0.1, 0.5]), # 2 classes
[ds.Normal(loc=1.0, scale=2.0)],
use_static_graph=self.use_static_graph)
with self.assertRaisesWithPredicateMatch(
ValueError, r"\(\) and \(2,\) are not compatible"):
# The value error is raised because the batch shapes of the
# Normals are not equal. One is a scalar, the other is a
# vector of size (2,).
ds.Mixture(
ds.Categorical([-0.5, 0.5]), # scalar batch
[
ds.Normal(
loc=1.0, scale=2.0), # scalar dist
ds.Normal(
loc=[1.0, 1.0], scale=[2.0, 2.0])
],
use_static_graph=self.use_static_graph)
with self.assertRaisesWithPredicateMatch(ValueError, r"Could not infer"):
cat_logits = array_ops.placeholder(shape=[1, None], dtype=dtypes.float32)
ds.Mixture(
ds.Categorical(cat_logits),
[ds.Normal(
loc=[1.0], scale=[2.0])],
use_static_graph=self.use_static_graph)
def testBrokenShapesDynamic(self):
with self.cached_session():
d0_param = array_ops.placeholder(dtype=dtypes.float32)
d1_param = array_ops.placeholder(dtype=dtypes.float32)
d = ds.Mixture(
ds.Categorical([0.1, 0.2]), [
ds.Normal(
loc=d0_param, scale=d0_param), ds.Normal(
loc=d1_param, scale=d1_param)
],
validate_args=True,
use_static_graph=self.use_static_graph)
if self.use_static_graph:
error_string = r"Shapes of all inputs must match"
else:
error_string = r"batch shape must match"
with self.assertRaisesOpError(error_string):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: [1.0]})
with self.assertRaisesOpError(error_string):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: 1.0})
def testBrokenTypes(self):
with self.assertRaisesWithPredicateMatch(TypeError, "Categorical"):
ds.Mixture(None, [], use_static_graph=self.use_static_graph)
cat = ds.Categorical([0.3, 0.2])
# components must be a list of distributions
with self.assertRaisesWithPredicateMatch(
TypeError, "all .* must be Distribution instances"):
ds.Mixture(cat, [None], use_static_graph=self.use_static_graph)
with self.assertRaisesWithPredicateMatch(TypeError, "same dtype"):
ds.Mixture(
cat, [
ds.Normal(loc=[1.0], scale=[2.0]),
ds.Normal(loc=[np.float16(1.0)],
scale=[np.float16(2.0)]),
], use_static_graph=self.use_static_graph)
with self.assertRaisesWithPredicateMatch(ValueError, "non-empty list"):
ds.Mixture(ds.Categorical([0.3, 0.2]), None,
use_static_graph=self.use_static_graph)
# TODO(ebrevdo): once distribution Domains have been added, add a
# test to ensure that the domains of the distributions in a
# mixture are checked for equivalence.
def testMeanUnivariate(self):
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=2,
use_static_graph=self.use_static_graph)
mean = dist.mean()
self.assertEqual(batch_shape, mean.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape, mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testMeanMultivariate(self):
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,),
use_static_graph=self.use_static_graph)
mean = dist.mean()
self.assertEqual(batch_shape + (4,), mean.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape + (4,), mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# Add a new innermost dimension for broadcasting to mvn vector shape
cat_probs_value = [np.expand_dims(c_p, -1) for c_p in cat_probs_value]
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testStddevShapeUnivariate(self):
num_components = 2
# This is the same shape test which is done in 'testMeanUnivariate'.
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=num_components,
use_static_graph=self.use_static_graph)
dev = dist.stddev()
self.assertEqual(batch_shape, dev.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_devs = [d.stddev() for d in dist.components]
dist_means = [d.mean() for d in dist.components]
res = sess.run([dev, cat_probs, dist_devs, dist_means])
dev_value, cat_probs_values, dist_devs_values, dist_means_values = res
# Manual computation of stddev.
batch_shape_res = cat_probs_values.shape[:-1]
event_shape_res = dist_devs_values[0].shape[len(batch_shape_res):]
stacked_mean_res = np.stack(dist_means_values, -1)
stacked_dev_res = np.stack(dist_devs_values, -1)
# Broadcast cat probs over event dimensions.
for _ in range(len(event_shape_res)):
cat_probs_values = np.expand_dims(cat_probs_values, len(batch_shape))
cat_probs_values = cat_probs_values + np.zeros_like(stacked_dev_res) # pylint: disable=g-no-augmented-assignment
# Perform stddev computation on a flattened batch.
flat_batch_manual_dev = _mixture_stddev_np(
np.reshape(cat_probs_values, [-1, num_components]),
np.reshape(stacked_mean_res, [-1, num_components]),
np.reshape(stacked_dev_res, [-1, num_components]))
# Reshape to full shape.
full_shape_res = list(batch_shape_res) + list(event_shape_res)
manual_dev = np.reshape(flat_batch_manual_dev, full_shape_res)
self.assertEqual(batch_shape, dev_value.shape)
self.assertAllClose(manual_dev, dev_value)
def testStddevShapeMultivariate(self):
num_components = 2
# This is the same shape test which is done in 'testMeanMultivariate'.
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape,
num_components=num_components,
event_shape=(4,),
use_static_graph=self.use_static_graph)
dev = dist.stddev()
self.assertEqual(batch_shape + (4,), dev.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_devs = [d.stddev() for d in dist.components]
dist_means = [d.mean() for d in dist.components]
res = sess.run([dev, cat_probs, dist_devs, dist_means])
dev_value, cat_probs_values, dist_devs_values, dist_means_values = res
# Manual computation of stddev.
batch_shape_res = cat_probs_values.shape[:-1]
event_shape_res = dist_devs_values[0].shape[len(batch_shape_res):]
stacked_mean_res = np.stack(dist_means_values, -1)
stacked_dev_res = np.stack(dist_devs_values, -1)
# Broadcast cat probs over event dimensions.
for _ in range(len(event_shape_res)):
cat_probs_values = np.expand_dims(cat_probs_values, len(batch_shape))
cat_probs_values = cat_probs_values + np.zeros_like(stacked_dev_res) # pylint: disable=g-no-augmented-assignment
# Perform stddev computation on a flattened batch.
flat_batch_manual_dev = _mixture_stddev_np(
np.reshape(cat_probs_values, [-1, num_components]),
np.reshape(stacked_mean_res, [-1, num_components]),
np.reshape(stacked_dev_res, [-1, num_components]))
# Reshape to full shape.
full_shape_res = list(batch_shape_res) + list(event_shape_res)
manual_dev = np.reshape(flat_batch_manual_dev, full_shape_res)
self.assertEqual(tuple(full_shape_res), dev_value.shape)
self.assertAllClose(manual_dev, dev_value)
def testSpecificStddevValue(self):
cat_probs = np.array([0.5, 0.5])
component_means = np.array([-10, 0.1])
component_devs = np.array([0.05, 2.33])
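# Hand-computed from the mixture moments:
# sqrt(0.5 * (0.05**2 + (-10.0)**2) + 0.5 * (2.33**2 + 0.1**2)
# - (0.5 * (-10.0) + 0.5 * 0.1)**2) ~= 5.3120805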
ground_truth_stddev = 5.3120805
mixture_dist = ds.Mixture(
cat=ds.Categorical(probs=cat_probs),
components=[
ds.Normal(loc=component_means[0],
scale=component_devs[0]),
ds.Normal(loc=component_means[1],
scale=component_devs[1]),
],
use_static_graph=self.use_static_graph)
mix_dev = mixture_dist.stddev()
with self.cached_session() as sess:
actual_stddev = sess.run(mix_dev)
self.assertAllClose(actual_stddev, ground_truth_stddev)
def testProbScalarUnivariate(self):
with self.cached_session() as sess:
dist = make_univariate_mixture(batch_shape=[], num_components=2,
use_static_graph=self.use_static_graph)
for x in [
np.array(
[1.0, 2.0], dtype=np.float32), np.array(
1.0, dtype=np.float32),
np.random.randn(3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbScalarMultivariate(self):
with self.cached_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[], num_components=2, event_shape=[3],
use_static_graph=self.use_static_graph)
for x in [
np.array(
[[-1.0, 0.0, 1.0], [0.5, 1.0, -0.3]], dtype=np.float32), np.array(
[-1.0, 0.0, 1.0], dtype=np.float32),
np.random.randn(2, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchUnivariate(self):
with self.cached_session() as sess:
dist = make_univariate_mixture(batch_shape=[2, 3], num_components=2,
use_static_graph=self.use_static_graph)
for x in [
np.random.randn(2, 3).astype(np.float32),
np.random.randn(4, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchMultivariate(self):
with self.cached_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[2, 3], num_components=2, event_shape=[4],
use_static_graph=self.use_static_graph)
for x in [
np.random.randn(2, 3, 4).astype(np.float32),
np.random.randn(4, 2, 3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testSampleScalarBatchUnivariate(self):
with self.cached_session() as sess:
num_components = 3
batch_shape = []
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=num_components,
use_static_graph=self.use_static_graph)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4,), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4,), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch univariate case: batch_size == 1, rank 1
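# With use_static_graph=True every component samples all n points and we
# gather them by the categorical draws; otherwise Mixture routes only the
# size_c points assigned to component c to that component, so the captured
# tensor holds exactly those size_c draws.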
if self.use_static_graph:
which_dist_samples = dist_sample_values[c][which_c]
else:
which_dist_samples = dist_sample_values[c][:size_c]
self.assertAllClose(which_dist_samples, sample_values[which_c])
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
# 5 component mixture.
logits = [-10.0, -5.0, 0.0, 5.0, 10.0]
mus = [-5.0, 0.0, 5.0, 4.0, 20.0]
sigmas = [0.1, 5.0, 3.0, 0.2, 4.0]
with self.cached_session():
n = 100
random_seed.set_random_seed(654321)
components = [
ds.Normal(
loc=mu, scale=sigma) for mu, sigma in zip(mus, sigmas)
]
cat = ds.Categorical(
logits, dtype=dtypes.int32, name="cat1")
dist1 = ds.Mixture(cat, components, name="mixture1",
use_static_graph=self.use_static_graph)
samples1 = dist1.sample(n, seed=123456).eval()
random_seed.set_random_seed(654321)
components2 = [
ds.Normal(
loc=mu, scale=sigma) for mu, sigma in zip(mus, sigmas)
]
cat2 = ds.Categorical(
logits, dtype=dtypes.int32, name="cat2")
dist2 = ds.Mixture(cat2, components2, name="mixture2",
use_static_graph=self.use_static_graph)
samples2 = dist2.sample(n, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testSampleScalarBatchMultivariate(self):
with self.cached_session() as sess:
num_components = 3
dist = make_multivariate_mixture(
batch_shape=[], num_components=num_components, event_shape=[2],
use_static_graph=self.use_static_graph)
n = 4
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4, 2), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch multivariate case: batch_size == 1, rank 2
if self.use_static_graph:
which_dist_samples = dist_sample_values[c][which_c, :]
else:
which_dist_samples = dist_sample_values[c][:size_c, :]
self.assertAllClose(which_dist_samples, sample_values[which_c, :])
def testSampleBatchUnivariate(self):
with self.cached_session() as sess:
num_components = 3
dist = make_univariate_mixture(
batch_shape=[2, 3], num_components=num_components,
use_static_graph=self.use_static_graph)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4, 2, 3), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2, 3), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
# Batch univariate case: batch_size == [2, 3], rank 3
if self.use_static_graph:
which_dist_samples = dist_sample_values[c][which_c_s, which_c_b0,
which_c_b1]
else:
which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
which_c_b1]
self.assertAllClose(which_dist_samples,
sample_values[which_c_s, which_c_b0, which_c_b1])
def _testSampleBatchMultivariate(self, fully_known_batch_shape):
with self.cached_session() as sess:
num_components = 3
if fully_known_batch_shape:
batch_shape = [2, 3]
batch_shape_tensor = [2, 3]
else:
batch_shape = [None, 3]
batch_shape_tensor = array_ops.placeholder(dtype=dtypes.int32)
dist = make_multivariate_mixture(
batch_shape=batch_shape,
num_components=num_components, event_shape=[4],
batch_shape_tensor=batch_shape_tensor,
use_static_graph=self.use_static_graph)
n = 5
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
if fully_known_batch_shape:
self.assertEqual((5, 2, 3, 4), samples.get_shape())
else:
self.assertEqual([5, None, 3, 4], samples.get_shape().as_list())
cat_samples = dist.cat.sample(n, seed=123)
if fully_known_batch_shape:
feed_dict = {}
else:
feed_dict = {batch_shape_tensor: [2, 3]}
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples],
feed_dict=feed_dict)
self.assertEqual((5, 2, 3, 4), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
# Batch multivariate case: batch_size == [2, 3], rank 4
if self.use_static_graph:
which_dist_samples = dist_sample_values[c][which_c_s, which_c_b0,
which_c_b1, :]
else:
which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
which_c_b1, :]
self.assertAllClose(which_dist_samples,
sample_values[which_c_s, which_c_b0, which_c_b1, :])
def testSampleBatchMultivariateFullyKnownBatchShape(self):
self._testSampleBatchMultivariate(fully_known_batch_shape=True)
def testSampleBatchMultivariateNotFullyKnownBatchShape(self):
self._testSampleBatchMultivariate(fully_known_batch_shape=False)
def testEntropyLowerBoundMultivariate(self):
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,),
use_static_graph=self.use_static_graph)
entropy_lower_bound = dist.entropy_lower_bound()
self.assertEqual(batch_shape, entropy_lower_bound.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_entropy = [d.entropy() for d in dist.components]
entropy_lower_bound_value, cat_probs_value, dist_entropy_value = (
sess.run([entropy_lower_bound, cat_probs, dist_entropy]))
self.assertEqual(batch_shape, entropy_lower_bound_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# entropy_lower_bound = sum_i pi_i entropy_i
# for i in num_components, batchwise.
true_entropy_lower_bound = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_entropy_value)])
self.assertAllClose(true_entropy_lower_bound, entropy_lower_bound_value)
def testCdfScalarUnivariate(self):
"""Tests CDF against scipy for a mixture of seven gaussians."""
# Construct a mixture of gaussians with seven components.
n_components = 7
# pre-softmax mixture probabilities.
mixture_weight_logits = np.random.uniform(
low=-1, high=1, size=(n_components,)).astype(np.float32)
def _scalar_univariate_softmax(x):
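# Subtracting max(x) is the standard numerically stable softmax trick; it
# leaves the normalized result unchanged.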
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
# Construct the ds.Mixture object.
mixture_weights = _scalar_univariate_softmax(mixture_weight_logits)
means = [np.random.uniform(low=-10, high=10, size=()).astype(np.float32)
for _ in range(n_components)]
sigmas = [np.ones(shape=(), dtype=np.float32) for _ in range(n_components)]
cat_tf = ds.Categorical(probs=mixture_weights)
components_tf = [ds.Normal(loc=mu, scale=sigma)
for (mu, sigma) in zip(means, sigmas)]
mixture_tf = ds.Mixture(cat=cat_tf, components=components_tf,
use_static_graph=self.use_static_graph)
x_tensor = array_ops.placeholder(shape=(), dtype=dtypes.float32)
# These are two test cases to verify.
xs_to_check = [
np.array(1.0, dtype=np.float32),
np.array(np.random.randn()).astype(np.float32)
]
# Carry out the test for both d.cdf and exp(d.log_cdf).
x_cdf_tf = mixture_tf.cdf(x_tensor)
x_log_cdf_tf = mixture_tf.log_cdf(x_tensor)
with self.cached_session() as sess:
for x_feed in xs_to_check:
x_cdf_tf_result, x_log_cdf_tf_result = sess.run(
[x_cdf_tf, x_log_cdf_tf], feed_dict={x_tensor: x_feed})
# Compute the cdf with scipy.
scipy_component_cdfs = [stats.norm.cdf(x=x_feed, loc=mu, scale=sigma)
for (mu, sigma) in zip(means, sigmas)]
scipy_cdf_result = np.dot(mixture_weights,
np.array(scipy_component_cdfs))
self.assertAllClose(x_cdf_tf_result, scipy_cdf_result)
self.assertAllClose(np.exp(x_log_cdf_tf_result), scipy_cdf_result)
def testCdfBatchUnivariate(self):
"""Tests against scipy for a (batch of) mixture(s) of seven gaussians."""
n_components = 7
batch_size = 5
mixture_weight_logits = np.random.uniform(
low=-1, high=1, size=(batch_size, n_components)).astype(np.float32)
def _batch_univariate_softmax(x):
e_x = np.exp(x)
e_x_sum = np.expand_dims(np.sum(e_x, axis=1), axis=1)
return e_x / np.tile(e_x_sum, reps=[1, x.shape[1]])
psize = (batch_size,)
mixture_weights = _batch_univariate_softmax(mixture_weight_logits)
means = [np.random.uniform(low=-10, high=10, size=psize).astype(np.float32)
for _ in range(n_components)]
sigmas = [np.ones(shape=psize, dtype=np.float32)
for _ in range(n_components)]
cat_tf = ds.Categorical(probs=mixture_weights)
components_tf = [ds.Normal(loc=mu, scale=sigma)
for (mu, sigma) in zip(means, sigmas)]
mixture_tf = ds.Mixture(cat=cat_tf, components=components_tf,
use_static_graph=self.use_static_graph)
x_tensor = array_ops.placeholder(shape=psize, dtype=dtypes.float32)
xs_to_check = [
np.array([1.0, 5.9, -3, 0.0, 0.0], dtype=np.float32),
np.random.randn(batch_size).astype(np.float32)
]
x_cdf_tf = mixture_tf.cdf(x_tensor)
x_log_cdf_tf = mixture_tf.log_cdf(x_tensor)
with self.cached_session() as sess:
for x_feed in xs_to_check:
x_cdf_tf_result, x_log_cdf_tf_result = sess.run(
[x_cdf_tf, x_log_cdf_tf],
feed_dict={x_tensor: x_feed})
# Compute the cdf with scipy.
scipy_component_cdfs = [stats.norm.cdf(x=x_feed, loc=mu, scale=sigma)
for (mu, sigma) in zip(means, sigmas)]
weights_and_cdfs = zip(np.transpose(mixture_weights, axes=[1, 0]),
scipy_component_cdfs)
final_cdf_probs_per_component = [
np.multiply(c_p_value, d_cdf_value)
for (c_p_value, d_cdf_value) in weights_and_cdfs]
scipy_cdf_result = np.sum(final_cdf_probs_per_component, axis=0)
self.assertAllClose(x_cdf_tf_result, scipy_cdf_result)
self.assertAllClose(np.exp(x_log_cdf_tf_result), scipy_cdf_result)
def testSampleBimixGamma(self):
"""Tests a bug in the underlying tf.Gamma op.
Mixture's use of dynamic partition requires that `random_gamma` correctly
return an empty `Tensor`.
"""
with self.cached_session():
gm = ds.Mixture(
cat=ds.Categorical(probs=[.3, .7]),
components=[ds.Gamma(1., 2.),
ds.Gamma(2., 1.)],
use_static_graph=self.use_static_graph)
x_ = gm.sample().eval()
self.assertAllEqual([], x_.shape)
class MixtureStaticSampleTest(MixtureTest):
use_static_graph = True
class MixtureBenchmark(test.Benchmark):
use_static_graph = False
def _runSamplingBenchmark(self, name, create_distribution, use_gpu,
num_components, batch_size, num_features,
sample_size):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
np.random.seed(127)
with session.Session(config=config, graph=ops.Graph()) as sess:
random_seed.set_random_seed(0)
with ops.device("/device:GPU:0" if use_gpu else "/cpu:0"):
mixture = create_distribution(
num_components=num_components,
batch_size=batch_size,
num_features=num_features)
sample_op = mixture.sample(sample_size).op
sess.run(variables.global_variables_initializer())
reported = self.run_op_benchmark(
sess,
sample_op,
min_iters=10,
name=("%s_%s_components_%d_batch_%d_features_%d_sample_%d" %
(name, use_gpu, num_components, batch_size, num_features,
sample_size)))
logging.vlog(2, "\t".join(["%s", "%d", "%d", "%d", "%d", "%g"]) % (
use_gpu, num_components, batch_size, num_features, sample_size,
reported["wall_time"]))
def benchmarkSamplingMVNDiag(self):
logging.vlog(
2, "mvn_diag\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")
def create_distribution(batch_size, num_components, num_features):
cat = ds.Categorical(
logits=np.random.randn(batch_size, num_components))
mus = [
variables.Variable(np.random.randn(batch_size, num_features))
for _ in range(num_components)
]
sigmas = [
variables.Variable(np.random.rand(batch_size, num_features))
for _ in range(num_components)
]
components = list(
ds.MultivariateNormalDiag(
loc=mu, scale_diag=sigma) for (mu, sigma) in zip(mus, sigmas))
return ds.Mixture(cat, components, use_static_graph=self.use_static_graph)
for use_gpu in False, True:
if use_gpu and not test.is_gpu_available():
continue
for num_components in 1, 8, 16:
for batch_size in 1, 32:
for num_features in 1, 64, 512:
for sample_size in 1, 32, 128:
self._runSamplingBenchmark(
"mvn_diag",
create_distribution=create_distribution,
use_gpu=use_gpu,
num_components=num_components,
batch_size=batch_size,
num_features=num_features,
sample_size=sample_size)
def benchmarkSamplingMVNFull(self):
logging.vlog(
2, "mvn_full\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")
def psd(x):
"""Construct batch-wise PSD matrices."""
return np.stack([np.dot(np.transpose(z), z) for z in x])
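    # For any real matrix z, np.dot(z.T, z) is symmetric positive
    # semi-definite, so each batch member built above is a valid covariance.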
def create_distribution(batch_size, num_components, num_features):
cat = ds.Categorical(
logits=np.random.randn(batch_size, num_components))
mus = [
variables.Variable(np.random.randn(batch_size, num_features))
for _ in range(num_components)
]
sigmas = [
variables.Variable(
psd(np.random.rand(batch_size, num_features, num_features)))
for _ in range(num_components)
]
components = list(
ds.MultivariateNormalTriL(
loc=mu, scale_tril=linalg_ops.cholesky(sigma))
for (mu, sigma) in zip(mus, sigmas))
return ds.Mixture(cat, components, use_static_graph=self.use_static_graph)
for use_gpu in False, True:
if use_gpu and not test.is_gpu_available():
continue
for num_components in 1, 8, 16:
for batch_size in 1, 32:
for num_features in 1, 64, 512:
for sample_size in 1, 32, 128:
self._runSamplingBenchmark(
"mvn_full",
create_distribution=create_distribution,
use_gpu=use_gpu,
num_components=num_components,
batch_size=batch_size,
num_features=num_features,
sample_size=sample_size)
class MixtureStaticSampleBenchmark(MixtureBenchmark):
use_static_graph = True
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/mixture_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SinhArcsinh."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(123)
class SinhArcsinhTest(test.TestCase):
def test_default_is_same_as_normal(self):
b = 10
scale = rng.rand(b) + 0.5
loc = rng.randn(b)
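    # With the (assumed) default skewness=0 and tailweight=1, the
    # sinh-arcsinh transform reduces to the identity up to loc/scale, so
    # SinhArcsinh should coincide with Normal(loc, scale).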
with self.cached_session() as sess:
norm = ds.Normal(
loc=loc,
scale=scale,
validate_args=True)
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
validate_args=True)
x = rng.randn(5, b)
norm_pdf, sasnorm_pdf = sess.run([norm.prob(x), sasnorm.prob(x)])
self.assertAllClose(norm_pdf, sasnorm_pdf)
norm_samps, sasnorm_samps = sess.run(
[norm.sample(10000, seed=0),
sasnorm.sample(10000, seed=0)])
self.assertAllClose(loc, sasnorm_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
norm_samps.mean(axis=0), sasnorm_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
norm_samps.std(axis=0), sasnorm_samps.std(axis=0), atol=0.1)
def test_broadcast_params_dynamic(self):
with self.cached_session() as sess:
loc = array_ops.placeholder(dtypes.float64)
scale = array_ops.placeholder(dtypes.float64)
skewness = array_ops.placeholder(dtypes.float64)
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
skewness=skewness,
validate_args=True)
samp = sess.run(sasnorm.sample(),
feed_dict={loc: rng.rand(5),
scale: np.float64(rng.rand()), # Scalar
skewness: rng.rand(5)})
self.assertAllEqual((5,), samp.shape)
def test_passing_in_laplace_plus_defaults_is_same_as_laplace(self):
b = 10
scale = rng.rand(b) + 0.5
loc = rng.randn(b)
with self.cached_session() as sess:
lap = ds.Laplace(
loc=loc,
scale=scale,
validate_args=True)
saslap = ds.SinhArcsinh(
loc=loc,
scale=scale,
distribution=ds.Laplace(np.float64(0), np.float64(1)),
validate_args=True)
x = rng.randn(5, b)
lap_pdf, saslap_pdf = sess.run([lap.prob(x), saslap.prob(x)])
self.assertAllClose(lap_pdf, saslap_pdf)
lap_samps, saslap_samps = sess.run(
[lap.sample(10000, seed=0),
saslap.sample(10000, seed=0)])
self.assertAllClose(loc, saslap_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
lap_samps.mean(axis=0), saslap_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
lap_samps.std(axis=0), saslap_samps.std(axis=0), atol=0.1)
def test_tailweight_small_gives_fewer_outliers_than_normal(self):
batch_size = 10
scale = rng.rand(batch_size) + 0.5
loc = 0.1 * rng.randn(batch_size)
with self.cached_session() as sess:
norm = ds.Normal(
loc=loc,
scale=scale,
validate_args=True)
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
tailweight=0.1,
validate_args=True)
# sasnorm.pdf(x) is smaller on outliers (+-10 are outliers)
x = np.float64([[-10] * batch_size, [10] * batch_size]) # Shape [2, 10]
norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)])
np.testing.assert_array_less(sasnorm_lp, norm_lp)
# 0.1% quantile and 99.9% quantile are outliers, and should be more
      # extreme in the normal. The 97.725% quantiles should be the same.
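      # (The SinhArcsinh transform is scaled so that, with zero skewness, the
      # point two scale units from loc is approximately left fixed for any
      # tailweight, which is why this particular quantile is expected to
      # agree.)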
norm_samps, sasnorm_samps = sess.run(
[norm.sample(int(5e5), seed=1),
sasnorm.sample(int(5e5), seed=1)])
np.testing.assert_array_less(
np.percentile(norm_samps, 0.1, axis=0),
np.percentile(sasnorm_samps, 0.1, axis=0))
np.testing.assert_array_less(
np.percentile(sasnorm_samps, 99.9, axis=0),
np.percentile(norm_samps, 99.9, axis=0))
# 100. * sp.stats.norm.cdf(2.)
q = 100 * 0.97724986805182079
self.assertAllClose(
np.percentile(sasnorm_samps, q, axis=0),
np.percentile(norm_samps, q, axis=0),
rtol=0.03)
self.assertAllClose(
np.percentile(sasnorm_samps, 100 - q, axis=0),
np.percentile(norm_samps, 100 - q, axis=0),
rtol=0.03)
def test_tailweight_large_gives_more_outliers_than_normal(self):
batch_size = 10
scale = rng.rand(batch_size) + 0.5
loc = np.float64(0.)
with self.cached_session() as sess:
norm = ds.Normal(
loc=loc,
scale=scale,
validate_args=True)
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
tailweight=3.,
validate_args=True)
# norm.pdf(x) is smaller on outliers (+-10 are outliers)
x = np.float64([[-10] * batch_size, [10] * batch_size]) # Shape [2, 10]
norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)])
np.testing.assert_array_less(norm_lp, sasnorm_lp)
# 0.1% quantile and 99.9% quantile are outliers, and should be more
      # extreme in the sasnormal. The 97.725% quantiles should be the same.
norm_samps, sasnorm_samps = sess.run(
[norm.sample(int(5e5), seed=2),
sasnorm.sample(int(5e5), seed=2)])
np.testing.assert_array_less(
np.percentile(sasnorm_samps, 0.1, axis=0),
np.percentile(norm_samps, 0.1, axis=0))
np.testing.assert_array_less(
np.percentile(norm_samps, 99.9, axis=0),
np.percentile(sasnorm_samps, 99.9, axis=0))
# 100. * sp.stats.norm.cdf(2.)
q = 100 * 0.97724986805182079
self.assertAllClose(
np.percentile(sasnorm_samps, q, axis=0),
np.percentile(norm_samps, q, axis=0),
rtol=0.03)
self.assertAllClose(
np.percentile(sasnorm_samps, 100 - q, axis=0),
np.percentile(norm_samps, 100 - q, axis=0),
rtol=0.03)
def test_positive_skewness_moves_mean_to_the_right(self):
batch_size = 10
scale = rng.rand(batch_size) + 0.5
loc = rng.randn(batch_size)
with self.cached_session() as sess:
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
skewness=3.0,
validate_args=True)
sasnorm_samps = sess.run(sasnorm.sample(10000, seed=4))
np.testing.assert_array_less(loc, sasnorm_samps.mean(axis=0))
def test_pdf_reflected_for_negative_skewness(self):
with self.cached_session() as sess:
sas_pos_skew = ds.SinhArcsinh(
loc=0.,
scale=1.,
skewness=2.,
validate_args=True)
sas_neg_skew = ds.SinhArcsinh(
loc=0.,
scale=1.,
skewness=-2.,
validate_args=True)
x = np.linspace(-2, 2, num=5).astype(np.float32)
self.assertAllClose(
*sess.run([sas_pos_skew.prob(x), sas_neg_skew.prob(x[::-1])]))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/sinh_arcsinh_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VectorSinhArcsinhDiag."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(123)
class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
def test_default_is_same_as_normal(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.0)
loc = rng.randn(d)
with self.cached_session() as sess:
norm = ds.MultivariateNormalDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
sasnorm = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
x = rng.randn(5, d)
norm_pdf, sasnorm_pdf = sess.run([norm.prob(x), sasnorm.prob(x)])
self.assertAllClose(norm_pdf, sasnorm_pdf)
norm_samps, sasnorm_samps = sess.run(
[norm.sample(10000, seed=0),
sasnorm.sample(10000, seed=0)])
self.assertAllClose(loc, sasnorm_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
norm_samps.mean(axis=0), sasnorm_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
norm_samps.std(axis=0), sasnorm_samps.std(axis=0), atol=0.1)
def test_passing_in_laplace_plus_defaults_is_same_as_laplace(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.2)
loc = rng.randn(d)
with self.cached_session() as sess:
vlap = ds.VectorLaplaceDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
sasvlap = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
distribution=ds.Laplace(np.float64(0.), np.float64(1.)),
validate_args=True)
x = rng.randn(5, d)
vlap_pdf, sasvlap_pdf = sess.run([vlap.prob(x), sasvlap.prob(x)])
self.assertAllClose(vlap_pdf, sasvlap_pdf)
vlap_samps, sasvlap_samps = sess.run(
[vlap.sample(10000, seed=0),
sasvlap.sample(10000, seed=0)])
self.assertAllClose(loc, sasvlap_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
vlap_samps.mean(axis=0), sasvlap_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
vlap_samps.std(axis=0), sasvlap_samps.std(axis=0), atol=0.1)
def test_tailweight_small_gives_fewer_outliers_than_normal(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(0.9)
loc = rng.randn(d)
with self.cached_session() as sess:
norm = ds.MultivariateNormalDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
sasnorm = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
tailweight=0.1,
validate_args=True)
# sasnorm.pdf(x) is smaller on outliers (+-10 are outliers)
x = np.float64([[-10] * d, [10] * d]) # Shape [2, 10]
norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)])
np.testing.assert_array_less(sasnorm_lp, norm_lp)
# 0.1% quantile and 99.9% quantile are outliers, and should be more
      # extreme in the normal. The 97.725% quantiles should be the same.
norm_samps, sasnorm_samps = sess.run(
[norm.sample(int(5e5), seed=1),
sasnorm.sample(int(5e5), seed=1)])
np.testing.assert_array_less(
np.percentile(norm_samps, 0.1, axis=0),
np.percentile(sasnorm_samps, 0.1, axis=0))
np.testing.assert_array_less(
np.percentile(sasnorm_samps, 99.9, axis=0),
np.percentile(norm_samps, 99.9, axis=0))
# 100. * sp.stats.norm.cdf(2.)
q = 100 * 0.97724986805182079
self.assertAllClose(
np.percentile(sasnorm_samps, q, axis=0),
np.percentile(norm_samps, q, axis=0),
rtol=0.03)
self.assertAllClose(
np.percentile(sasnorm_samps, 100 - q, axis=0),
np.percentile(norm_samps, 100 - q, axis=0),
rtol=0.03)
def test_tailweight_large_gives_more_outliers_than_normal(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.0)
loc = rng.randn(d)
with self.cached_session() as sess:
norm = ds.MultivariateNormalDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
sasnorm = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
tailweight=3.,
validate_args=True)
# norm.pdf(x) is smaller on outliers (+-10 are outliers)
x = np.float64([[-10] * d, [10] * d]) # Shape [2, 10]
norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)])
np.testing.assert_array_less(norm_lp, sasnorm_lp)
# 0.1% quantile and 99.9% quantile are outliers, and should be more
      # extreme in the sasnormal. The 97.725% quantiles should be the same.
norm_samps, sasnorm_samps = sess.run(
[norm.sample(int(5e5), seed=2),
sasnorm.sample(int(5e5), seed=2)])
np.testing.assert_array_less(
np.percentile(sasnorm_samps, 0.1, axis=0),
np.percentile(norm_samps, 0.1, axis=0))
np.testing.assert_array_less(
np.percentile(norm_samps, 99.9, axis=0),
np.percentile(sasnorm_samps, 99.9, axis=0))
# 100. * sp.stats.norm.cdf(2.)
q = 100 * 0.97724986805182079
self.assertAllClose(
np.percentile(sasnorm_samps, q, axis=0),
np.percentile(norm_samps, q, axis=0),
rtol=0.03)
self.assertAllClose(
np.percentile(sasnorm_samps, 100 - q, axis=0),
np.percentile(norm_samps, 100 - q, axis=0),
rtol=0.03)
def test_positive_skewness_moves_mean_to_the_right(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.0)
loc = rng.randn(d)
with self.cached_session() as sess:
sasnorm = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
skewness=3.0,
validate_args=True)
sasnorm_samps = sess.run(sasnorm.sample(10000, seed=4))
np.testing.assert_array_less(loc, sasnorm_samps.mean(axis=0))
def test_consistency_random_parameters_with_batch_dim(self):
b, d = 5, 2
scale_diag = rng.rand(b, d)
scale_identity_multiplier = np.float64(1.1)
with self.cached_session() as sess:
sasnorm = ds.VectorSinhArcsinhDiag(
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
skewness=rng.randn(d) * 0.5,
tailweight=rng.rand(b, d) + 0.7,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, sasnorm, radius=1.0, center=0., rtol=0.1)
self.run_test_sample_consistent_log_prob(
sess.run,
sasnorm,
radius=1.0,
center=-0.15,
rtol=0.1)
self.run_test_sample_consistent_log_prob(
sess.run,
sasnorm,
radius=1.0,
center=0.15,
rtol=0.1)
def test_consistency_random_parameters_no_batch_dims(self):
d = 3
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.1)
with self.cached_session() as sess:
sasnorm = ds.VectorSinhArcsinhDiag(
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
skewness=rng.randn(d) * 0.5,
tailweight=rng.rand(d) + 0.7,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, sasnorm, radius=1.0, center=0., rtol=0.1)
self.run_test_sample_consistent_log_prob(
sess.run,
sasnorm,
radius=1.0,
center=-0.15,
rtol=0.1)
self.run_test_sample_consistent_log_prob(
sess.run,
sasnorm,
radius=1.0,
center=0.15,
rtol=0.1)
def test_pdf_reflected_for_negative_skewness(self):
with self.cached_session() as sess:
sas_pos_skew = ds.VectorSinhArcsinhDiag(
loc=[0.],
scale_identity_multiplier=1.,
skewness=2.,
validate_args=True)
sas_neg_skew = ds.VectorSinhArcsinhDiag(
loc=[0.],
scale_identity_multiplier=1.,
skewness=-2.,
validate_args=True)
x = np.linspace(-2, 2, num=5).astype(np.float32).reshape(5, 1)
self.assertAllClose(
*sess.run([sas_pos_skew.prob(x), sas_neg_skew.prob(x[::-1])]))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/vector_sinh_arcsinh_diag_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
tfd = distributions
class DistributionTest(test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
tfd.Normal,
tfd.Bernoulli,
tfd.Beta,
tfd.Chi2,
tfd.Exponential,
tfd.Gamma,
tfd.InverseGamma,
tfd.Laplace,
tfd.StudentT,
tfd.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.cached_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, random_ops.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape,
array_ops.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
array_ops.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.cached_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = tfd.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.cached_session():
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
unused_normal_copy = normal.copy(validate_args=False)
base_params = normal.parameters.copy()
copy_params = normal.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop("validate_args"), copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.cached_session():
mu = 1.
sigma = 2.
normal = tfd.Normal(mu, sigma, validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
normal = tfd.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([mu], [sigma], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertTrue(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertFalse(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
# Test case 3.
x = array_ops.placeholder(dtype=dtypes.int32)
is_scalar = normal._is_scalar_helper(x.get_shape(),
lambda: array_ops.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
def _GetFakeDistribution(self):
class FakeDistribution(tfd.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
return FakeDistribution
def testSampleShapeHints(self):
fake_distribution = self._GetFakeDistribution()
with self.cached_session():
# Make a new session since we're playing with static shapes. [And below.]
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
# We use as_list since TensorShape comparison does not work correctly for
      # unknown values, i.e., Dimension(None).
self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
def testNameScopeWorksCorrectly(self):
x = tfd.Normal(loc=0., scale=1., name="x")
x_duplicate = tfd.Normal(loc=0., scale=1., name="x")
with ops.name_scope("y") as name:
y = tfd.Bernoulli(logits=0., name=name)
x_sample = x.sample(name="custom_sample")
x_sample_duplicate = x.sample(name="custom_sample")
x_log_prob = x.log_prob(0., name="custom_log_prob")
x_duplicate_sample = x_duplicate.sample(name="custom_sample")
self.assertEqual(x.name, "x/")
self.assertEqual(x_duplicate.name, "x_1/")
self.assertEqual(y.name, "y/")
self.assertTrue(x_sample.name.startswith("x/custom_sample"))
self.assertTrue(x_sample_duplicate.name.startswith("x/custom_sample_1"))
self.assertTrue(x_log_prob.name.startswith("x/custom_log_prob"))
self.assertTrue(x_duplicate_sample.name.startswith(
"x_1/custom_sample"))
def testStrWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("tfp.distributions.Normal("
"\"Normal/\", "
"batch_shape=(), "
"event_shape=(), "
"dtype=float16)"), # Got the dtype right.
str(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("tfp.distributions.Chi2("
"\"silly/\", " # What a silly name that is!
"batch_shape=(2,), "
"event_shape=(), "
"dtype=float32)"),
str(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("tfp.distributions.Exponential(\"Exponential/\", "
# No batch shape.
"event_shape=(), "
"dtype=float32)"),
str(exp))
def testStrWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("tfp.distributions.MultivariateNormalDiag("
"\"MVN/\", "
"batch_shape=(2,), "
"event_shape=(2,), "
"dtype=float64)"),
str(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
if mvn_dynamic.batch_shape._v2_behavior:
self.assertEqual(
("tfp.distributions.MultivariateNormalDiag("
"\"MVN2/\", "
"batch_shape=(None,), " # Partially known.
"event_shape=(3,), "
"dtype=float32)"),
str(mvn_dynamic))
else:
self.assertEqual(
("tfp.distributions.MultivariateNormalDiag("
"\"MVN2/\", "
"batch_shape=(?,), " # Partially known.
"event_shape=(3,), "
"dtype=float32)"),
str(mvn_dynamic))
def testReprWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("<tfp.distributions.Normal"
" 'Normal/'"
" batch_shape=()"
" event_shape=()"
" dtype=float16>"), # Got the dtype right.
repr(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("<tfp.distributions.Chi2"
" 'silly/'" # What a silly name that is!
" batch_shape=(2,)"
" event_shape=()"
" dtype=float32>"),
repr(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("<tfp.distributions.Exponential"
" 'Exponential/'"
" batch_shape=<unknown>"
" event_shape=()"
" dtype=float32>"),
repr(exp))
def testReprWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("<tfp.distributions.MultivariateNormalDiag"
" 'MVN/'"
" batch_shape=(2,)"
" event_shape=(2,)"
" dtype=float64>"),
repr(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
if mvn_dynamic.batch_shape._v2_behavior:
self.assertEqual(
("<tfp.distributions.MultivariateNormalDiag"
" 'MVN2/'"
" batch_shape=(None,)" # Partially known.
" event_shape=(3,)"
" dtype=float32>"),
repr(mvn_dynamic))
else:
self.assertEqual(
("<tfp.distributions.MultivariateNormalDiag"
" 'MVN2/'"
" batch_shape=(?,)" # Partially known.
" event_shape=(3,)"
" dtype=float32>"),
repr(mvn_dynamic))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import binomial
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class BinomialTest(test.TestCase):
def testSimpleShapes(self):
with self.cached_session():
p = np.float32(np.random.beta(1, 1))
binom = binomial.Binomial(total_count=1., probs=p)
self.assertAllEqual([], binom.event_shape_tensor().eval())
self.assertAllEqual([], binom.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([]), binom.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), binom.batch_shape)
def testComplexShapes(self):
with self.cached_session():
p = np.random.beta(1, 1, size=(3, 2)).astype(np.float32)
n = [[3., 2], [4, 5], [6, 7]]
binom = binomial.Binomial(total_count=n, probs=p)
self.assertAllEqual([], binom.event_shape_tensor().eval())
self.assertAllEqual([3, 2], binom.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([]), binom.event_shape)
self.assertEqual(
tensor_shape.TensorShape([3, 2]), binom.batch_shape)
def testNProperty(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.cached_session():
binom = binomial.Binomial(total_count=n, probs=p)
self.assertEqual((2, 1), binom.total_count.get_shape())
self.assertAllClose(n, binom.total_count.eval())
def testPProperty(self):
p = [[0.1, 0.2, 0.7]]
with self.cached_session():
binom = binomial.Binomial(total_count=3., probs=p)
self.assertEqual((1, 3), binom.probs.get_shape())
self.assertEqual((1, 3), binom.logits.get_shape())
self.assertAllClose(p, binom.probs.eval())
def testLogitsProperty(self):
logits = [[0., 9., -0.5]]
with self.cached_session():
binom = binomial.Binomial(total_count=3., logits=logits)
self.assertEqual((1, 3), binom.probs.get_shape())
self.assertEqual((1, 3), binom.logits.get_shape())
self.assertAllClose(logits, binom.logits.eval())
def testPmfAndCdfNandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
binom = binomial.Binomial(total_count=n, probs=p, validate_args=True)
binom.prob([2., 3, 2]).eval()
binom.prob([3., 1, 2]).eval()
binom.cdf([2., 3, 2]).eval()
binom.cdf([3., 1, 2]).eval()
with self.assertRaisesOpError("Condition x >= 0.*"):
binom.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError("Condition x <= y.*"):
binom.prob([7., 3, 0]).eval()
with self.assertRaisesOpError("Condition x >= 0.*"):
binom.cdf([-1., 4, 2]).eval()
with self.assertRaisesOpError("Condition x <= y.*"):
binom.cdf([7., 3, 0]).eval()
def testPmfAndCdfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
# No errors with integer n.
binom = binomial.Binomial(total_count=n, probs=p, validate_args=True)
binom.prob([2., 3, 2]).eval()
binom.prob([3., 1, 2]).eval()
binom.cdf([2., 3, 2]).eval()
binom.cdf([3., 1, 2]).eval()
placeholder = array_ops.placeholder(dtypes.float32)
# Both equality and integer checking fail.
with self.assertRaisesOpError(
"cannot contain fractional components."):
binom.prob(placeholder).eval(feed_dict={placeholder: [1.0, 2.5, 1.5]})
with self.assertRaisesOpError(
"cannot contain fractional components."):
binom.cdf(placeholder).eval(feed_dict={placeholder: [1.0, 2.5, 1.5]})
binom = binomial.Binomial(total_count=n, probs=p, validate_args=False)
binom.prob([1., 2., 3.]).eval()
binom.cdf([1., 2., 3.]).eval()
# Non-integer arguments work.
binom.prob([1.0, 2.5, 1.5]).eval()
binom.cdf([1.0, 2.5, 1.5]).eval()
def testPmfAndCdfBothZeroBatches(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = 0.5
counts = 1.
binom = binomial.Binomial(total_count=1., probs=p)
pmf = binom.prob(counts)
cdf = binom.cdf(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertAllClose(stats.binom.cdf(counts, n=1, p=p), cdf.eval())
self.assertEqual((), pmf.get_shape())
self.assertEqual((), cdf.get_shape())
def testPmfAndCdfBothZeroBatchesNontrivialN(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = 0.1
counts = 3.
binom = binomial.Binomial(total_count=5., probs=p)
pmf = binom.prob(counts)
cdf = binom.cdf(counts)
self.assertAllClose(stats.binom.pmf(counts, n=5., p=p), pmf.eval())
self.assertAllClose(stats.binom.cdf(counts, n=5., p=p), cdf.eval())
self.assertEqual((), pmf.get_shape())
self.assertEqual((), cdf.get_shape())
def testPmfAndCdfPStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9]]
counts = [[1., 2.]]
binom = binomial.Binomial(total_count=3., probs=p)
pmf = binom.prob(counts)
cdf = binom.cdf(counts)
self.assertAllClose(stats.binom.pmf(counts, n=3., p=p), pmf.eval())
self.assertAllClose(stats.binom.cdf(counts, n=3., p=p), cdf.eval())
self.assertEqual((1, 2), pmf.get_shape())
self.assertEqual((1, 2), cdf.get_shape())
def testPmfAndCdfPStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [0.1, 0.4]
counts = [[1.], [0.]]
binom = binomial.Binomial(total_count=1., probs=p)
pmf = binom.prob(counts)
cdf = binom.cdf(counts)
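      # With total_count=1, prob(1) = p and prob(0) = 1 - p, while
      # cdf(1) = 1 and cdf(0) = 1 - p, which gives the literals below.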
self.assertAllClose([[0.1, 0.4], [0.9, 0.6]], pmf.eval())
self.assertAllClose([[1.0, 1.0], [0.9, 0.6]], cdf.eval())
self.assertEqual((2, 2), pmf.get_shape())
self.assertEqual((2, 2), cdf.get_shape())
def testBinomialMean(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
binom = binomial.Binomial(total_count=n, probs=p)
expected_means = stats.binom.mean(n, p)
self.assertEqual((3,), binom.mean().get_shape())
self.assertAllClose(expected_means, binom.mean().eval())
def testBinomialVariance(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
binom = binomial.Binomial(total_count=n, probs=p)
expected_variances = stats.binom.var(n, p)
self.assertEqual((3,), binom.variance().get_shape())
self.assertAllClose(expected_variances, binom.variance().eval())
def testBinomialMode(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
binom = binomial.Binomial(total_count=n, probs=p)
expected_modes = [0., 1, 4]
self.assertEqual((3,), binom.mode().get_shape())
self.assertAllClose(expected_modes, binom.mode().eval())
def testBinomialMultipleMode(self):
with self.cached_session():
n = 9.
p = [0.1, 0.2, 0.7]
binom = binomial.Binomial(total_count=n, probs=p)
# For the case where (n + 1) * p is an integer, the modes are:
# (n + 1) * p and (n + 1) * p - 1. In this case, we get back
# the larger of the two modes.
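      # For example, with n = 9 and p = 0.2, (n + 1) * p = 2, so both 1 and 2
      # are modes and mode() returns 2; similarly p = 0.1 gives 1 and
      # p = 0.7 gives 7.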
expected_modes = [1., 2, 7]
self.assertEqual((3,), binom.mode().get_shape())
self.assertAllClose(expected_modes, binom.mode().eval())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/binomial_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import deterministic as deterministic_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class DeterministicTest(test.TestCase):
def testShape(self):
with self.cached_session():
loc = rng.rand(2, 3, 4)
deterministic = deterministic_lib.Deterministic(loc)
self.assertAllEqual(deterministic.batch_shape_tensor().eval(), (2, 3, 4))
self.assertAllEqual(deterministic.batch_shape, (2, 3, 4))
self.assertAllEqual(deterministic.event_shape_tensor().eval(), [])
self.assertEqual(deterministic.event_shape, tensor_shape.TensorShape([]))
def testInvalidTolRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Condition x >= 0"):
_ = deterministic_lib.Deterministic(loc, atol=-1, validate_args=True)
# Error detected statically; no need for _.prob(0.).eval()
def testProbWithNoBatchDimsIntegerType(self):
deterministic = deterministic_lib.Deterministic(0)
with self.cached_session():
self.assertAllClose(1, deterministic.prob(0).eval())
self.assertAllClose(0, deterministic.prob(2).eval())
self.assertAllClose([1, 0], deterministic.prob([0, 2]).eval())
def testProbWithNoBatchDims(self):
deterministic = deterministic_lib.Deterministic(0.)
with self.cached_session():
self.assertAllClose(1., deterministic.prob(0.).eval())
self.assertAllClose(0., deterministic.prob(2.).eval())
self.assertAllClose([1., 0.], deterministic.prob([0., 2.]).eval())
def testProbWithDefaultTol(self):
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
deterministic = deterministic_lib.Deterministic(loc)
expected_prob = [[1., 0.], [0., 1.]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATol(self):
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
deterministic = deterministic_lib.Deterministic(loc, atol=0.05)
expected_prob = [[1., 0.], [1., 1.]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATolIntegerType(self):
loc = [[0, 1], [2, 3]]
x = [[0, 2], [4, 2]]
deterministic = deterministic_lib.Deterministic(loc, atol=1)
expected_prob = [[1, 1], [0, 1]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTol(self):
loc = [[0., 1.], [100., 100.]]
x = [[0., 1.1], [100.1, 103.]]
deterministic = deterministic_lib.Deterministic(loc, rtol=0.01)
expected_prob = [[1., 0.], [1., 0.]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTolIntegerType(self):
loc = [[10, 10, 10], [10, 10, 10]]
x = [[10, 20, 30], [10, 20, 30]]
# Batch 0 will have rtol = 0
# Batch 1 will have rtol = 1 (100% slack allowed)
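    # Per Deterministic's tolerance rule, prob is 1 when
    # |x - loc| <= atol + rtol * |loc| (atol defaults to 0 here), so batch 1
    # accepts anything within 10 of loc = 10.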
deterministic = deterministic_lib.Deterministic(loc, rtol=[[0], [1]])
expected_prob = [[1, 0, 0], [1, 1, 0]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 3), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testCdfWithDefaultTol(self):
loc = [[0., 0.], [0., 0.]]
x = [[-1., -0.1], [-0.01, 1.000001]]
deterministic = deterministic_lib.Deterministic(loc)
expected_cdf = [[0., 0.], [0., 1.]]
with self.cached_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testCdfWithNonzeroATol(self):
loc = [[0., 0.], [0., 0.]]
x = [[-1., -0.1], [-0.01, 1.000001]]
deterministic = deterministic_lib.Deterministic(loc, atol=0.05)
expected_cdf = [[0., 0.], [1., 1.]]
with self.cached_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testCdfWithNonzeroRTol(self):
loc = [[1., 1.], [100., 100.]]
x = [[0.9, 1.], [99.9, 97]]
deterministic = deterministic_lib.Deterministic(loc, rtol=0.01)
expected_cdf = [[0., 1.], [1., 0.]]
with self.cached_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testSampleNoBatchDims(self):
deterministic = deterministic_lib.Deterministic(0.)
for sample_shape in [(), (4,)]:
with self.cached_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape, sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape).astype(np.float32), sample.eval())
def testSampleWithBatchDims(self):
deterministic = deterministic_lib.Deterministic([0., 0.])
for sample_shape in [(), (4,)]:
with self.cached_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (2,), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (2,)).astype(np.float32), sample.eval())
def testSampleDynamicWithBatchDims(self):
loc = array_ops.placeholder(np.float32)
sample_shape = array_ops.placeholder(np.int32)
deterministic = deterministic_lib.Deterministic(loc)
for sample_shape_ in [(), (4,)]:
with self.cached_session():
sample_ = deterministic.sample(sample_shape).eval(
feed_dict={loc: [0., 0.],
sample_shape: sample_shape_})
self.assertAllClose(
np.zeros(sample_shape_ + (2,)).astype(np.float32), sample_)
def testEntropy(self):
loc = np.array([-0.1, -3.2, 7.])
deterministic = deterministic_lib.Deterministic(loc=loc)
with self.cached_session() as sess:
entropy_ = sess.run(deterministic.entropy())
self.assertAllEqual(np.zeros(3), entropy_)
class VectorDeterministicTest(test.TestCase):
def testShape(self):
with self.cached_session():
loc = rng.rand(2, 3, 4)
deterministic = deterministic_lib.VectorDeterministic(loc)
self.assertAllEqual(deterministic.batch_shape_tensor().eval(), (2, 3))
self.assertAllEqual(deterministic.batch_shape, (2, 3))
self.assertAllEqual(deterministic.event_shape_tensor().eval(), [4])
self.assertEqual(deterministic.event_shape, tensor_shape.TensorShape([4]))
def testInvalidTolRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Condition x >= 0"):
_ = deterministic_lib.VectorDeterministic(
loc, atol=-1, validate_args=True)
# Error detected statically; no need for _.prob(loc).eval()
def testInvalidXRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
deterministic = deterministic_lib.VectorDeterministic(
loc, atol=None, validate_args=True)
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "must have rank at least 1"):
deterministic.prob(0.).eval()
def testProbVectorDeterministicWithNoBatchDims(self):
# 0 batch of deterministics on R^1.
deterministic = deterministic_lib.VectorDeterministic([0.])
with self.cached_session():
self.assertAllClose(1., deterministic.prob([0.]).eval())
self.assertAllClose(0., deterministic.prob([2.]).eval())
self.assertAllClose([1., 0.], deterministic.prob([[0.], [2.]]).eval())
def testProbWithDefaultTol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
x = [[0., 1.], [1.9, 3.], [3.99, 5.]]
deterministic = deterministic_lib.VectorDeterministic(loc)
expected_prob = [1., 0., 0.]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
x = [[0., 1.], [1.9, 3.], [3.99, 5.]]
deterministic = deterministic_lib.VectorDeterministic(loc, atol=0.05)
expected_prob = [1., 0., 1.]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [1., 1.], [100., 100.]]
x = [[0., 1.], [0.9, 1.], [99.9, 100.1]]
deterministic = deterministic_lib.VectorDeterministic(loc, rtol=0.01)
expected_prob = [1., 0., 1.]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbVectorDeterministicWithNoBatchDimsOnRZero(self):
# 0 batch of deterministics on R^0.
deterministic = deterministic_lib.VectorDeterministic(
[], validate_args=True)
with self.cached_session():
self.assertAllClose(1., deterministic.prob([]).eval())
def testProbVectorDeterministicWithNoBatchDimsOnRZeroRaisesIfXNotInSameRk(
self):
# 0 batch of deterministics on R^0.
deterministic = deterministic_lib.VectorDeterministic(
[], validate_args=True)
with self.cached_session():
with self.assertRaisesOpError("not defined in the same space"):
deterministic.prob([1.]).eval()
def testSampleNoBatchDims(self):
deterministic = deterministic_lib.VectorDeterministic([0.])
for sample_shape in [(), (4,)]:
with self.cached_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (1,), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (1,)).astype(np.float32), sample.eval())
def testSampleWithBatchDims(self):
deterministic = deterministic_lib.VectorDeterministic([[0.], [0.]])
for sample_shape in [(), (4,)]:
with self.cached_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (2, 1), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (2, 1)).astype(np.float32), sample.eval())
def testSampleDynamicWithBatchDims(self):
loc = array_ops.placeholder(np.float32)
sample_shape = array_ops.placeholder(np.int32)
deterministic = deterministic_lib.VectorDeterministic(loc)
for sample_shape_ in [(), (4,)]:
with self.cached_session():
sample_ = deterministic.sample(sample_shape).eval(
feed_dict={loc: [[0.], [0.]],
sample_shape: sample_shape_})
self.assertAllClose(
np.zeros(sample_shape_ + (2, 1)).astype(np.float32), sample_)
def testEntropy(self):
loc = np.array([[8.3, 1.2, 3.3], [-0.1, -3.2, 7.]])
deterministic = deterministic_lib.VectorDeterministic(loc=loc)
with self.cached_session() as sess:
entropy_ = sess.run(deterministic.entropy())
self.assertAllEqual(np.zeros(2), entropy_)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/deterministic_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for OneHotCategorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import onehot_categorical
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
def make_onehot_categorical(batch_shape, num_classes, dtype=dtypes.int32):
logits = random_ops.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtypes.float32) - 50.
return onehot_categorical.OneHotCategorical(logits, dtype=dtype)
class OneHotCategoricalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testP(self):
p = [0.2, 0.8]
dist = onehot_categorical.OneHotCategorical(probs=p)
with self.cached_session():
self.assertAllClose(p, dist.probs.eval())
self.assertAllEqual([2], dist.logits.get_shape())
def testLogits(self):
p = np.array([0.2, 0.8], dtype=np.float32)
logits = np.log(p) - 50.
dist = onehot_categorical.OneHotCategorical(logits=logits)
with self.cached_session():
self.assertAllEqual([2], dist.probs.get_shape())
self.assertAllEqual([2], dist.logits.get_shape())
self.assertAllClose(dist.probs.eval(), p)
self.assertAllClose(dist.logits.eval(), logits)
def testShapes(self):
with self.cached_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape.as_list())
self.assertAllEqual([10], dist.event_shape_tensor().eval())
# event_shape is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10,
tensor_util.constant_value(dist.event_shape_tensor()))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(
batch_shape, constant_op.constant(10, dtype=dtypes.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape.as_list())
self.assertEqual(10, dist.event_shape_tensor().eval())
def testDtype(self):
dist = make_onehot_categorical([], 5, dtype=dtypes.int32)
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_onehot_categorical([], 5, dtype=dtypes.int64)
self.assertEqual(dist.dtype, dtypes.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(dist.logits.dtype, dist.prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
self.assertEqual(dist.logits.dtype, dist.log_prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
def testUnknownShape(self):
with self.cached_session():
logits = array_ops.placeholder(dtype=dtypes.float32)
dist = onehot_categorical.OneHotCategorical(logits)
sample = dist.sample()
# Will sample class 1.
sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
self.assertAllEqual([0, 1], sample_value)
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = sample.eval(
feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
self.assertAllEqual([[0, 1], [1, 0]], sample_value_batch)
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = onehot_categorical.OneHotCategorical(logits)
with self.cached_session():
self.assertAllClose(
dist.entropy().eval(),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = onehot_categorical.OneHotCategorical(logits)
with self.cached_session():
self.assertAllClose(dist.entropy().eval(), [
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
])
def testPmf(self):
    # Check that the probabilities of samples correspond to their class
    # probabilities.
with self.cached_session():
logits = self._rng.random_sample(size=(8, 2, 10))
prob = np.exp(logits)/np.sum(np.exp(logits), axis=-1, keepdims=True)
dist = onehot_categorical.OneHotCategorical(logits=logits)
np_sample = dist.sample().eval()
np_prob = dist.prob(np_sample).eval()
expected_prob = prob[np_sample.astype(np.bool)]
self.assertAllClose(expected_prob, np_prob.flatten())
def testSample(self):
with self.cached_session():
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.)
n = 100
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
self.assertAllEqual([n, 1, 2, 2], sample_values.shape)
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
def testSampleWithSampleShape(self):
with self.cached_session():
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = prob.eval()
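      # For a categorical with probs p, E[p(X)] = sum_k p_k**2, so the mean
      # of the sampled probabilities should approach 0.2**2 + 0.8**2 and
      # 0.4**2 + 0.6**2 for the two batch entries.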
self.assertAllClose([0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()],
atol=1e-2)
self.assertAllClose([0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()],
atol=1e-2)
def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
with self.cached_session() as sess:
for categories in [2, 10]:
for batch_size in [1, 2]:
p_logits = self._rng.random_sample((batch_size, categories))
q_logits = self._rng.random_sample((batch_size, categories))
p = onehot_categorical.OneHotCategorical(logits=p_logits)
q = onehot_categorical.OneHotCategorical(logits=q_logits)
prob_p = np_softmax(p_logits)
prob_q = np_softmax(q_logits)
kl_expected = np.sum(
prob_p * (np.log(prob_p) - np.log(prob_q)), axis=-1)
kl_actual = kullback_leibler.kl_divergence(p, q)
kl_same = kullback_leibler.kl_divergence(p, p)
x = p.sample(int(2e4), seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
# Compute empirical KL(p||q).
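          # KL(p || q) = E_p[log p(X) - log q(X)], so averaging the log-prob
          # differences over samples drawn from p gives a Monte Carlo
          # estimate of it.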
kl_sample = math_ops.reduce_mean(p.log_prob(x) - q.log_prob(x), 0)
[kl_sample_, kl_actual_, kl_same_] = sess.run([kl_sample, kl_actual,
kl_same])
self.assertEqual(kl_actual.get_shape(), (batch_size,))
self.assertAllClose(kl_same_, np.zeros_like(kl_expected))
self.assertAllClose(kl_actual_, kl_expected, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.)
def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess:
logits = self._rng.rand(4, 3, 2).astype(np.float32)
dist = onehot_categorical.OneHotCategorical(logits=logits)
n = int(3e3)
x = dist.sample(n, seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess:
logits = self._rng.rand(3).astype(np.float32)
dist = onehot_categorical.OneHotCategorical(logits=logits)
n = int(1e4)
x = dist.sample(n, seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
sample_mean = math_ops.reduce_mean(x, 0) # elementwise mean
x_centered = x - sample_mean
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([3], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.1)
self.assertAllEqual([3, 3], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.1)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/onehot_categorical_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for computing moving-average statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import moving_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class MovingReduceMeanVarianceTest(test.TestCase):
def test_assign_moving_mean_variance(self):
shape = [1, 2]
true_mean = np.array([[0., 3.]])
true_stddev = np.array([[1.1, 0.5]])
with self.cached_session() as sess:
# Start "x" out with this mean.
mean_var = variables.VariableV1(array_ops.zeros_like(true_mean))
variance_var = variables.VariableV1(array_ops.ones_like(true_stddev))
x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
x = true_stddev * x + true_mean
ema, emv = moving_stats.assign_moving_mean_variance(
mean_var, variance_var, x, decay=0.99)
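      # Each sess.run of (ema, emv) applies one exponentially weighted update;
      # roughly mean <- decay * mean + (1 - decay) * x, with an analogous
      # decay-weighted recursion for the variance (exact form as implemented in
      # moving_stats), so both statistics drift toward the true moments.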
self.assertEqual(ema.dtype.base_dtype, dtypes.float64)
self.assertEqual(emv.dtype.base_dtype, dtypes.float64)
      # Run 2000 updates; moving averages should be near the true values.
variables.global_variables_initializer().run()
for _ in range(2000):
sess.run([ema, emv])
[mean_var_, variance_var_, ema_, emv_] = sess.run([
mean_var, variance_var, ema, emv])
# Test that variables are passed-through.
self.assertAllEqual(mean_var_, ema_)
self.assertAllEqual(variance_var_, emv_)
# Test that values are as expected.
self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
self.assertAllClose(true_stddev**2., emv_, rtol=0.06, atol=0.)
# Change the mean, var then update some more. Moving averages should
# re-converge.
sess.run([
mean_var.assign(np.array([[-1., 2.]])),
variance_var.assign(np.array([[2., 1.]])),
])
for _ in range(2000):
sess.run([ema, emv])
[mean_var_, variance_var_, ema_, emv_] = sess.run([
mean_var, variance_var, ema, emv])
# Test that variables are passed-through.
self.assertAllEqual(mean_var_, ema_)
self.assertAllEqual(variance_var_, emv_)
# Test that values are as expected.
self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
self.assertAllClose(true_stddev**2., emv_, rtol=0.1, atol=0.)
def test_moving_mean_variance(self):
shape = [1, 2]
true_mean = np.array([[0., 3.]])
true_stddev = np.array([[1.1, 0.5]])
with self.cached_session() as sess:
# Start "x" out with this mean.
x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
x = true_stddev * x + true_mean
ema, emv = moving_stats.moving_mean_variance(
x, decay=0.99)
self.assertEqual(ema.dtype.base_dtype, dtypes.float64)
self.assertEqual(emv.dtype.base_dtype, dtypes.float64)
      # Run 2000 updates; moving averages should be near the true values.
variables.global_variables_initializer().run()
for _ in range(2000):
sess.run([ema, emv])
[ema_, emv_] = sess.run([ema, emv])
self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
self.assertAllClose(true_stddev**2., emv_, rtol=0.06, atol=0.)
class MovingLogExponentialMovingMeanExpTest(test.TestCase):
def test_assign_log_moving_mean_exp(self):
shape = [1, 2]
true_mean = np.array([[0., 3.]])
true_stddev = np.array([[1.1, 0.5]])
decay = 0.99
with self.cached_session() as sess:
# Start "x" out with this mean.
x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
x = true_stddev * x + true_mean
log_mean_exp_var = variables.VariableV1(array_ops.zeros_like(true_mean))
variables.global_variables_initializer().run()
log_mean_exp = moving_stats.assign_log_moving_mean_exp(
log_mean_exp_var, x, decay=decay)
expected_ = np.zeros_like(true_mean)
for _ in range(2000):
x_, log_mean_exp_ = sess.run([x, log_mean_exp])
expected_ = np.log(decay * np.exp(expected_) + (1 - decay) * np.exp(x_))
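        # This numpy recursion mirrors the op being tested: the log of an
        # exponential moving average of exp(x), i.e.
        # new = log(decay * exp(old) + (1 - decay) * exp(x)),
        # which the op is expected to evaluate in a numerically stable,
        # log-space form.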
self.assertAllClose(expected_, log_mean_exp_, rtol=1e-6, atol=1e-9)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/moving_stats_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VectorDiffeomixture."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops import vector_diffeomixture as vdm_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.linalg import linear_operator_diag as linop_diag_lib
from tensorflow.python.ops.linalg import linear_operator_identity as linop_identity_lib
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class VectorDiffeomixtureTest(
test_util.VectorDistributionTestHelpers, test.TestCase):
"""Tests the VectorDiffeomixture distribution."""
def testSampleProbConsistentBroadcastMixNoBatch(self):
with self.cached_session() as sess:
dims = 4
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [1.]],
temperature=[1.],
distribution=normal_lib.Normal(0., 1.),
loc=[
None,
np.float32([2.]*dims),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=0., rtol=0.015)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=4., center=2., rtol=0.015)
def testSampleProbConsistentBroadcastMixNonStandardBase(self):
with self.cached_session() as sess:
dims = 4
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [1.]],
temperature=[1.],
distribution=normal_lib.Normal(1., 1.5),
loc=[
None,
np.float32([2.]*dims),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=1., rtol=0.015)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=4., center=3., rtol=0.01)
def testSampleProbConsistentBroadcastMixBatch(self):
with self.cached_session() as sess:
dims = 4
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [1.]],
temperature=[1.],
distribution=normal_lib.Normal(0., 1.),
loc=[
None,
np.float32([2.]*dims),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=[np.float32(1.1)],
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.stack([
np.linspace(2.5, 3.5, dims, dtype=np.float32),
np.linspace(2.75, 3.25, dims, dtype=np.float32),
]),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=0., rtol=0.01)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=4., center=2., rtol=0.01)
def testSampleProbConsistentBroadcastMixTwoBatchDims(self):
dims = 4
loc_1 = rng.randn(2, 3, dims).astype(np.float32)
with self.cached_session() as sess:
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=(rng.rand(2, 3, 1) - 0.5).astype(np.float32),
temperature=[1.],
distribution=normal_lib.Normal(0., 1.),
loc=[
None,
loc_1,
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=[np.float32(1.1)],
is_positive_definite=True),
] * 2,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=0., rtol=0.01)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=3., center=loc_1, rtol=0.02)
def testMeanCovarianceNoBatch(self):
with self.cached_session() as sess:
dims = 3
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [4.]],
temperature=[1 / 10.],
distribution=normal_lib.Normal(0., 1.),
loc=[
np.float32([-2.]),
None,
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.5),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, rtol=0.02, cov_rtol=0.08)
def testTemperatureControlsHowMuchThisLooksLikeDiscreteMixture(self):
# As temperature decreases, this should approach a mixture of normals, with
# components at -2, 2.
with self.cached_session() as sess:
dims = 1
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[0.],
temperature=[[2.], [1.], [0.2]],
distribution=normal_lib.Normal(0., 1.),
loc=[
np.float32([-2.]),
np.float32([2.]),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(0.5),
is_positive_definite=True),
] * 2, # Use the same scale for each component.
quadrature_size=8,
validate_args=True)
samps = vdm.sample(10000)
self.assertAllEqual((10000, 3, 1), samps.shape)
samps_ = sess.run(samps).reshape(10000, 3) # Make scalar event shape.
# One characteristic of a discrete mixture (as opposed to a "smear") is
# that more weight is put near the component centers at -2, 2, and thus
# less weight is put near the origin.
prob_of_being_near_origin = (np.abs(samps_) < 1).mean(axis=0)
self.assertGreater(
prob_of_being_near_origin[0], prob_of_being_near_origin[1])
self.assertGreater(
prob_of_being_near_origin[1], prob_of_being_near_origin[2])
# Run this test as well, just because we can.
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, rtol=0.02, cov_rtol=0.08)
def testConcentrationLocControlsHowMuchWeightIsOnEachComponent(self):
with self.cached_session() as sess:
dims = 1
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[-1.], [0.], [1.]],
temperature=[0.5],
distribution=normal_lib.Normal(0., 1.),
loc=[
np.float32([-2.]),
np.float32([2.]),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(0.5),
is_positive_definite=True),
] * 2, # Use the same scale for each component.
quadrature_size=8,
validate_args=True)
samps = vdm.sample(10000)
self.assertAllEqual((10000, 3, 1), samps.shape)
samps_ = sess.run(samps).reshape(10000, 3) # Make scalar event shape.
# One characteristic of putting more weight on a component is that the
# mean is closer to that component's mean.
      # Get the mean for each batch member; the names signify the value of
      # concentration for that batch member.
mean_neg1, mean_0, mean_1 = samps_.mean(axis=0)
# Since concentration is the concentration for component 0,
# concentration = -1 ==> more weight on component 1, which has mean = 2
# concentration = 0 ==> equal weight
# concentration = 1 ==> more weight on component 0, which has mean = -2
self.assertLess(-2, mean_1)
self.assertLess(mean_1, mean_0)
self.assertLess(mean_0, mean_neg1)
self.assertLess(mean_neg1, 2)
# Run this test as well, just because we can.
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, rtol=0.02, cov_rtol=0.08)
def testMeanCovarianceNoBatchUncenteredNonStandardBase(self):
with self.cached_session() as sess:
dims = 3
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [4.]],
temperature=[0.1],
distribution=normal_lib.Normal(-1., 1.5),
loc=[
np.float32([-2.]),
np.float32([0.]),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.5),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, num_samples=int(1e6), rtol=0.01, cov_atol=0.025)
def testMeanCovarianceBatch(self):
with self.cached_session() as sess:
dims = 3
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [4.]],
temperature=[0.1],
distribution=normal_lib.Normal(0., 1.),
loc=[
np.float32([[-2.]]),
None,
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=[np.float32(1.5)],
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.stack([
np.linspace(2.5, 3.5, dims, dtype=np.float32),
np.linspace(0.5, 1.5, dims, dtype=np.float32),
]),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, rtol=0.02, cov_rtol=0.07)
def testSampleProbConsistentQuadrature(self):
with self.cached_session() as sess:
dims = 4
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[0.],
temperature=[0.1],
distribution=normal_lib.Normal(0., 1.),
loc=[
None,
np.float32([2.]*dims),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=3,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=0., rtol=0.015)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=4., center=2., rtol=0.005)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/vector_diffeomixture_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class SquareBijectorTest(test.TestCase):
"""Tests the correctness of the Y = X ** 2 transformation."""
def testBijectorScalar(self):
with self.cached_session():
bijector = bijectors.Square(validate_args=True)
self.assertEqual("square", bijector.name)
x = [[[1., 5],
[2, 1]],
[[np.sqrt(2.), 3],
[np.sqrt(8.), 1]]]
y = np.square(x)
ildj = -np.log(2.) - np.log(x)
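      # Derivation: for y = x**2 (x > 0), the inverse is x = sqrt(y), so
      # dx/dy = 1 / (2 * sqrt(y)) = 1 / (2 * x) and the inverse log det
      # Jacobian is log|dx/dy| = -log(2) - log(x), as computed above.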
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval(), atol=0., rtol=1e-7)
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=0).eval(),
atol=0.,
rtol=1e-7)
def testScalarCongruency(self):
with self.cached_session():
bijector = bijectors.Square(validate_args=True)
assert_scalar_congruency(bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/square_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ConditionalBijector Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import ConditionalBijector
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class _TestBijector(ConditionalBijector):
def __init__(self):
super(_TestBijector, self).__init__(
forward_min_event_ndims=0,
graph_parents=[],
is_constant_jacobian=True,
validate_args=False,
dtype=dtypes.float32,
name="test_bijector")
def _forward(self, _, arg1, arg2):
raise ValueError("forward", arg1, arg2)
def _inverse(self, _, arg1, arg2):
raise ValueError("inverse", arg1, arg2)
def _inverse_log_det_jacobian(self, _, arg1, arg2):
raise ValueError("inverse_log_det_jacobian", arg1, arg2)
def _forward_log_det_jacobian(self, _, arg1, arg2):
raise ValueError("forward_log_det_jacobian", arg1, arg2)
class ConditionalBijectorTest(test.TestCase):
def testConditionalBijector(self):
b = _TestBijector()
for name in ["forward", "inverse"]:
method = getattr(b, name)
with self.assertRaisesRegexp(ValueError, name + ".*b1.*b2"):
method(1., arg1="b1", arg2="b2")
for name in ["inverse_log_det_jacobian", "forward_log_det_jacobian"]:
method = getattr(b, name)
with self.assertRaisesRegexp(ValueError, name + ".*b1.*b2"):
method(1., event_ndims=0, arg1="b1", arg2="b2")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/conditional_bijector_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AbsoluteValue Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-importing-member
from tensorflow.contrib.distributions.python.ops.bijectors.absolute_value import AbsoluteValue
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# pylint: enable=g-importing-member
class AbsoluteValueTest(test.TestCase):
"""Tests correctness of the absolute value bijector."""
def testBijectorVersusNumpyRewriteOfBasicFunctionsEventNdims0(self):
with self.cached_session() as sess:
bijector = AbsoluteValue(validate_args=True)
self.assertEqual("absolute_value", bijector.name)
x = array_ops.constant([[0., 1., -1], [0., -5., 3.]]) # Shape [2, 3]
y = math_ops.abs(x)
y_ = y.eval()
self.assertAllClose(y_, bijector.forward(x).eval())
self.assertAllClose((-y_, y_), sess.run(bijector.inverse(y)))
self.assertAllClose((0., 0.),
sess.run(bijector.inverse_log_det_jacobian(
y, event_ndims=0)))
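      # Both inverse branches of y = |x|, namely x = -y and x = +y, have
      # |dx/dy| = 1, so the inverse log det Jacobian is 0 for each branch,
      # which is why a (0., 0.) tuple is expected above.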
# Run things twice to make sure there are no issues in caching the tuples
# returned by .inverse*
self.assertAllClose(y_, bijector.forward(x).eval())
self.assertAllClose((-y_, y_), sess.run(bijector.inverse(y)))
self.assertAllClose((0., 0.),
sess.run(bijector.inverse_log_det_jacobian(
y, event_ndims=0)))
def testNegativeYRaisesForInverseIfValidateArgs(self):
with self.cached_session() as sess:
bijector = AbsoluteValue(validate_args=True)
with self.assertRaisesOpError("y was negative"):
sess.run(bijector.inverse(-1.))
def testNegativeYRaisesForILDJIfValidateArgs(self):
with self.cached_session() as sess:
bijector = AbsoluteValue(validate_args=True)
with self.assertRaisesOpError("y was negative"):
sess.run(bijector.inverse_log_det_jacobian(-1., event_ndims=0))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/absolute_value_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ScaleTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ScaleTriLBijectorTest(test.TestCase):
"""Tests the correctness of the ScaleTriL bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testComputesCorrectValues(self):
shift = 1.61803398875
x = np.float32(np.array([-1, .5, 2]))
y = np.float32(np.array([[np.exp(2) + shift, 0.],
[.5, np.exp(-1) + shift]]))
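    # Rough sketch of the transformation being checked: the vector x is first
    # filled into a lower-triangular matrix ([[2., 0.], [.5, -1.]] under the
    # fill_triangular ordering), then the diagonal is passed through the Exp
    # diag_bijector and shifted by diag_shift, giving the expected y above.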
b = bijectors.ScaleTriL(diag_bijector=bijectors.Exp(),
diag_shift=shift)
y_ = self.evaluate(b.forward(x))
self.assertAllClose(y, y_, rtol=1e-4)
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_, rtol=1e-4)
@test_util.run_in_graph_and_eager_modes
def testInvertible(self):
# Generate random inputs from an unconstrained space, with
# event size 6 to specify 3x3 triangular matrices.
batch_shape = [2, 1]
x = np.float32(self._rng.randn(*(batch_shape + [6])))
b = bijectors.ScaleTriL(diag_bijector=bijectors.Softplus(),
diag_shift=3.14159)
y = self.evaluate(b.forward(x))
self.assertAllEqual(y.shape, batch_shape + [3, 3])
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_, rtol=1e-4)
fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllClose(fldj, -ildj, rtol=1e-4)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/scale_tril_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for FillTriangular bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FillTriangularBijectorTest(test.TestCase):
"""Tests the correctness of the FillTriangular bijector."""
@test_util.run_in_graph_and_eager_modes
def testBijector(self):
x = np.float32(np.array([1., 2., 3.]))
y = np.float32(np.array([[3., 0.],
[2., 1.]]))
b = bijectors.FillTriangular()
y_ = self.evaluate(b.forward(x))
self.assertAllClose(y, y_)
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
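    # The fill is just a rearrangement of the input entries into the lower
    # triangle (no scaling), so both log det Jacobians computed below should
    # be exactly 0.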
fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))
self.assertAllClose(fldj, 0.)
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllClose(ildj, 0.)
@test_util.run_in_graph_and_eager_modes
def testShape(self):
x_shape = tensor_shape.TensorShape([5, 4, 6])
y_shape = tensor_shape.TensorShape([5, 4, 3, 3])
b = bijectors.FillTriangular(validate_args=True)
x = array_ops.ones(shape=x_shape, dtype=dtypes.float32)
y_ = b.forward(x)
self.assertAllEqual(y_.shape.as_list(), y_shape.as_list())
x_ = b.inverse(y_)
self.assertAllEqual(x_.shape.as_list(), x_shape.as_list())
y_shape_ = b.forward_event_shape(x_shape)
self.assertAllEqual(y_shape_.as_list(), y_shape.as_list())
x_shape_ = b.inverse_event_shape(y_shape)
self.assertAllEqual(x_shape_.as_list(), x_shape.as_list())
y_shape_tensor = self.evaluate(
b.forward_event_shape_tensor(x_shape.as_list()))
self.assertAllEqual(y_shape_tensor, y_shape.as_list())
x_shape_tensor = self.evaluate(
b.inverse_event_shape_tensor(y_shape.as_list()))
self.assertAllEqual(x_shape_tensor, x_shape.as_list())
@test_util.run_in_graph_and_eager_modes
def testShapeError(self):
b = bijectors.FillTriangular(validate_args=True)
x_shape_bad = tensor_shape.TensorShape([5, 4, 7])
with self.assertRaisesRegexp(ValueError, "is not a triangular number"):
b.forward_event_shape(x_shape_bad)
with self.assertRaisesOpError("is not a triangular number"):
self.evaluate(b.forward_event_shape_tensor(x_shape_bad.as_list()))
y_shape_bad = tensor_shape.TensorShape([5, 4, 3, 2])
with self.assertRaisesRegexp(ValueError, "Matrix must be square"):
b.inverse_event_shape(y_shape_bad)
with self.assertRaisesOpError("Matrix must be square"):
self.evaluate(b.inverse_event_shape_tensor(y_shape_bad.as_list()))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/fill_triangular_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.ordered import Ordered
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class OrderedBijectorTest(test.TestCase):
"""Tests correctness of the ordered transformation."""
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_in_graph_and_eager_modes
def testBijectorVector(self):
ordered = Ordered()
self.assertEqual("ordered", ordered.name)
x = np.asarray([[2., 3, 4], [4., 8, 13]])
y = [[2., 0, 0], [4., np.log(4.), np.log(5.)]]
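    # The Ordered bijector maps an ascending vector to unconstrained space via
    # y[0] = x[0] and y[k] = log(x[k] - x[k-1]) for k >= 1; e.g. [2., 3, 4]
    # -> [2., log(1), log(1)] = [2., 0, 0]. The inverse log det Jacobian is
    # then the sum of y[1:], which the assertions below rely on.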
self.assertAllClose(y, self.evaluate(ordered.forward(x)))
self.assertAllClose(x, self.evaluate(ordered.inverse(y)))
self.assertAllClose(
np.sum(np.asarray(y)[..., 1:], axis=-1),
self.evaluate(ordered.inverse_log_det_jacobian(y, event_ndims=1)),
atol=0.,
rtol=1e-7)
self.assertAllClose(
self.evaluate(-ordered.inverse_log_det_jacobian(y, event_ndims=1)),
self.evaluate(ordered.forward_log_det_jacobian(x, event_ndims=1)),
atol=0.,
rtol=1e-7)
def testBijectorUnknownShape(self):
with self.cached_session():
ordered = Ordered()
self.assertEqual("ordered", ordered.name)
x = array_ops.placeholder(shape=[2, None], dtype=dtypes.float32)
real_x = np.asarray([[2., 3, 4], [4., 8, 13]])
y = array_ops.placeholder(shape=[2, None], dtype=dtypes.float32)
real_y = [[2., 0, 0], [4., np.log(4.), np.log(5.)]]
self.assertAllClose(real_y, ordered.forward(x).eval(
feed_dict={x: real_x}))
self.assertAllClose(real_x, ordered.inverse(y).eval(
feed_dict={y: real_y}))
self.assertAllClose(
np.sum(np.asarray(real_y)[..., 1:], axis=-1),
ordered.inverse_log_det_jacobian(y, event_ndims=1).eval(
feed_dict={y: real_y}),
atol=0.,
rtol=1e-7)
self.assertAllClose(
-ordered.inverse_log_det_jacobian(y, event_ndims=1).eval(
feed_dict={y: real_y}),
ordered.forward_log_det_jacobian(x, event_ndims=1).eval(
feed_dict={x: real_x}),
atol=0.,
rtol=1e-7)
@test_util.run_in_graph_and_eager_modes
def testShapeGetters(self):
x = tensor_shape.TensorShape([4])
y = tensor_shape.TensorShape([4])
bijector = Ordered(validate_args=True)
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(y.as_list(),
self.evaluate(bijector.forward_event_shape_tensor(
x.as_list())))
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(x.as_list(),
self.evaluate(bijector.inverse_event_shape_tensor(
y.as_list())))
def testBijectiveAndFinite(self):
with self.cached_session():
ordered = Ordered()
x = np.sort(self._rng.randn(3, 10), axis=-1).astype(np.float32)
y = (self._rng.randn(3, 10)).astype(np.float32)
assert_bijective_and_finite(ordered, x, y, event_ndims=1)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/ordered_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Scalar Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class AffineScalarBijectorTest(test.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
with self.cached_session():
mu = -1.
# scale corresponds to 1.
bijector = AffineScalar(shift=mu)
self.assertEqual("affine_scalar", bijector.name)
def testNoBatchScalar(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = AffineScalar(shift=mu, scale=2.)
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
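        # Since y = 2*x - 1, the inverse is x = (y + 1) / 2 and dx/dy = 1/2, so
        # the inverse log det Jacobian checked next is -log(2) for every sample.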
self.assertAllClose(
-np.log(2.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testOneBatchScalarViaIdentityIn64BitUserProvidesShiftOnly(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = np.float64([1.])
# One batch, scalar.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
        x = np.float64([1.])  # One sample from one batch.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.], run(bijector.inverse, x))
self.assertAllClose(
0.,
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testOneBatchScalarViaIdentityIn64BitUserProvidesScaleOnly(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
multiplier = np.float64([2.])
# One batch, scalar.
# Corresponds to scale = 2, shift = 0.
bijector = AffineScalar(scale=multiplier)
        x = np.float64([1.])  # One sample from one batch.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.5], run(bijector.inverse, x))
self.assertAllClose(
[np.log(0.5)],
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testTwoBatchScalarIdentityViaIdentity(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value).astype(np.float32)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
0.,
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testTwoBatchScalarIdentityViaScale(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value).astype(np.float32)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
        # Corresponds to scale = [2., 1].
bijector = AffineScalar(shift=mu, scale=[2., 1])
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
[-np.log(2), 0.],
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testScalarCongruency(self):
with self.cached_session():
bijector = AffineScalar(shift=3.6, scale=0.42)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AffineLinearOperator Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import test
class AffineLinearOperatorTest(test.TestCase):
def testIdentity(self):
with self.cached_session():
affine = AffineLinearOperator(
validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
y = x
ildj = 0.
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(ildj, affine.inverse_log_det_jacobian(
y, event_ndims=2).eval())
self.assertAllClose(
-affine.inverse_log_det_jacobian(y, event_ndims=2).eval(),
affine.forward_log_det_jacobian(x, event_ndims=2).eval())
def testDiag(self):
with self.cached_session():
shift = np.array([-1, 0, 1], dtype=np.float32)
diag = np.array([[1, 2, 3],
[2, 5, 6]], dtype=np.float32)
scale = linalg.LinearOperatorDiag(diag, is_non_singular=True)
affine = AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
y = diag * x + shift
ildj = -np.sum(np.log(np.abs(diag)), axis=-1)
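      # For a diagonal scale, x = (y - shift) / diag elementwise, so the
      # inverse log det Jacobian is -sum(log|diag|) over the event dimension,
      # giving one value per batch row here.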
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(
ildj, affine.inverse_log_det_jacobian(y, event_ndims=1).eval())
self.assertAllClose(
-affine.inverse_log_det_jacobian(y, event_ndims=1).eval(),
affine.forward_log_det_jacobian(x, event_ndims=1).eval())
def testTriL(self):
with self.cached_session():
shift = np.array([-1, 0, 1], dtype=np.float32)
tril = np.array([[[3, 0, 0],
[2, -1, 0],
[3, 2, 1]],
[[2, 0, 0],
[3, -2, 0],
[4, 3, 2]]],
dtype=np.float32)
scale = linalg.LinearOperatorLowerTriangular(tril, is_non_singular=True)
affine = AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)
x = np.array([[[1, 0, -1],
[2, 3, 4]],
[[4, 1, -7],
[6, 9, 8]]],
dtype=np.float32)
      # If the bijector instead computed x*A + b, this would simplify to:
# y = np.matmul(x, tril) + shift.
y = np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
ildj = -np.sum(np.log(np.abs(np.diagonal(
tril, axis1=-2, axis2=-1))))
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(
ildj, affine.inverse_log_det_jacobian(
y, event_ndims=2).eval())
self.assertAllClose(
-affine.inverse_log_det_jacobian(y, event_ndims=2).eval(),
affine.forward_log_det_jacobian(x, event_ndims=2).eval())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exp Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class ExpBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = exp(X) transformation."""
def testBijector(self):
with self.cached_session():
bijector = Exp()
self.assertEqual("exp", bijector.name)
x = [[[1.], [2.]]]
y = np.exp(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
-np.squeeze(np.log(y), axis=-1),
bijector.inverse_log_det_jacobian(
y, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(
np.exp(x), event_ndims=1).eval(),
bijector.forward_log_det_jacobian(
x, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
bijector = Exp()
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = Exp()
x = np.linspace(-10, 10, num=10).astype(np.float32)
y = np.logspace(-10, 10, num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, event_ndims=0)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/exp_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MaskedAutoregressiveFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import real_nvp_default_template
from tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import RealNVP
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
class RealNVPTest(test_util.VectorDistributionTestHelpers, test.TestCase):
@property
def _real_nvp_kwargs(self):
return {
"shift_and_log_scale_fn": real_nvp_default_template(
hidden_layers=[3], shift_only=False),
"is_constant_jacobian": False,
}
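  # A rough sketch of the coupling these kwargs configure (assuming the usual
  # RealNVP form): the first num_masked coordinates x1 pass through unchanged,
  # while the remaining coordinates are transformed as
  # y2 = x2 * exp(s(x1)) + t(x1), with s and t produced by
  # shift_and_log_scale_fn; the tests below only rely on invertibility and
  # log-det consistency of that map.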
def testBijector(self):
x_ = np.arange(3 * 4 * 2).astype(np.float32).reshape(3, 4 * 2)
with self.cached_session() as sess:
nvp = RealNVP(
num_masked=4,
validate_args=True,
**self._real_nvp_kwargs)
x = constant_op.constant(x_)
forward_x = nvp.forward(x)
# Use identity to invalidate cache.
inverse_y = nvp.inverse(array_ops.identity(forward_x))
forward_inverse_y = nvp.forward(inverse_y)
fldj = nvp.forward_log_det_jacobian(x, event_ndims=1)
# Use identity to invalidate cache.
ildj = nvp.inverse_log_det_jacobian(
array_ops.identity(forward_x), event_ndims=1)
variables.global_variables_initializer().run()
[
forward_x_,
inverse_y_,
forward_inverse_y_,
ildj_,
fldj_,
] = sess.run([
forward_x,
inverse_y,
forward_inverse_y,
ildj,
fldj,
])
self.assertEqual("real_nvp", nvp.name)
self.assertAllClose(forward_x_, forward_inverse_y_, rtol=1e-1, atol=0.)
self.assertAllClose(x_, inverse_y_, rtol=1e-1, atol=0.)
self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)
def testMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
nvp = RealNVP(
num_masked=3,
validate_args=True,
**self._real_nvp_kwargs)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=nvp,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
nvp = Invert(RealNVP(
num_masked=3,
validate_args=True,
**self._real_nvp_kwargs))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=nvp,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
class NICETest(RealNVPTest):
@property
def _real_nvp_kwargs(self):
return {
"shift_and_log_scale_fn": real_nvp_default_template(
hidden_layers=[2], shift_only=True),
"is_constant_jacobian": True,
}
class RealNVPConstantShiftScaleTest(RealNVPTest):
@property
def _real_nvp_kwargs(self):
def constant_shift_log_scale_fn(x0, output_units):
del x0, output_units
shift = constant_op.constant([0.1])
log_scale = constant_op.constant([0.5])
return shift, log_scale
return {
"shift_and_log_scale_fn": constant_shift_log_scale_fn,
"is_constant_jacobian": True,
}
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/real_nvp_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class CholeskyOuterProductBijectorTest(test.TestCase):
"""Tests the correctness of the Y = X @ X.T transformation."""
def testBijectorMatrix(self):
with self.cached_session():
bijector = bijectors.CholeskyOuterProduct(validate_args=True)
self.assertEqual("cholesky_outer_product", bijector.name)
x = [[[1., 0], [2, 1]], [[np.sqrt(2.), 0], [np.sqrt(8.), 1]]]
y = np.matmul(x, np.transpose(x, axes=(0, 2, 1)))
# Fairly easy to compute differentials since we have 2x2.
dx_dy = [[[2. * 1, 0, 0],
[2, 1, 0],
[0, 2 * 2, 2 * 1]],
[[2 * np.sqrt(2.), 0, 0],
[np.sqrt(8.), np.sqrt(2.), 0],
[0, 2 * np.sqrt(8.), 2 * 1]]]
ildj = -np.sum(
np.log(np.asarray(dx_dy).diagonal(
offset=0, axis1=1, axis2=2)),
axis=1)
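      # The dx_dy arrays above hold the forward differentials dY/dX of the
      # free entries (y11, y21, y22) with respect to (x11, x21, x22); since
      # each is lower triangular, log|det| is the sum of the log diagonals,
      # and the inverse log det Jacobian is its negation, per batch member.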
self.assertAllEqual((2, 2, 2), bijector.forward(x).get_shape())
self.assertAllEqual((2, 2, 2), bijector.inverse(y).get_shape())
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=2).eval(), atol=0., rtol=1e-7)
self.assertAllClose(
-bijector.inverse_log_det_jacobian(
y, event_ndims=2).eval(),
bijector.forward_log_det_jacobian(
x, event_ndims=2).eval(),
atol=0.,
rtol=1e-7)
def testNoBatchStaticJacobian(self):
x = np.eye(2)
bijector = bijectors.CholeskyOuterProduct()
# The Jacobian matrix is 2 * tf.eye(2), which has jacobian determinant 4.
self.assertAllClose(
np.log(4),
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=2)))
def testNoBatchDynamicJacobian(self):
x = np.eye(2)
bijector = bijectors.CholeskyOuterProduct()
x_pl = array_ops.placeholder(dtypes.float32)
with self.cached_session():
log_det_jacobian = bijector.forward_log_det_jacobian(x_pl, event_ndims=2)
# The Jacobian matrix is 2 * tf.eye(2), which has jacobian determinant 4.
self.assertAllClose(
np.log(4),
log_det_jacobian.eval({x_pl: x}))
def testNoBatchStatic(self):
x = np.array([[1., 0], [2, 1]]) # np.linalg.cholesky(y)
y = np.array([[1., 2], [2, 5]]) # np.matmul(x, x.T)
with self.cached_session() as sess:
y_actual = bijectors.CholeskyOuterProduct().forward(x=x)
x_actual = bijectors.CholeskyOuterProduct().inverse(y=y)
[y_actual_, x_actual_] = sess.run([y_actual, x_actual])
self.assertAllEqual([2, 2], y_actual.get_shape())
self.assertAllEqual([2, 2], x_actual.get_shape())
self.assertAllClose(y, y_actual_)
self.assertAllClose(x, x_actual_)
def testNoBatchDeferred(self):
x = np.array([[1., 0], [2, 1]]) # np.linalg.cholesky(y)
y = np.array([[1., 2], [2, 5]]) # np.matmul(x, x.T)
with self.cached_session() as sess:
x_pl = array_ops.placeholder(dtypes.float32)
y_pl = array_ops.placeholder(dtypes.float32)
y_actual = bijectors.CholeskyOuterProduct().forward(x=x_pl)
x_actual = bijectors.CholeskyOuterProduct().inverse(y=y_pl)
[y_actual_, x_actual_] = sess.run([y_actual, x_actual],
feed_dict={x_pl: x, y_pl: y})
self.assertEqual(None, y_actual.get_shape())
self.assertEqual(None, x_actual.get_shape())
self.assertAllClose(y, y_actual_)
self.assertAllClose(x, x_actual_)
def testBatchStatic(self):
x = np.array([[[1., 0],
[2, 1]],
[[3., 0],
[1, 2]]]) # np.linalg.cholesky(y)
y = np.array([[[1., 2],
[2, 5]],
[[9., 3],
[3, 5]]]) # np.matmul(x, x.T)
with self.cached_session() as sess:
y_actual = bijectors.CholeskyOuterProduct().forward(x=x)
x_actual = bijectors.CholeskyOuterProduct().inverse(y=y)
[y_actual_, x_actual_] = sess.run([y_actual, x_actual])
self.assertEqual([2, 2, 2], y_actual.get_shape())
self.assertEqual([2, 2, 2], x_actual.get_shape())
self.assertAllClose(y, y_actual_)
self.assertAllClose(x, x_actual_)
def testBatchDeferred(self):
x = np.array([[[1., 0],
[2, 1]],
[[3., 0],
[1, 2]]]) # np.linalg.cholesky(y)
y = np.array([[[1., 2],
[2, 5]],
[[9., 3],
[3, 5]]]) # np.matmul(x, x.T)
with self.cached_session() as sess:
x_pl = array_ops.placeholder(dtypes.float32)
y_pl = array_ops.placeholder(dtypes.float32)
y_actual = bijectors.CholeskyOuterProduct().forward(x=x_pl)
x_actual = bijectors.CholeskyOuterProduct().inverse(y=y_pl)
[y_actual_, x_actual_] = sess.run([y_actual, x_actual],
feed_dict={x_pl: x, y_pl: y})
self.assertEqual(None, y_actual.get_shape())
self.assertEqual(None, x_actual.get_shape())
self.assertAllClose(y, y_actual_)
self.assertAllClose(x, x_actual_)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/cholesky_outer_product_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops.bijectors.gumbel import Gumbel
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class GumbelBijectorTest(test.TestCase):
"""Tests correctness of the Gumbel bijector."""
def testBijector(self):
with self.cached_session():
loc = 0.3
scale = 5.
bijector = Gumbel(loc=loc, scale=scale, validate_args=True)
self.assertEqual("gumbel", bijector.name)
x = np.array([[[-3.], [0.], [0.5], [4.2], [12.]]], dtype=np.float32)
# Gumbel distribution
gumbel_dist = stats.gumbel_r(loc=loc, scale=scale)
y = gumbel_dist.cdf(x).astype(np.float32)
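      # The Gumbel bijector's forward map is the Gumbel CDF,
      # y = exp(-exp(-(x - loc) / scale)), so dy/dx is the Gumbel pdf and the
      # forward log det Jacobian equals the Gumbel log-pdf checked below.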
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
np.squeeze(gumbel_dist.logpdf(x), axis=-1),
bijector.forward_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=1).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=1).eval(),
rtol=1e-4,
atol=0.)
def testScalarCongruency(self):
with self.cached_session():
assert_scalar_congruency(
Gumbel(loc=0.3, scale=20.), lower_x=1., upper_x=100., rtol=0.02)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = Gumbel(loc=0., scale=3.0, validate_args=True)
x = np.linspace(-10., 10., num=10).astype(np.float32)
y = np.linspace(0.01, 0.99, num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, event_ndims=0, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/gumbel_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Permute bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.permute import Permute
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class PermuteBijectorTest(test.TestCase):
"""Tests correctness of the Permute bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
expected_permutation = np.int32([2, 0, 1])
expected_x = np.random.randn(4, 2, 3)
expected_y = expected_x[..., expected_permutation]
with self.cached_session() as sess:
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
[
permutation_,
x_,
y_,
fldj,
ildj,
] = sess.run([
bijector.permutation,
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x, event_ndims=1),
bijector.inverse_log_det_jacobian(expected_y, event_ndims=1),
], feed_dict={permutation_ph: expected_permutation})
self.assertEqual("permute", bijector.name)
self.assertAllEqual(expected_permutation, permutation_)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
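      # Permuting coordinates is volume-preserving, so both log-det-Jacobians
      # are exactly zero.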
self.assertAllClose(0., fldj, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj, rtol=1e-6, atol=0)
def testRaisesOpError(self):
with self.cached_session() as sess:
with self.assertRaisesOpError("Permutation over `d` must contain"):
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
sess.run(bijector.inverse([1.]),
feed_dict={permutation_ph: [1, 2]})
def testBijectiveAndFinite(self):
permutation = np.int32([2, 0, 1])
x = np.random.randn(4, 2, 3)
y = x[..., permutation]
with self.cached_session():
bijector = Permute(permutation=permutation, validate_args=True)
assert_bijective_and_finite(
bijector, x, y, event_ndims=1, rtol=1e-6, atol=0)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/permute_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MaskedAutoregressiveFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import _gen_mask
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import masked_autoregressive_default_template
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import MaskedAutoregressiveFlow
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
class GenMaskTest(test.TestCase):
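  # _gen_mask builds MADE-style autoregressive masks: an "exclusive" mask lets
  # an output unit depend only on input units in strictly earlier blocks, while
  # an "inclusive" mask also allows dependence on the unit's own block.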
def test346Exclusive(self):
expected_mask = np.array(
[[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0]])
mask = _gen_mask(num_blocks=3, n_in=4, n_out=6, mask_type="exclusive")
self.assertAllEqual(expected_mask, mask)
def test346Inclusive(self):
expected_mask = np.array(
[[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 0]])
mask = _gen_mask(num_blocks=3, n_in=4, n_out=6, mask_type="inclusive")
self.assertAllEqual(expected_mask, mask)
class MaskedAutoregressiveFlowTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
@property
def _autoregressive_flow_kwargs(self):
return {
"shift_and_log_scale_fn": masked_autoregressive_default_template(
hidden_layers=[2], shift_only=False),
"is_constant_jacobian": False,
}
def testBijector(self):
x_ = np.arange(3 * 4 * 2).astype(np.float32).reshape(3, 4, 2)
with self.cached_session() as sess:
ma = MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs)
x = constant_op.constant(x_)
forward_x = ma.forward(x)
# Use identity to invalidate cache.
inverse_y = ma.inverse(array_ops.identity(forward_x))
fldj = ma.forward_log_det_jacobian(x, event_ndims=1)
# Use identity to invalidate cache.
ildj = ma.inverse_log_det_jacobian(
array_ops.identity(forward_x), event_ndims=1)
variables.global_variables_initializer().run()
[
forward_x_,
inverse_y_,
ildj_,
fldj_,
] = sess.run([
forward_x,
inverse_y,
ildj,
fldj,
])
self.assertEqual("masked_autoregressive_flow", ma.name)
self.assertAllClose(forward_x_, forward_x_, rtol=1e-6, atol=0.)
self.assertAllClose(x_, inverse_y_, rtol=1e-5, atol=0.)
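      # By definition of a bijector, ildj(y) == -fldj(x) whenever y == forward(x).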
self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)
def testMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
ma = MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=ma,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
ma = Invert(MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=ma,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
class MaskedAutoregressiveFlowShiftOnlyTest(MaskedAutoregressiveFlowTest):
@property
def _autoregressive_flow_kwargs(self):
return {
"shift_and_log_scale_fn": masked_autoregressive_default_template(
hidden_layers=[2], shift_only=True),
"is_constant_jacobian": True,
}
class MaskedAutoregressiveFlowUnrollLoopTest(MaskedAutoregressiveFlowTest):
@property
def _autoregressive_flow_kwargs(self):
return {
"shift_and_log_scale_fn": masked_autoregressive_default_template(
hidden_layers=[2], shift_only=False),
"is_constant_jacobian": False,
"unroll_loop": True,
}
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/masked_autoregressive_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class InvertBijectorTest(test.TestCase):
"""Tests the correctness of the Y = Invert(bij) transformation."""
def testBijector(self):
with self.cached_session():
for fwd in [
bijectors.Identity(),
bijectors.Exp(),
bijectors.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
bijectors.Softplus(),
bijectors.SoftmaxCentered(),
]:
rev = bijectors.Invert(fwd)
self.assertEqual("_".join(["invert", fwd.name]), rev.name)
x = [[[1., 2.],
[2., 3.]]]
self.assertAllClose(fwd.inverse(x).eval(), rev.forward(x).eval())
self.assertAllClose(fwd.forward(x).eval(), rev.inverse(x).eval())
self.assertAllClose(
fwd.forward_log_det_jacobian(x, event_ndims=1).eval(),
rev.inverse_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
fwd.inverse_log_det_jacobian(x, event_ndims=1).eval(),
rev.forward_log_det_jacobian(x, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
bijector = bijectors.Invert(bijectors.Exp())
assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def testShapeGetters(self):
with self.cached_session():
bijector = bijectors.Invert(bijectors.SoftmaxCentered(validate_args=True))
x = tensor_shape.TensorShape([2])
y = tensor_shape.TensorShape([1])
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(
y.as_list(),
bijector.forward_event_shape_tensor(x.as_list()).eval())
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(
x.as_list(),
bijector.inverse_event_shape_tensor(y.as_list()).eval())
def testDocstringExample(self):
with self.cached_session():
exp_gamma_distribution = (
transformed_distribution_lib.TransformedDistribution(
distribution=gamma_lib.Gamma(concentration=1., rate=2.),
bijector=bijectors.Invert(bijectors.Exp())))
self.assertAllEqual(
[], array_ops.shape(exp_gamma_distribution.sample()).eval())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chain Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
from tensorflow.contrib.distributions.python.ops.bijectors.chain import Chain
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class ShapeChanging(bijector.Bijector):
"""Only used for op_ndims manipulation."""
def __init__(self, forward_min_event_ndims=0, inverse_min_event_ndims=3):
super(ShapeChanging, self).__init__(
forward_min_event_ndims=forward_min_event_ndims,
inverse_min_event_ndims=inverse_min_event_ndims,
validate_args=False, name="shape_changer")
class ChainBijectorTest(test.TestCase):
"""Tests the correctness of the Y = Chain(bij1, bij2, bij3) transformation."""
def testBijector(self):
with self.cached_session():
chain = Chain((Exp(), Softplus()))
self.assertEqual("chain_of_exp_of_softplus", chain.name)
x = np.asarray([[[1., 2.],
[2., 3.]]])
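      # Chain applies bijectors right-to-left: forward(x) = Exp(Softplus(x))
      # = 1 + exp(x), so inverse(y) = log(y - 1), and the elementwise fldj is
      # softplus(x) + log(sigmoid(x)) = x, summed over the event dimension below.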
self.assertAllClose(1. + np.exp(x), chain.forward(x).eval())
self.assertAllClose(np.log(x - 1.), chain.inverse(x).eval())
self.assertAllClose(
-np.sum(np.log(x - 1.), axis=2),
chain.inverse_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
np.sum(x, axis=2),
chain.forward_log_det_jacobian(x, event_ndims=1).eval())
def testBijectorIdentity(self):
with self.cached_session():
chain = Chain()
self.assertEqual("identity", chain.name)
x = np.asarray([[[1., 2.],
[2., 3.]]])
self.assertAllClose(x, chain.forward(x).eval())
self.assertAllClose(x, chain.inverse(x).eval())
self.assertAllClose(
0., chain.inverse_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
0., chain.forward_log_det_jacobian(x, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
chain = Chain((Exp(), Softplus()))
assert_scalar_congruency(
chain, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def testShapeGetters(self):
with self.cached_session():
chain = Chain([
SoftmaxCentered(validate_args=True),
SoftmaxCentered(validate_args=True),
])
x = tensor_shape.TensorShape([1])
y = tensor_shape.TensorShape([2 + 1])
self.assertAllEqual(y, chain.forward_event_shape(x))
self.assertAllEqual(
y.as_list(),
chain.forward_event_shape_tensor(x.as_list()).eval())
self.assertAllEqual(x, chain.inverse_event_shape(y))
self.assertAllEqual(
x.as_list(),
chain.inverse_event_shape_tensor(y.as_list()).eval())
def testMinEventNdimsChain(self):
chain = Chain([Exp(), Exp(), Exp()])
self.assertEqual(0, chain.forward_min_event_ndims)
self.assertEqual(0, chain.inverse_min_event_ndims)
chain = Chain([Affine(), Affine(), Affine()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
chain = Chain([Exp(), Affine()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
chain = Chain([Affine(), Exp()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
chain = Chain([Affine(), Exp(), Softplus(), Affine()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
def testMinEventNdimsShapeChangingAddDims(self):
chain = Chain([ShapeChanging()])
self.assertEqual(0, chain.forward_min_event_ndims)
self.assertEqual(3, chain.inverse_min_event_ndims)
chain = Chain([ShapeChanging(), Affine()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(4, chain.inverse_min_event_ndims)
chain = Chain([Affine(), ShapeChanging()])
self.assertEqual(0, chain.forward_min_event_ndims)
self.assertEqual(3, chain.inverse_min_event_ndims)
chain = Chain([ShapeChanging(), ShapeChanging()])
self.assertEqual(0, chain.forward_min_event_ndims)
self.assertEqual(6, chain.inverse_min_event_ndims)
def testMinEventNdimsShapeChangingRemoveDims(self):
chain = Chain([ShapeChanging(3, 0)])
self.assertEqual(3, chain.forward_min_event_ndims)
self.assertEqual(0, chain.inverse_min_event_ndims)
chain = Chain([ShapeChanging(3, 0), Affine()])
self.assertEqual(3, chain.forward_min_event_ndims)
self.assertEqual(0, chain.inverse_min_event_ndims)
chain = Chain([Affine(), ShapeChanging(3, 0)])
self.assertEqual(4, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
chain = Chain([ShapeChanging(3, 0), ShapeChanging(3, 0)])
self.assertEqual(6, chain.forward_min_event_ndims)
self.assertEqual(0, chain.inverse_min_event_ndims)
def testMinEventNdimsShapeChangingAddRemoveDims(self):
chain = Chain([
ShapeChanging(2, 1),
ShapeChanging(3, 0),
ShapeChanging(1, 2)])
self.assertEqual(4, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
def testChainExpAffine(self):
scale_diag = np.array([1., 2., 3.], dtype=np.float32)
chain = Chain([Exp(), Affine(scale_diag=scale_diag)])
x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
y = [1., 4., 27.]
self.assertAllClose(y, self.evaluate(chain.forward(x)))
self.assertAllClose(x, self.evaluate(chain.inverse(y)))
self.assertAllClose(
np.log(6, dtype=np.float32) + np.sum(scale_diag * x),
self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
-np.log(6, dtype=np.float32) - np.sum(scale_diag * x),
self.evaluate(chain.inverse_log_det_jacobian(y, event_ndims=1)))
def testChainAffineExp(self):
scale_diag = np.array([1., 2., 3.], dtype=np.float32)
chain = Chain([Affine(scale_diag=scale_diag), Exp()])
x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
y = [1., 4., 9.]
self.assertAllClose(y, self.evaluate(chain.forward(x)))
self.assertAllClose(x, self.evaluate(chain.inverse(y)))
self.assertAllClose(
np.log(6, dtype=np.float32) + np.sum(x),
self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
-np.log(6, dtype=np.float32) - np.sum(x),
self.evaluate(chain.inverse_log_det_jacobian(y, event_ndims=1)))
def testChainIldjWithPlaceholder(self):
chain = Chain((Exp(), Exp()))
samples = array_ops.placeholder(
dtype=np.float32, shape=[None, 10], name="samples")
ildj = chain.inverse_log_det_jacobian(samples, event_ndims=0)
    self.assertIsNotNone(ildj)
with self.cached_session():
ildj.eval({samples: np.zeros([2, 10], np.float32)})
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/chain_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sigmoid Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class SigmoidBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation."""
def testBijector(self):
with self.cached_session():
self.assertEqual("sigmoid", Sigmoid().name)
x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)
y = special.expit(x)
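      # For y = sigmoid(x), dy/dx = y * (1 - y), so the elementwise inverse
      # log-det-Jacobian is -log(y) - log(1 - y).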
ildj = -np.log(y) - np.log1p(-y)
bijector = Sigmoid()
self.assertAllClose(y, bijector.forward(x).eval(), atol=0., rtol=1e-2)
self.assertAllClose(x, bijector.inverse(y).eval(), atol=0., rtol=1e-4)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval(), atol=0., rtol=1e-6)
self.assertAllClose(-ildj, bijector.forward_log_det_jacobian(
x, event_ndims=0).eval(), atol=0., rtol=1e-4)
def testScalarCongruency(self):
with self.cached_session():
assert_scalar_congruency(Sigmoid(), lower_x=-7., upper_x=7.)
def testBijectiveAndFinite(self):
with self.cached_session():
x = np.linspace(-7., 7., 100).astype(np.float32)
eps = 1e-3
y = np.linspace(eps, 1. - eps, 100).astype(np.float32)
assert_bijective_and_finite(
Sigmoid(), x, y, event_ndims=0, atol=0., rtol=1e-4)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchNorm Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.batch_normalization import BatchNormalization
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class BatchNormTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
def _reduction_axes(self, input_shape, event_dims):
if isinstance(event_dims, int):
event_dims = [event_dims]
ndims = len(input_shape)
# Convert event_dims to non-negative indexing.
event_dims = list(event_dims)
for idx, x in enumerate(event_dims):
if x < 0:
event_dims[idx] = ndims + x
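    # E.g. input_shape=(5, 2, 4) with event_dims=[-1] normalizes over axis 2,
    # so the reduction (batch) axes are (0, 1).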
return tuple(i for i in range(ndims) if i not in event_dims)
def testForwardInverse(self):
"""Tests forward and backward passes with different event shapes.
input_shape: Tuple of shapes for input tensor.
event_dims: Tuple of dimension indices that will be normalized.
training: Boolean of whether bijector runs in training or inference mode.
"""
params = [
((5*2, 4), [-1], False),
((5, 2, 4), [-1], False),
((5, 2, 4), [1, 2], False),
((5, 2, 4), [0, 1], False),
((5*2, 4), [-1], True),
((5, 2, 4), [-1], True),
((5, 2, 4), [1, 2], True),
((5, 2, 4), [0, 1], True)
]
for input_shape, event_dims, training in params:
x_ = np.arange(5 * 4 * 2).astype(np.float32).reshape(input_shape)
with self.cached_session() as sess:
x = constant_op.constant(x_)
        # With momentum=0., the layer memorizes the exact statistics of the
        # last minibatch it normalized instead of keeping a moving average.
layer = normalization.BatchNormalization(
axis=event_dims, momentum=0., epsilon=0.)
batch_norm = BatchNormalization(
batchnorm_layer=layer, training=training)
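        # The BatchNormalization bijector normalizes in the `inverse`
        # direction and de-normalizes in `forward`.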
# Minibatch statistics are saved only after norm_x has been computed.
norm_x = batch_norm.inverse(x)
with ops.control_dependencies(batch_norm.batchnorm.updates):
moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
denorm_x = batch_norm.forward(array_ops.identity(norm_x))
fldj = batch_norm.forward_log_det_jacobian(
x, event_ndims=len(event_dims))
# Use identity to invalidate cache.
ildj = batch_norm.inverse_log_det_jacobian(
array_ops.identity(denorm_x), event_ndims=len(event_dims))
variables.global_variables_initializer().run()
# Update variables.
norm_x_ = sess.run(norm_x)
[
norm_x_,
moving_mean_,
moving_var_,
denorm_x_,
ildj_,
fldj_,
] = sess.run([
norm_x,
moving_mean,
moving_var,
denorm_x,
ildj,
fldj,
])
self.assertEqual("batch_normalization", batch_norm.name)
reduction_axes = self._reduction_axes(input_shape, event_dims)
keepdims = len(event_dims) > 1
expected_batch_mean = np.mean(
x_, axis=reduction_axes, keepdims=keepdims)
expected_batch_var = np.var(x_, axis=reduction_axes, keepdims=keepdims)
if training:
# When training=True, values become normalized across batch dim and
# original values are recovered after de-normalizing.
zeros = np.zeros_like(norm_x_)
self.assertAllClose(np.mean(zeros, axis=reduction_axes),
np.mean(norm_x_, axis=reduction_axes))
self.assertAllClose(expected_batch_mean, moving_mean_)
self.assertAllClose(expected_batch_var, moving_var_)
self.assertAllClose(x_, denorm_x_, atol=1e-5)
# Since moving statistics are set to batch statistics after
# normalization, ildj and -fldj should match.
self.assertAllClose(ildj_, -fldj_)
# ildj is computed with minibatch statistics.
expected_ildj = np.sum(np.log(1.) - .5 * np.log(
expected_batch_var + batch_norm.batchnorm.epsilon))
self.assertAllClose(expected_ildj, ildj_)
else:
# When training=False, moving_mean, moving_var remain at their
# initialized values (0., 1.), resulting in no scale/shift (a small
# shift occurs if epsilon > 0.)
self.assertAllClose(x_, norm_x_, atol=1e-5, rtol=1e-5)
self.assertAllClose(x_, denorm_x_, atol=1e-5, rtol=1e-5)
# ildj is computed with saved statistics.
expected_ildj = np.sum(
np.log(1.) - .5 * np.log(1. + batch_norm.batchnorm.epsilon))
self.assertAllClose(expected_ildj, ildj_)
def testMaximumLikelihoodTraining(self):
# Test Maximum Likelihood training with default bijector.
with self.cached_session() as sess:
base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
batch_norm = BatchNormalization(training=True)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=base_dist,
bijector=batch_norm)
target_dist = distributions.MultivariateNormalDiag(loc=[1., 2.])
target_samples = target_dist.sample(100)
dist_samples = dist.sample(3000)
loss = -math_ops.reduce_mean(dist.log_prob(target_samples))
with ops.control_dependencies(batch_norm.batchnorm.updates):
train_op = adam.AdamOptimizer(1e-2).minimize(loss)
moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
variables.global_variables_initializer().run()
for _ in range(3000):
sess.run(train_op)
[
dist_samples_,
moving_mean_,
moving_var_
] = sess.run([
dist_samples,
moving_mean,
moving_var
])
self.assertAllClose([1., 2.], np.mean(dist_samples_, axis=0), atol=5e-2)
self.assertAllClose([1., 2.], moving_mean_, atol=5e-2)
self.assertAllClose([1., 1.], moving_var_, atol=5e-2)
def testLogProb(self):
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
dist = transformed_distribution_lib.TransformedDistribution(
distribution=base_dist,
bijector=batch_norm,
validate_args=True)
samples = dist.sample(int(1e5))
      # No volume distortion since training=False and the bijector is
      # initialized to the identity transformation.
base_log_prob = base_dist.log_prob(samples)
dist_log_prob = dist.log_prob(samples)
variables.global_variables_initializer().run()
base_log_prob_, dist_log_prob_ = sess.run([base_log_prob, dist_log_prob])
self.assertAllClose(base_log_prob_, dist_log_prob_)
def testMutuallyConsistent(self):
# BatchNorm bijector is only mutually consistent when training=False.
dims = 4
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=batch_norm,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=2.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
# BatchNorm bijector is only mutually consistent when training=False.
dims = 4
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = Invert(
BatchNormalization(batchnorm_layer=layer, training=False))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=batch_norm,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=2.,
center=0.,
rtol=0.02)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/batch_normalization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.softsign import Softsign
from tensorflow.python.framework import test_util
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class SoftsignBijectorTest(test.TestCase):
"""Tests the correctness of the Y = g(X) = X / (1 + |X|) transformation."""
def _softsign(self, x):
return x / (1. + np.abs(x))
def _softsign_ildj_before_reduction(self, y):
"""Inverse log det jacobian, before being reduced."""
return -2. * np.log1p(-np.abs(y))
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_in_graph_and_eager_modes
def testBijectorBounds(self):
bijector = Softsign(validate_args=True)
with self.assertRaisesOpError("greater than -1"):
self.evaluate(bijector.inverse(-3.))
with self.assertRaisesOpError("greater than -1"):
self.evaluate(bijector.inverse_log_det_jacobian(-3., event_ndims=0))
with self.assertRaisesOpError("less than 1"):
self.evaluate(bijector.inverse(3.))
with self.assertRaisesOpError("less than 1"):
self.evaluate(bijector.inverse_log_det_jacobian(3., event_ndims=0))
@test_util.run_in_graph_and_eager_modes
def testBijectorForwardInverse(self):
bijector = Softsign(validate_args=True)
self.assertEqual("softsign", bijector.name)
x = 2. * self._rng.randn(2, 10)
y = self._softsign(x)
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
@test_util.run_in_graph_and_eager_modes
def testBijectorLogDetJacobianEventDimsZero(self):
bijector = Softsign(validate_args=True)
y = self._rng.rand(2, 10)
# No reduction needed if event_dims = 0.
ildj = self._softsign_ildj_before_reduction(y)
self.assertAllClose(ildj, self.evaluate(
bijector.inverse_log_det_jacobian(y, event_ndims=0)))
@test_util.run_in_graph_and_eager_modes
def testBijectorForwardInverseEventDimsOne(self):
bijector = Softsign(validate_args=True)
self.assertEqual("softsign", bijector.name)
x = 2. * self._rng.randn(2, 10)
y = self._softsign(x)
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
@test_util.run_in_graph_and_eager_modes
def testBijectorLogDetJacobianEventDimsOne(self):
bijector = Softsign(validate_args=True)
y = self._rng.rand(2, 10)
ildj_before = self._softsign_ildj_before_reduction(y)
ildj = np.sum(ildj_before, axis=1)
self.assertAllClose(
ildj, self.evaluate(
bijector.inverse_log_det_jacobian(y, event_ndims=1)))
def testScalarCongruency(self):
with self.cached_session():
bijector = Softsign(validate_args=True)
assert_scalar_congruency(bijector, lower_x=-20., upper_x=20.)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = Softsign(validate_args=True)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.linspace(-0.99, 0.99, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/softsign_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TransformDiagonal bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class TransformDiagonalBijectorTest(test.TestCase):
"""Tests correctness of the TransformDiagonal bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_in_graph_and_eager_modes
def testBijector(self):
x = np.float32(np.random.randn(3, 4, 4))
y = x.copy()
for i in range(x.shape[0]):
np.fill_diagonal(y[i, :, :], np.exp(np.diag(x[i, :, :])))
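    # TransformDiagonal applies diag_bijector (here Exp) only to the diagonal
    # of each matrix, leaving off-diagonal entries untouched, so the
    # log-det-Jacobians below match Exp applied to the diagonal vectors.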
exp = bijectors.Exp()
b = bijectors.TransformDiagonal(diag_bijector=exp)
y_ = self.evaluate(b.forward(x))
self.assertAllClose(y, y_)
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=2))
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllEqual(
fldj,
self.evaluate(exp.forward_log_det_jacobian(
np.array([np.diag(x_mat) for x_mat in x]),
event_ndims=1)))
self.assertAllEqual(
ildj,
self.evaluate(exp.inverse_log_det_jacobian(
np.array([np.diag(y_mat) for y_mat in y]),
event_ndims=1)))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/transform_diagonal_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MatrixInverseTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MatrixInverseTriLBijectorTest(test.TestCase):
"""Tests the correctness of the Y = inv(tril) transformation."""
  # The entries above the main diagonal must be zero, but np.linalg.inv can
  # leave numerically undefined values there, so we zero them out after
  # running the inverse.
  # See: https://github.com/numpy/numpy/issues/11445
def _inv(self, x):
y = np.linalg.inv(x)
    # np.triu_indices only works on 2-D arrays, so iterate over every 2-D
    # matrix in the possibly batched input.
for idx in np.ndindex(y.shape[0:-2]):
y[idx][np.triu_indices(y[idx].shape[-1], 1)] = 0
return y
def testComputesCorrectValues(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
self.assertEqual("matrix_inverse_tril", inv.name)
x_ = np.array([[0.7, 0., 0.],
[0.1, -1., 0.],
[0.3, 0.25, 0.5]], dtype=np.float32)
x_inv_ = np.linalg.inv(x_)
expected_fldj_ = -6. * np.sum(np.log(np.abs(np.diag(x_))))
y = inv.forward(x_)
x_back = inv.inverse(x_inv_)
fldj = inv.forward_log_det_jacobian(x_, event_ndims=2)
ildj = inv.inverse_log_det_jacobian(x_inv_, event_ndims=2)
y_, x_back_, fldj_, ildj_ = self.evaluate([y, x_back, fldj, ildj])
self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)
self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
def testOneByOneMatrix(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[5.]], dtype=np.float32)
x_inv_ = np.array([[0.2]], dtype=np.float32)
expected_fldj_ = np.log(0.04)
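    # For a 1x1 matrix the forward map is y = 1/x, so dy/dx = -1/x**2 and the
    # forward log-det-Jacobian is -2 * log|x| = log(1/25) = log(0.04) for x = 5.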
y = inv.forward(x_)
x_back = inv.inverse(x_inv_)
fldj = inv.forward_log_det_jacobian(x_, event_ndims=2)
ildj = inv.inverse_log_det_jacobian(x_inv_, event_ndims=2)
y_, x_back_, fldj_, ildj_ = self.evaluate([y, x_back, fldj, ildj])
self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)
self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
def testZeroByZeroMatrix(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.eye(0, dtype=np.float32)
x_inv_ = np.eye(0, dtype=np.float32)
expected_fldj_ = 0.
y = inv.forward(x_)
x_back = inv.inverse(x_inv_)
fldj = inv.forward_log_det_jacobian(x_, event_ndims=2)
ildj = inv.inverse_log_det_jacobian(x_inv_, event_ndims=2)
y_, x_back_, fldj_, ildj_ = self.evaluate([y, x_back, fldj, ildj])
self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)
self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
def testBatch(self):
# Test batch computation with input shape (2, 1, 2, 2), i.e. batch shape
# (2, 1).
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[[[1., 0.],
[2., 3.]]],
[[[4., 0.],
[5., -6.]]]], dtype=np.float32)
x_inv_ = self._inv(x_)
expected_fldj_ = -4. * np.sum(
np.log(np.abs(np.diagonal(x_, axis1=-2, axis2=-1))), axis=-1)
y = inv.forward(x_)
x_back = inv.inverse(x_inv_)
fldj = inv.forward_log_det_jacobian(x_, event_ndims=2)
ildj = inv.inverse_log_det_jacobian(x_inv_, event_ndims=2)
y_, x_back_, fldj_, ildj_ = self.evaluate([y, x_back, fldj, ildj])
self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)
self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)
self.assertAllClose(expected_fldj_, fldj_, atol=0., rtol=1e-3)
self.assertAllClose(-expected_fldj_, ildj_, atol=0., rtol=1e-3)
def testErrorOnInputRankTooLow(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([0.1], dtype=np.float32)
rank_error_msg = "must have rank at least 2"
with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
self.evaluate(inv.forward(x_))
with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
self.evaluate(inv.inverse(x_))
with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
# TODO(b/80481923): Figure out why these assertions fail, and fix them.
## def testErrorOnInputNonSquare(self):
## inv = bijectors.MatrixInverseTriL(validate_args=True)
## x_ = np.array([[1., 2., 3.],
## [4., 5., 6.]], dtype=np.float32)
## square_error_msg = "must be a square matrix"
## with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
## square_error_msg):
## self.evaluate(inv.forward(x_))
## with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
## square_error_msg):
## self.evaluate(inv.inverse(x_))
## with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
## square_error_msg):
## self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
## with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
## square_error_msg):
## self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
def testErrorOnInputNotLowerTriangular(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[1., 2.],
[3., 4.]], dtype=np.float32)
triangular_error_msg = "must be lower triangular"
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
triangular_error_msg):
self.evaluate(inv.forward(x_))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
triangular_error_msg):
self.evaluate(inv.inverse(x_))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
triangular_error_msg):
self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
triangular_error_msg):
self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
def testErrorOnInputSingular(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[1., 0.],
[0., 0.]], dtype=np.float32)
nonsingular_error_msg = "must have all diagonal entries nonzero"
with self.assertRaisesOpError(nonsingular_error_msg):
self.evaluate(inv.forward(x_))
with self.assertRaisesOpError(nonsingular_error_msg):
self.evaluate(inv.inverse(x_))
with self.assertRaisesOpError(nonsingular_error_msg):
self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
with self.assertRaisesOpError(nonsingular_error_msg):
self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/matrix_inverse_tril_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reshape Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.reshape import Reshape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class _ReshapeBijectorTest(object):
"""Base class for testing the reshape transformation.
Methods defined in this class call a method self.build_shapes() that
is implemented by subclasses defined below, returning respectively
ReshapeBijectorTestStatic: static shapes,
ReshapeBijectorTestDynamic: shape placeholders of known ndims, and
ReshapeBijectorTestDynamicNdims: shape placeholders of unspecified ndims,
so that each test in this base class is automatically run over all
three cases. The subclasses also implement assertRaisesError to test
for either Python exceptions (in the case of static shapes) or
TensorFlow op errors (dynamic shapes).
"""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
"""Do a basic sanity check of forward, inverse, jacobian."""
expected_x = np.random.randn(4, 3, 2)
expected_y = np.reshape(expected_x, [4, 6])
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([3, 2], [6,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
fldj_,
ildj_) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x, event_ndims=2),
bijector.inverse_log_det_jacobian(expected_y, event_ndims=2),
), feed_dict=feed_dict)
self.assertEqual("reshape", bijector.name)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
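      # Reshaping only relabels coordinates, so the forward and inverse
      # log-det-Jacobians are exactly zero.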
self.assertAllClose(0., fldj_, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj_, rtol=1e-6, atol=0)
def testEventShapeTensor(self):
"""Test event_shape_tensor methods when even ndims may be dynamic."""
shape_in_static = [2, 3]
shape_out_static = [6,]
shape_in, shape_out, feed_dict = self.build_shapes(shape_in_static,
shape_out_static)
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in, validate_args=True)
# using the _tensor methods, we should always get a fully-specified
# result since these are evaluated at graph runtime.
with self.cached_session() as sess:
(shape_out_,
shape_in_) = sess.run((
bijector.forward_event_shape_tensor(shape_in),
bijector.inverse_event_shape_tensor(shape_out),
), feed_dict=feed_dict)
self.assertAllEqual(shape_out_static, shape_out_)
self.assertAllEqual(shape_in_static, shape_in_)
def testScalarReshape(self):
"""Test reshaping to and from a scalar shape ()."""
expected_x = np.random.randn(4, 3, 1)
expected_y = np.reshape(expected_x, [4, 3])
expected_x_scalar = np.random.randn(1,)
expected_y_scalar = expected_x_scalar[0]
shape_in, shape_out, feed_dict = self.build_shapes([], [1,])
with self.cached_session() as sess:
bijector = Reshape(
event_shape_out=shape_in,
event_shape_in=shape_out, validate_args=True)
(x_,
y_,
x_scalar_,
y_scalar_
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.inverse(expected_y_scalar),
bijector.forward(expected_x_scalar),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(expected_y_scalar, y_scalar_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x_scalar, x_scalar_, rtol=1e-6, atol=0)
def testMultipleUnspecifiedDimensionsOpError(self):
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [4, -1, -1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(
"elements must have at most one `-1`."):
sess.run(bijector.forward_event_shape_tensor(shape_in),
feed_dict=feed_dict)
# pylint: disable=invalid-name
def _testInvalidDimensionsOpError(self, expected_error_message):
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 2, -2,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward_event_shape_tensor(shape_in),
feed_dict=feed_dict)
def _testInvalidDimensionsStatic(self, expected_error_message):
"""Version of _testInvalidDimensionsOpError for errors detected statically.
Statically means at graph construction time.
Args:
expected_error_message: String that should be present in the error
message that `Reshape` raises for invalid shapes.
"""
shape_in, shape_out, _ = self.build_shapes([2, 3], [
1,
2,
-2,
])
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
expected_error_message):
_ = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
# pylint: enable=invalid-name
def testValidButNonMatchingInputOpError(self):
x = np.random.randn(4, 3, 2)
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 6, 1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
      # Here we pass in a tensor (x) whose shape is compatible with
      # the output shape, so tf.reshape raises no error, but whose shape
      # doesn't match the expected input shape.
with self.assertRaisesError(
"Input `event_shape` does not match `event_shape_in`."):
sess.run(bijector.forward(x),
feed_dict=feed_dict)
def testValidButNonMatchingInputPartiallySpecifiedOpError(self):
x = np.random.randn(4, 3, 2)
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, -1], [1, 6, 1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(
"Input `event_shape` does not match `event_shape_in`."):
sess.run(bijector.forward(x),
feed_dict=feed_dict)
# pylint: disable=invalid-name
def _testInputOutputMismatchOpError(self, expected_error_message):
x1 = np.random.randn(4, 2, 3)
x2 = np.random.randn(4, 1, 1, 5)
with self.cached_session() as sess:
shape_in, shape_out, fd_mismatched = self.build_shapes([2, 3],
[1, 1, 5])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward(x1), feed_dict=fd_mismatched)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.inverse(x2), feed_dict=fd_mismatched)
# pylint: enable=invalid-name
def testOneShapePartiallySpecified(self):
expected_x = np.random.randn(4, 6)
expected_y = np.reshape(expected_x, [4, 2, 3])
with self.cached_session() as sess:
# one of input/output shapes is partially specified
shape_in, shape_out, feed_dict = self.build_shapes([-1,], [2, 3])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testBothShapesPartiallySpecified(self):
expected_x = np.random.randn(4, 2, 3)
expected_y = np.reshape(expected_x, [4, 3, 2])
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([-1, 3], [-1, 2])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testDefaultVectorShape(self):
expected_x = np.random.randn(4, 4)
expected_y = np.reshape(expected_x, [4, 2, 2])
with self.cached_session() as sess:
_, shape_out, feed_dict = self.build_shapes([-1,], [-1, 2])
bijector = Reshape(shape_out,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def build_shapes(self, *args, **kwargs):
raise NotImplementedError("Subclass failed to implement `build_shapes`.")
class ReshapeBijectorTestStatic(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_static = shape_in
shape_out_static = shape_out
feed_dict = {}
return shape_in_static, shape_out_static, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesRegexp(Exception, msg)
def testEventShape(self):
shape_in_static = tensor_shape.TensorShape([2, 3])
shape_out_static = tensor_shape.TensorShape([6,])
bijector = Reshape(
event_shape_out=shape_out_static,
event_shape_in=shape_in_static, validate_args=True)
# test that forward_ and inverse_event_shape do sensible things
# when shapes are statically known.
self.assertEqual(
bijector.forward_event_shape(shape_in_static),
shape_out_static)
self.assertEqual(
bijector.inverse_event_shape(shape_out_static),
shape_in_static)
def testBijectiveAndFinite(self):
x = np.random.randn(4, 2, 3)
y = np.reshape(x, [4, 1, 2, 3])
with self.cached_session():
bijector = Reshape(
event_shape_in=[2, 3],
event_shape_out=[1, 2, 3],
validate_args=True)
assert_bijective_and_finite(
bijector, x, y, event_ndims=2, rtol=1e-6, atol=0)
def testInvalidDimensionsStatic(self):
self._testInvalidDimensionsStatic(
"elements must be either positive integers or `-1`")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Cannot reshape a tensor with")
class ReshapeBijectorTestDynamic(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_ph = array_ops.placeholder(shape=(len(shape_in),),
dtype=dtypes.int32)
shape_out_ph = array_ops.placeholder(shape=(len(shape_out),),
dtype=dtypes.int32)
feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}
return shape_in_ph, shape_out_ph, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"elements must be either positive integers or `-1`.")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
class ReshapeBijectorTestDynamicNdims(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)
shape_out_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)
feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}
return shape_in_ph, shape_out_ph, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"elements must be either positive integers or `-1`.")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/reshape_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SinhArcsinh Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# pylint: disable=g-importing-member
from tensorflow.contrib.distributions.python.ops.bijectors.sinh_arcsinh import SinhArcsinh
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
# pylint: enable=g-importing-member
class SinhArcsinhBijectorTest(test.TestCase):
"""Tests correctness of the power transformation."""
def testBijectorVersusNumpyRewriteOfBasicFunctions(self):
with self.cached_session():
skewness = 0.2
tailweight = 2.0
bijector = SinhArcsinh(
skewness=skewness,
tailweight=tailweight,
validate_args=True)
self.assertEqual("SinhArcsinh", bijector.name)
x = np.array([[[-2.01], [2.], [1e-4]]]).astype(np.float32)
y = np.sinh((np.arcsinh(x) + skewness) * tailweight)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
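      # The inverse is x = sinh(arcsinh(y) / tailweight - skewness), so
      # dx/dy = cosh(arcsinh(y) / tailweight - skewness)
      #         / (tailweight * sqrt(y**2 + 1)); its log is the elementwise
      # term summed over the event dimension below.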
self.assertAllClose(
np.sum(
np.log(np.cosh(np.arcsinh(y) / tailweight - skewness)) -
np.log(tailweight) - np.log(np.sqrt(y**2 + 1)),
axis=-1),
bijector.inverse_log_det_jacobian(y, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=1).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=1).eval(),
rtol=1e-4,
atol=0.)
def testLargerTailWeightPutsMoreWeightInTails(self):
with self.cached_session():
# Will broadcast together to shape [3, 2].
x = [-1., 1.]
tailweight = [[0.5], [1.0], [2.0]]
bijector = SinhArcsinh(tailweight=tailweight, validate_args=True)
y = bijector.forward(x).eval()
# x = -1, 1 should be mapped to points symmetric about 0
self.assertAllClose(y[:, 0], -1. * y[:, 1])
# forward(1) should increase as tailweight increases, since higher
# tailweight should map 1 to a larger number.
forward_1 = y[:, 1] # The positive values of y.
self.assertLess(forward_1[0], forward_1[1])
self.assertLess(forward_1[1], forward_1[2])
def testSkew(self):
with self.cached_session():
# Will broadcast together to shape [3, 2].
x = [-1., 1.]
skewness = [[-1.], [0.], [1.]]
bijector = SinhArcsinh(skewness=skewness, validate_args=True)
y = bijector.forward(x).eval()
# For skew < 0, |forward(-1)| > |forward(1)|
self.assertGreater(np.abs(y[0, 0]), np.abs(y[0, 1]))
# For skew = 0, |forward(-1)| = |forward(1)|
self.assertAllClose(np.abs(y[1, 0]), np.abs(y[1, 1]))
# For skew > 0, |forward(-1)| < |forward(1)|
self.assertLess(np.abs(y[2, 0]), np.abs(y[2, 1]))
def testScalarCongruencySkewness1Tailweight0p5(self):
with self.cached_session():
bijector = SinhArcsinh(skewness=1.0, tailweight=0.5, validate_args=True)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.0, rtol=0.05)
def testScalarCongruencySkewnessNeg1Tailweight1p5(self):
with self.cached_session():
bijector = SinhArcsinh(skewness=-1.0, tailweight=1.5, validate_args=True)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.0, rtol=0.05)
def testBijectiveAndFiniteSkewnessNeg1Tailweight0p5(self):
with self.cached_session():
bijector = SinhArcsinh(skewness=-1., tailweight=0.5, validate_args=True)
x = np.concatenate((-np.logspace(-2, 10, 1000), [0], np.logspace(
-2, 10, 1000))).astype(np.float32)
assert_bijective_and_finite(bijector, x, x, event_ndims=0, rtol=1e-3)
def testBijectiveAndFiniteSkewness1Tailweight3(self):
with self.cached_session():
bijector = SinhArcsinh(skewness=1., tailweight=3., validate_args=True)
x = np.concatenate((-np.logspace(-2, 5, 1000), [0], np.logspace(
-2, 5, 1000))).astype(np.float32)
assert_bijective_and_finite(
bijector, x, x, event_ndims=0, rtol=1e-3)
def testBijectorEndpoints(self):
with self.cached_session():
for dtype in (np.float32, np.float64):
bijector = SinhArcsinh(
skewness=dtype(0.), tailweight=dtype(1.), validate_args=True)
bounds = np.array(
[np.finfo(dtype).min, np.finfo(dtype).max], dtype=dtype)
# Note that the above bijector is the identity bijector. Hence, the
# log_det_jacobian will be 0. Because of this we use atol.
assert_bijective_and_finite(
bijector, bounds, bounds, event_ndims=0, atol=2e-6)
def testBijectorOverRange(self):
with self.cached_session():
for dtype in (np.float32, np.float64):
skewness = np.array([1.2, 5.], dtype=dtype)
tailweight = np.array([2., 10.], dtype=dtype)
# The inverse will be defined up to where sinh is valid, which is
# arcsinh(np.finfo(dtype).max).
log_boundary = np.log(
np.sinh(np.arcsinh(np.finfo(dtype).max) / tailweight - skewness))
x = np.array([
np.logspace(-2, log_boundary[0], base=np.e, num=1000),
np.logspace(-2, log_boundary[1], base=np.e, num=1000)
], dtype=dtype)
# Ensure broadcasting works.
x = np.swapaxes(x, 0, 1)
y = np.sinh((np.arcsinh(x) + skewness) * tailweight)
bijector = SinhArcsinh(
skewness=skewness, tailweight=tailweight, validate_args=True)
self.assertAllClose(y, bijector.forward(x).eval(), rtol=1e-4, atol=0.)
self.assertAllClose(x, bijector.inverse(y).eval(), rtol=1e-4, atol=0.)
        # On IBM PPC systems, np.longdouble (np.float128) has the same exponent
        # range as double even though it can carry more precision, so it cannot
        # hold the square of the float64 max and the float128 computation below
        # would overflow to inf. The check below skips the square calculation
        # and the corresponding assert in that case.
if np.amax(y) <= np.sqrt(np.finfo(np.float128).max) and \
np.fabs(np.amin(y)) <= np.sqrt(np.fabs(np.finfo(np.float128).min)):
# Do the numpy calculation in float128 to avoid inf/nan.
y_float128 = np.float128(y)
self.assertAllClose(
np.log(np.cosh(
np.arcsinh(y_float128) / tailweight - skewness) / np.sqrt(
y_float128**2 + 1)) -
np.log(tailweight),
bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
rtol=1e-4,
atol=0.)
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=0).eval(),
rtol=1e-4,
atol=0.)
def testZeroTailweightRaises(self):
with self.cached_session():
with self.assertRaisesOpError("not positive"):
SinhArcsinh(tailweight=0., validate_args=True).forward(1.0).eval()
def testDefaultDtypeIsFloat32(self):
with self.cached_session():
bijector = SinhArcsinh()
self.assertEqual(bijector.tailweight.dtype, np.float32)
self.assertEqual(bijector.skewness.dtype, np.float32)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/sinh_arcsinh_bijector_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.power_transform import PowerTransform
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class PowerTransformBijectorTest(test.TestCase):
"""Tests correctness of the power transformation."""
def testBijector(self):
with self.cached_session():
c = 0.2
bijector = PowerTransform(power=c, validate_args=True)
self.assertEqual("power_transform", bijector.name)
x = np.array([[[-1.], [2.], [-5. + 1e-4]]])
y = (1. + x * c)**(1. / c)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
(c - 1.) * np.sum(np.log(y), axis=-1),
bijector.inverse_log_det_jacobian(y, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=1).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=1).eval(),
rtol=1e-4,
atol=0.)
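  # Added note (not in the original test): for power c, the forward map is
  #   y = (1 + c * x) ** (1 / c),
  # which tends to exp(x) as c -> 0, so PowerTransform(power=0.) behaves like
  # the Exp bijector. A quick numpy sanity check of the limit:
  #   np.allclose((1. + 1e-6 * 2.) ** (1. / 1e-6), np.exp(2.), rtol=1e-4)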
def testScalarCongruency(self):
with self.cached_session():
bijector = PowerTransform(power=0.2, validate_args=True)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = PowerTransform(power=0.2, validate_args=True)
x = np.linspace(-4.999, 10, num=10).astype(np.float32)
y = np.logspace(0.001, 10, num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, event_ndims=0, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class AffineBijectorTest(test.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
with self.cached_session():
mu = -1.
# scale corresponds to 1.
bijector = Affine(shift=mu)
self.assertEqual("affine", bijector.name)
def testNoBatchMultivariateIdentity(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[1., 0], [0, 1.]]
bijector = Affine(shift=mu)
x = [1., 1]
        # matmul(sigma, x) + shift
        # = [1., 1] + [1., -1] = [2., 0]
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
# x is a 2-batch of 2-vectors.
# The first vector is [1, 1], the second is [-1, -1].
# Each undergoes matmul(sigma, x) + shift.
x = [[1., 1], [-1., -1]]
self.assertAllClose([[2., 0], [0., -2]], run(bijector.forward, x))
self.assertAllClose([[0., 2], [-2., 0]], run(bijector.inverse, x))
self.assertAllClose(
0., run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateDiag(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[2., 0], [0, 1.]]
bijector = Affine(shift=mu, scale_diag=[2., 1])
x = [1., 1]
        # matmul(sigma, x) + shift
        # = [2., 1] + [1., -1] = [3., 0]
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
# Reset bijector.
bijector = Affine(shift=mu, scale_diag=[2., 1])
# x is a 2-batch of 2-vectors.
# The first vector is [1, 1], the second is [-1, -1].
# Each undergoes matmul(sigma, x) + shift.
x = [[1., 1],
[-1., -1]]
self.assertAllClose([[3., 0],
[-1., -2]],
run(bijector.forward, x))
self.assertAllClose([[0., 2],
[-1., 0]],
run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateFullDynamic(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, name="x")
mu = array_ops.placeholder(dtypes.float32, name="mu")
scale_diag = array_ops.placeholder(dtypes.float32, name="scale_diag")
x_value = np.array([[1., 1]], dtype=np.float32)
mu_value = np.array([1., -1], dtype=np.float32)
scale_diag_value = np.array([2., 2], dtype=np.float32)
feed_dict = {
x: x_value,
mu: mu_value,
scale_diag: scale_diag_value,
}
bijector = Affine(shift=mu, scale_diag=scale_diag)
self.assertAllClose([[3., 1]], sess.run(bijector.forward(x), feed_dict))
self.assertAllClose([[0., 1]], sess.run(bijector.inverse(x), feed_dict))
self.assertAllClose(
-np.log(4),
sess.run(bijector.inverse_log_det_jacobian(x, event_ndims=1),
feed_dict))
def testBatchMultivariateIdentity(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale = 2.
bijector = Affine(shift=mu, scale_identity_multiplier=scale)
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(4),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testBatchMultivariateDiag(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale_diag = [[2., 2]]
bijector = Affine(shift=mu, scale_diag=scale_diag)
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
self.assertAllClose(
[-np.log(4)],
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testBatchMultivariateFullDynamic(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, name="x")
mu = array_ops.placeholder(dtypes.float32, name="mu")
scale_diag = array_ops.placeholder(dtypes.float32, name="scale_diag")
x_value = np.array([[[1., 1]]], dtype=np.float32)
mu_value = np.array([[1., -1]], dtype=np.float32)
scale_diag_value = np.array([[2., 2]], dtype=np.float32)
feed_dict = {
x: x_value,
mu: mu_value,
scale_diag: scale_diag_value,
}
bijector = Affine(shift=mu, scale_diag=scale_diag)
self.assertAllClose([[[3., 1]]], sess.run(bijector.forward(x), feed_dict))
self.assertAllClose([[[0., 1]]], sess.run(bijector.inverse(x), feed_dict))
self.assertAllClose(
[-np.log(4)],
sess.run(bijector.inverse_log_det_jacobian(
x, event_ndims=1), feed_dict))
def testIdentityWithDiagUpdate(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_diag=[1., 1., 1.])
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.**3),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityWithTriL(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 2]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[1., 5]], run(bijector.forward, x))
self.assertAllClose([[1., 0.5]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(4.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testDiagWithTriL(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 3]]
bijector = Affine(
shift=mu, scale_diag=[1., 2.], scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[1., 7]], run(bijector.forward, x))
self.assertAllClose([[1., 1 / 3.]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(6.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityAndDiagWithTriL(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[3., 0], [2, 4]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.0,
scale_diag=[1., 2.],
scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[2., 9]], run(bijector.forward, x))
self.assertAllClose([[2 / 3., 5 / 12.]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(12.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityWithVDVTUpdate(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 2, 0], [0, 0, 3]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=2.,
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(shift=mu, scale_diag=[10., 2, 3])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 3, 8], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 1.5, 4 / 3.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(60.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
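  # Illustrative numpy sketch (not part of the original test): the low-rank
  # update above composes as scale = c * I + V @ diag(d2) @ V.T. With c = 2,
  # V = [[2, 0], [0, 0], [0, 1]] and d2 = [2, 1], this reproduces the
  # reference scale diag([10, 2, 3]) used by `bijector_ref`.
  def _exampleVdvtUpdateScale(self):
    v = np.array([[2., 0.], [0., 0.], [0., 1.]])
    d2 = np.diag([2., 1.])
    return 2. * np.eye(3) + v.dot(d2).dot(v.T)  # == np.diag([10., 2., 3.])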
def testDiagWithVDVTUpdate(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 3, 0], [0, 0, 5]]
bijector = Affine(
shift=mu,
scale_diag=[2., 3, 4],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(shift=mu, scale_diag=[10., 3, 5])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 5, 14], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 1., 0.8], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(150.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testTriLWithVDVTUpdate(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [1, 3, 0], [2, 3, 5]]
bijector = Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(
shift=mu, scale_tril=[[10., 0, 0], [1, 3, 0], [2, 3, 5]])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 6, 22], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 14 / 15., 4 / 25.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(150.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testTriLWithVDVTUpdateNoDiagonal(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[6, 0, 0], [1, 3, 0], [2, 3, 5]]
bijector = Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=None,
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(
shift=mu, scale_tril=[[6., 0, 0], [1, 3, 0], [2, 3, 5]])
x = [1., 2, 3] # Vector.
self.assertAllClose([5., 6, 22], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([1 / 3., 8 / 9., 4 / 30.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(90.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateRaisesWhenSingular(self):
with self.cached_session():
mu = [1., -1]
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"diagonal part must be non-zero"):
_ = Affine(
shift=mu,
# Has zero on the diagonal.
scale_diag=[0., 1],
validate_args=True)
# Error detected statically; don't need to run the op.
def _makeScale(self,
x,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None):
"""Create a scale matrix. Return None if it can not be created."""
c = scale_identity_multiplier
d1 = scale_diag
tril = scale_tril
v = scale_perturb_factor
d2 = scale_perturb_diag
# Ambiguous low rank update.
if v is None and d2 is not None:
return None
if c is None and d1 is None and tril is None:
# Special case when no scale args are passed in. This means use an
# identity matrix.
c = 1.
matrix = np.float32(0.)
if c is not None:
# Infer the dimension from x.
matrix += c * self._matrix_diag(np.ones_like(x))
if d1 is not None:
matrix += self._matrix_diag(np.array(d1, dtype=np.float32))
if tril is not None:
matrix += np.array(tril, dtype=np.float32)
if v is not None:
v = np.array(v, dtype=np.float32)
if v.ndim < 2:
vt = v.T
else:
vt = np.swapaxes(v, axis1=v.ndim - 2, axis2=v.ndim - 1)
if d2 is not None:
d2 = self._matrix_diag(np.array(d2, dtype=np.float32))
right = np.matmul(d2, vt)
else:
right = vt
matrix += np.matmul(v, right)
return matrix
def _matrix_diag(self, d):
"""Batch version of np.diag."""
orig_shape = d.shape
d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))
diag_list = []
for i in range(d.shape[0]):
diag_list.append(np.diag(d[i, ...]))
return np.reshape(diag_list, orig_shape + (d.shape[-1],))
def _testLegalInputs(self, shift=None, scale_params=None, x=None):
def _powerset(x):
s = list(x)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
for args in _powerset(scale_params.items()):
with self.cached_session():
args = dict(args)
scale_args = dict({"x": x}, **args)
scale = self._makeScale(**scale_args)
# We haven't specified enough information for the scale.
if scale is None:
with self.assertRaisesRegexp(ValueError, ("must be specified.")):
bijector = Affine(shift=shift, **args)
else:
bijector = Affine(shift=shift, **args)
np_x = x
# For the case a vector is passed in, we need to make the shape
# match the matrix for matmul to work.
if x.ndim == scale.ndim - 1:
np_x = np.expand_dims(x, axis=-1)
forward = np.matmul(scale, np_x) + shift
if x.ndim == scale.ndim - 1:
forward = np.squeeze(forward, axis=-1)
self.assertAllClose(forward, bijector.forward(x).eval())
backward = np.linalg.solve(scale, np_x - shift)
if x.ndim == scale.ndim - 1:
backward = np.squeeze(backward, axis=-1)
self.assertAllClose(backward, bijector.inverse(x).eval())
scale *= np.ones(shape=x.shape[:-1], dtype=scale.dtype)
ildj = -np.log(np.abs(np.linalg.det(scale)))
# TODO(jvdillon): We need to make it so the scale_identity_multiplier
# case does not deviate in expected shape. Fixing this will get rid of
# these special cases.
if (ildj.ndim > 0 and (len(scale_args) == 1 or (
len(scale_args) == 2 and
scale_args.get("scale_identity_multiplier", None) is not None))):
ildj = np.squeeze(ildj[0])
elif ildj.ndim < scale.ndim - 2:
ildj = np.reshape(ildj, scale.shape[0:-2])
self.assertAllClose(
ildj, bijector.inverse_log_det_jacobian(x, event_ndims=1).eval())
def testLegalInputs(self):
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.],
"scale_tril": [[1., 0.],
[-3., 3.]],
"scale_perturb_factor": [[1., 0],
[1.5, 3.]],
"scale_perturb_diag": [3., 1.]
},
x=np.array(
[1., 2], dtype=np.float32))
def testLegalInputsWithBatch(self):
# Shape of scale is [2, 1, 2, 2]
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3.]], [[1., 2]]],
"scale_tril": [[[[1., 0.], [-3., 3.]]], [[[0.5, 0.], [1., 1.]]]],
"scale_perturb_factor": [[[[1., 0], [1.5, 3.]]],
[[[1., 0], [1., 1.]]]],
"scale_perturb_diag": [[[3., 1.]], [[0.5, 1.]]]
},
x=np.array(
[[[1., 2]], [[3., 4]]], dtype=np.float32))
def testNegativeDetTrilPlusVDVT(self):
# scale = [[3.7, 2.7],
# [-0.3, -1.3]]
# inv(scale) = [[0.325, 0.675],
# [-0.075, -0.925]]
# eig(scale) = [3.5324, -1.1324]
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_tril": [[1., 0], [-3, -4]],
"scale_perturb_factor": [[0.1, 0], [0.5, 0.3]],
"scale_perturb_diag": [3., 1]
},
x=np.array(
[1., 2], dtype=np.float32))
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
rng = np.random.RandomState(42)
class SoftmaxCenteredBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = exp(X) / sum(exp(X)) transformation."""
def testBijectorVector(self):
with self.cached_session():
softmax = SoftmaxCentered()
self.assertEqual("softmax_centered", softmax.name)
x = np.log([[2., 3, 4], [4., 8, 12]])
y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]
self.assertAllClose(y, softmax.forward(x).eval())
self.assertAllClose(x, softmax.inverse(y).eval())
self.assertAllClose(
-np.sum(np.log(y), axis=1),
softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(),
atol=0.,
rtol=1e-7)
self.assertAllClose(
-softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(),
softmax.forward_log_det_jacobian(x, event_ndims=1).eval(),
atol=0.,
rtol=1e-7)
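  # Minimal numpy sketch (not part of the original test) of the forward map
  # checked above: append a zero logit, exponentiate, and normalize, so
  # log([2., 3, 4]) -> [2, 3, 4, 1] / 10 = [0.2, 0.3, 0.4, 0.1].
  def _exampleNumpySoftmaxCenteredForward(self, x):
    logits = np.concatenate([x, np.zeros(x.shape[:-1] + (1,))], axis=-1)
    z = np.exp(logits)
    return z / np.sum(z, axis=-1, keepdims=True)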
def testBijectorUnknownShape(self):
with self.cached_session():
softmax = SoftmaxCentered()
self.assertEqual("softmax_centered", softmax.name)
x = array_ops.placeholder(shape=[2, None], dtype=dtypes.float32)
real_x = np.log([[2., 3, 4], [4., 8, 12]])
y = array_ops.placeholder(shape=[2, None], dtype=dtypes.float32)
real_y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]
self.assertAllClose(real_y, softmax.forward(x).eval(
feed_dict={x: real_x}))
self.assertAllClose(real_x, softmax.inverse(y).eval(
feed_dict={y: real_y}))
self.assertAllClose(
-np.sum(np.log(real_y), axis=1),
softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(
feed_dict={y: real_y}),
atol=0.,
rtol=1e-7)
self.assertAllClose(
-softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(
feed_dict={y: real_y}),
softmax.forward_log_det_jacobian(x, event_ndims=1).eval(
feed_dict={x: real_x}),
atol=0.,
rtol=1e-7)
def testShapeGetters(self):
with self.cached_session():
x = tensor_shape.TensorShape([4])
y = tensor_shape.TensorShape([5])
bijector = SoftmaxCentered(validate_args=True)
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(y.as_list(),
bijector.forward_event_shape_tensor(
x.as_list()).eval())
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(x.as_list(),
bijector.inverse_event_shape_tensor(
y.as_list()).eval())
def testBijectiveAndFinite(self):
with self.cached_session():
softmax = SoftmaxCentered()
x = np.linspace(-50, 50, num=10).reshape(5, 2).astype(np.float32)
# Make y values on the simplex with a wide range.
y_0 = np.ones(5).astype(np.float32)
y_1 = (1e-5 * rng.rand(5)).astype(np.float32)
y_2 = (1e1 * rng.rand(5)).astype(np.float32)
y = np.array([y_0, y_1, y_2])
y /= y.sum(axis=0)
y = y.T # y.shape = [5, 3]
assert_bijective_and_finite(softmax, x, y, event_ndims=1)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/softmax_centered_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Kumaraswamy Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.kumaraswamy import Kumaraswamy
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class KumaraswamyBijectorTest(test.TestCase):
"""Tests correctness of the Kumaraswamy bijector."""
def testBijector(self):
with self.cached_session():
a = 2.
b = 0.3
bijector = Kumaraswamy(
concentration1=a, concentration0=b, validate_args=True)
self.assertEqual("kumaraswamy", bijector.name)
x = np.array([[[0.1], [0.2], [0.3], [0.4], [0.5]]], dtype=np.float32)
# Kumaraswamy cdf. This is the same as inverse(x).
y = 1. - (1. - x ** a) ** b
self.assertAllClose(y, bijector.inverse(x).eval())
self.assertAllClose(x, bijector.forward(y).eval())
kumaraswamy_log_pdf = (np.log(a) + np.log(b) + (a - 1) * np.log(x) +
(b - 1) * np.log1p(-x ** a))
self.assertAllClose(
np.squeeze(kumaraswamy_log_pdf, axis=-1),
bijector.inverse_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(x, event_ndims=1).eval(),
bijector.forward_log_det_jacobian(y, event_ndims=1).eval(),
rtol=1e-4,
atol=0.)
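  # Added note (not in the original test): `forward` is the Kumaraswamy
  # quantile function (inverse CDF),
  #   forward(y) = (1 - (1 - y) ** (1 / b)) ** (1 / a),
  # which inverts the CDF y = 1 - (1 - x ** a) ** b checked above.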
def testScalarCongruency(self):
with self.cached_session():
assert_scalar_congruency(
Kumaraswamy(concentration1=0.5, concentration0=1.1),
lower_x=0., upper_x=1., n=int(10e3), rtol=0.02)
def testBijectiveAndFinite(self):
with self.cached_session():
concentration1 = 1.2
concentration0 = 2.
bijector = Kumaraswamy(
concentration1=concentration1,
concentration0=concentration0, validate_args=True)
      # Omitting the endpoints 0 and 1, since ildj will be infinity at these
# endpoints.
y = np.linspace(.01, 0.99, num=10).astype(np.float32)
x = 1 - (1 - y ** concentration1) ** concentration0
assert_bijective_and_finite(bijector, x, y, event_ndims=0, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/kumaraswamy_bijector_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
from tensorflow.python.framework import errors
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
rng = np.random.RandomState(42)
class SoftplusBijectorTest(test.TestCase):
"""Tests the correctness of the Y = g(X) = Log[1 + exp(X)] transformation."""
def _softplus(self, x):
return np.log(1 + np.exp(x))
def _softplus_inverse(self, y):
return np.log(np.exp(y) - 1)
def _softplus_ildj_before_reduction(self, y):
"""Inverse log det jacobian, before being reduced."""
return -np.log(1 - np.exp(-y))
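  # Added derivation note: the inverse is x = log(exp(y) - 1), so
  # dx/dy = exp(y) / (exp(y) - 1) = 1 / (1 - exp(-y)), and the inverse log
  # det jacobian is therefore -log(1 - exp(-y)), matching the helper above.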
def testHingeSoftnessZeroRaises(self):
with self.cached_session():
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"must be non-zero"):
_ = Softplus(hinge_softness=0., validate_args=True)
# Error detected statically; don't need to run op.
def testBijectorForwardInverseEventDimsZero(self):
with self.cached_session():
bijector = Softplus()
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorForwardInverseWithHingeSoftnessEventDimsZero(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.5)
x = 2 * rng.randn(2, 10)
y = 1.5 * self._softplus(x / 1.5)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorLogDetJacobianEventDimsZero(self):
with self.cached_session():
bijector = Softplus()
y = 2 * rng.rand(2, 10)
# No reduction needed if event_dims = 0.
ildj = self._softplus_ildj_before_reduction(y)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval())
def testBijectorForwardInverseEventDimsOne(self):
with self.cached_session():
bijector = Softplus()
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorLogDetJacobianEventDimsOne(self):
with self.cached_session():
bijector = Softplus()
y = 2 * rng.rand(2, 10)
ildj_before = self._softplus_ildj_before_reduction(y)
ildj = np.sum(ildj_before, axis=1)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
bijector = Softplus()
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testScalarCongruencyWithPositiveHingeSoftness(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.3)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testScalarCongruencyWithNegativeHingeSoftness(self):
with self.cached_session():
bijector = Softplus(hinge_softness=-1.3)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testBijectiveAndFinite32bit(self):
with self.cached_session():
bijector = Softplus()
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFiniteWithPositiveHingeSoftness32Bit(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.23)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFiniteWithNegativeHingeSoftness32Bit(self):
with self.cached_session():
bijector = Softplus(hinge_softness=-0.7)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = -np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFinite16bit(self):
with self.cached_session():
bijector = Softplus()
# softplus(-20) is zero, so we can't use such a large range as in 32bit.
x = np.linspace(-10., 20., 100).astype(np.float16)
# Note that float16 is only in the open set (0, inf) for a smaller
# logspace range. The actual range was (-7, 4), so use something smaller
# for the test.
y = np.logspace(-6, 3, 100).astype(np.float16)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-1, atol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops.bijectors.weibull import Weibull
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class WeibullBijectorTest(test.TestCase):
"""Tests correctness of the weibull bijector."""
def testBijector(self):
with self.cached_session():
scale = 5.
concentration = 0.3
bijector = Weibull(
scale=scale, concentration=concentration,
validate_args=True)
self.assertEqual("weibull", bijector.name)
x = np.array([[[0.], [1.], [14.], [20.], [100.]]], dtype=np.float32)
# Weibull distribution
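      # Added note: `stats.frechet_r` is an older SciPy alias for
      # `stats.weibull_min` (removed in newer SciPy releases), so the two can
      # be used interchangeably here.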
weibull_dist = stats.frechet_r(c=concentration, scale=scale)
y = weibull_dist.cdf(x).astype(np.float32)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
weibull_dist.logpdf(x),
bijector.forward_log_det_jacobian(x, event_ndims=0).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=0).eval(),
rtol=1e-4,
atol=0.)
def testScalarCongruency(self):
with self.cached_session():
assert_scalar_congruency(
Weibull(scale=20., concentration=0.3),
lower_x=1., upper_x=100., rtol=0.02)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = Weibull(
scale=20., concentration=2., validate_args=True)
x = np.linspace(1., 8., num=10).astype(np.float32)
y = np.linspace(
-np.expm1(-1 / 400.),
-np.expm1(-16), num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, event_ndims=0, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/weibull_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.contrib.distributions.python.ops.bijectors.inline import Inline
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class InlineBijectorTest(test.TestCase):
"""Tests correctness of the inline constructed bijector."""
def testBijector(self):
with self.cached_session():
exp = Exp()
inline = Inline(
forward_fn=math_ops.exp,
inverse_fn=math_ops.log,
inverse_log_det_jacobian_fn=lambda y: -math_ops.log(y),
forward_log_det_jacobian_fn=lambda x: x,
forward_min_event_ndims=0,
name="exp")
self.assertEqual(exp.name, inline.name)
x = [[[1., 2.], [3., 4.], [5., 6.]]]
y = np.exp(x)
self.assertAllClose(y, inline.forward(x).eval())
self.assertAllClose(x, inline.inverse(y).eval())
self.assertAllClose(
-np.sum(np.log(y), axis=-1),
inline.inverse_log_det_jacobian(y, event_ndims=1).eval())
self.assertAllClose(
-inline.inverse_log_det_jacobian(y, event_ndims=1).eval(),
inline.forward_log_det_jacobian(x, event_ndims=1).eval())
def testShapeGetters(self):
with self.cached_session():
bijector = Inline(
forward_event_shape_tensor_fn=lambda x: array_ops.concat((x, [1]), 0),
forward_event_shape_fn=lambda x: x.as_list() + [1],
inverse_event_shape_tensor_fn=lambda x: x[:-1],
inverse_event_shape_fn=lambda x: x[:-1],
forward_min_event_ndims=0,
name="shape_only")
x = tensor_shape.TensorShape([1, 2, 3])
y = tensor_shape.TensorShape([1, 2, 3, 1])
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(
y.as_list(),
bijector.forward_event_shape_tensor(x.as_list()).eval())
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(
x.as_list(),
bijector.inverse_event_shape_tensor(y.as_list()).eval())
if __name__ == "__main__":
test.main()
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The VectorDiffeomixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.linalg import linear_operator_addition as linop_add_lib
from tensorflow.python.ops.linalg import linear_operator_diag as linop_diag_lib
from tensorflow.python.ops.linalg import linear_operator_full_matrix as linop_full_lib
from tensorflow.python.ops.linalg import linear_operator_identity as linop_identity_lib
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as linop_tril_lib
from tensorflow.python.util import deprecation
__all__ = [
"VectorDiffeomixture",
"quadrature_scheme_softmaxnormal_gauss_hermite",
"quadrature_scheme_softmaxnormal_quantiles",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_softmaxnormal_gauss_hermite(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use Gauss-Hermite quadrature to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_softmaxnormal_quantiles`.
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
      probabilities associated with each grid point.
"""
with ops.name_scope(name, "quadrature_scheme_softmaxnormal_gauss_hermite",
[normal_loc, normal_scale]):
normal_loc = ops.convert_to_tensor(normal_loc, name="normal_loc")
dt = normal_loc.dtype.base_dtype
normal_scale = ops.convert_to_tensor(
normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
grid = grid.astype(dt.dtype.as_numpy_dtype)
probs = probs.astype(dt.dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = ops.convert_to_tensor(probs, name="probs", dtype=dt)
grid = softmax(
-distribution_util.pad(
(normal_loc[..., array_ops.newaxis] +
np.sqrt(2.) * normal_scale[..., array_ops.newaxis] * grid),
axis=-2,
front=True),
axis=-2) # shape: [B, components, deg]
return grid, probs
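# Illustrative sketch (hypothetical helper, not part of the original module):
# the Gauss-Hermite change of variables used above. For X ~ Normal(mu, sigma),
#   E[f(X)] ~= sum_i probs[i] * f(mu + sqrt(2) * sigma * grid[i]),
# where `grid, probs = np.polynomial.hermite.hermgauss(deg)` and `probs` is
# normalized to sum to one, mirroring the
# `normal_loc + sqrt(2) * normal_scale * grid` term computed above before the
# result is mapped onto the simplex.
def _example_gauss_hermite_normal_expectation(f, mu, sigma, deg=20):
  """Approximates E[f(X)], X ~ Normal(mu, sigma), via Gauss-Hermite points."""
  grid, probs = np.polynomial.hermite.hermgauss(deg)
  probs = probs / np.sum(probs)
  return np.sum(probs * f(mu + np.sqrt(2.) * sigma * grid))
# For example, _example_gauss_hermite_normal_expectation(lambda x: x**2, 0., 1.)
# is close to 1., the variance of a standard Normal.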
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_softmaxnormal_quantiles(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use SoftmaxNormal quantiles to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
      probabilities associated with each grid point.
"""
with ops.name_scope(name, "softmax_normal_grid_and_probs",
[normal_loc, normal_scale]):
normal_loc = ops.convert_to_tensor(normal_loc, name="normal_loc")
dt = normal_loc.dtype.base_dtype
normal_scale = ops.convert_to_tensor(
normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
dist = normal_lib.Normal(loc=normal_loc, scale=normal_scale)
def _get_batch_ndims():
"""Helper to get dist.batch_shape.ndims, statically if possible."""
ndims = dist.batch_shape.ndims
if ndims is None:
ndims = array_ops.shape(dist.batch_shape_tensor())[0]
return ndims
batch_ndims = _get_batch_ndims()
def _get_final_shape(qs):
"""Helper to build `TensorShape`."""
bs = dist.batch_shape.with_rank_at_least(1)
num_components = tensor_shape.dimension_value(bs[-1])
if num_components is not None:
num_components += 1
tail = tensor_shape.TensorShape([num_components, qs])
return bs[:-1].concatenate(tail)
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = array_ops.zeros([], dtype=dist.dtype)
edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
      # Expand edges so it broadcasts across batch dims.
edges = array_ops.reshape(edges, shape=array_ops.concat([
[-1], array_ops.ones([batch_ndims], dtype=dtypes.int32)], axis=0))
quantiles = dist.quantile(edges)
quantiles = SoftmaxCentered().forward(quantiles)
# Cyclically permute left by one.
perm = array_ops.concat([
math_ops.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = array_ops.transpose(quantiles, perm)
quantiles.set_shape(_get_final_shape(quadrature_size + 1))
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
grid.set_shape(_get_final_shape(quadrature_size))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = array_ops.fill(
dims=[quadrature_size],
value=1. / math_ops.cast(quadrature_size, dist.dtype))
return grid, probs
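# Illustrative sketch (hypothetical helper, not part of the original module):
# the scalar analogue of the quantile-midpoint rule above. Quadrature points
# are midpoints of `quadrature_size + 1` evenly spaced quantiles (the
# endpoints 0 and 1 are omitted), and every point gets the constant weight
# `1 / quadrature_size`, which is what keeps the resulting mixture
# reparameterizable.
def _example_quantile_midpoint_expectation(f, quantile_fn, quadrature_size=8):
  """Approximates E[f(Z)] given a scalar quantile (inverse CDF) function."""
  edges = np.linspace(0., 1., quadrature_size + 3)[1:-1]  # omit {0, 1}
  quantiles = quantile_fn(edges)
  grid = (quantiles[:-1] + quantiles[1:]) / 2.  # quadrature_size midpoints
  return np.mean(f(grid))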
class VectorDiffeomixture(distribution_lib.Distribution):
"""VectorDiffeomixture distribution.
A vector diffeomixture (VDM) is a distribution parameterized by a convex
combination of `K` component `loc` vectors, `loc[k], k = 0,...,K-1`, and `K`
`scale` matrices `scale[k], k = 0,..., K-1`. It approximates the following
[compound distribution]
(https://en.wikipedia.org/wiki/Compound_probability_distribution)
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
The integral `int p(x | z) p(z) dz` is approximated with a quadrature scheme
adapted to the mixture density `p(z)`. The `N` quadrature points `z_{N, n}`
and weights `w_{N, n}` (which are non-negative and sum to 1) are chosen
such that
```q_N(x) := sum_{n=1}^N w_{n, N} p(x | z_{N, n}) --> p(x)```
as `N --> infinity`.
Since `q_N(x)` is in fact a mixture (of `N` points), we may sample from
`q_N` exactly. It is important to note that the VDM is *defined* as `q_N`
above, and *not* `p(x)`. Therefore, sampling and pdf may be implemented as
exact (up to floating point error) methods.
A common choice for the conditional `p(x | z)` is a multivariate Normal.
The implemented marginal `p(z)` is the `SoftmaxNormal`, which is a
`K-1` dimensional Normal transformed by a `SoftmaxCentered` bijector, making
it a density on the `K`-simplex. That is,
```
Z = SoftmaxCentered(X),
X = Normal(mix_loc / temperature, 1 / temperature)
```
The default quadrature scheme chooses `z_{N, n}` as `N` midpoints of
the quantiles of `p(z)` (generalized quantiles if `K > 2`).
See [Dillon and Langmore (2018)][1] for more details.
#### About `Vector` distributions in TensorFlow.
The `VectorDiffeomixture` is a non-standard distribution that has properties
particularly useful in [variational Bayesian
methods](https://en.wikipedia.org/wiki/Variational_Bayesian_methods).
Conditioned on a draw from the SoftmaxNormal, `X|z` is a vector whose
components are linear combinations of affine transformations, thus is itself
an affine transformation.
Note: The marginals `X_1|v, ..., X_d|v` are *not* generally identical to some
  parameterization of `distribution`. This is because a sum of draws from
  `distribution` is not, in general, itself distributed as `distribution`.
#### About `Diffeomixture`s and reparameterization.
The `VectorDiffeomixture` is designed to be reparameterized, i.e., its
parameters are only used to transform samples from a distribution which has no
trainable parameters. This property is important because backprop stops at
sources of stochasticity. That is, as long as the parameters are used *after*
the underlying source of stochasticity, the computed gradient is accurate.
Reparametrization means that we can use gradient-descent (via backprop) to
optimize Monte-Carlo objectives. Such objectives are a finite-sample
approximation of an expectation and arise throughout scientific computing.
WARNING: If you backprop through a VectorDiffeomixture sample and the "base"
distribution is both: not `FULLY_REPARAMETERIZED` and a function of trainable
variables, then the gradient is not guaranteed correct!
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create two batches of VectorDiffeomixtures, one with mix_loc=[0.],
# another with mix_loc=[1]. In both cases, `K=2` and the affine
# transformations involve:
# k=0: loc=zeros(dims) scale=LinearOperatorScaledIdentity
# k=1: loc=[2.]*dims scale=LinOpDiag
dims = 5
vdm = tfd.VectorDiffeomixture(
mix_loc=[[0.], [1]],
temperature=[1.],
distribution=tfd.Normal(loc=0., scale=1.),
loc=[
None, # Equivalent to `np.zeros(dims, dtype=np.float32)`.
np.float32([2.]*dims),
],
scale=[
tf.linalg.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
tf.linalg.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
validate_args=True)
```
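  A hedged sketch of using the `vdm` constructed above (TF1 graph mode; shapes
  follow from `batch_shape = [2]` and `event_shape = [5]`):
  ```python
  x = vdm.sample(4)       # shape: [4, 2, 5]
  lp = vdm.log_prob(x)    # shape: [4, 2]
  m = vdm.mean()          # shape: [2, 5]
  ```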
#### References
[1]: Joshua Dillon and Ian Langmore. Quadrature Compound: An approximating
family of distributions. _arXiv preprint arXiv:1801.03080_, 2018.
https://arxiv.org/abs/1801.03080
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
mix_loc,
temperature,
distribution,
loc=None,
scale=None,
quadrature_size=8,
quadrature_fn=quadrature_scheme_softmaxnormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="VectorDiffeomixture"):
"""Constructs the VectorDiffeomixture on `R^d`.
The vector diffeomixture (VDM) approximates the compound distribution
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
Args:
mix_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`.
In terms of samples, larger `mix_loc[..., k]` ==>
`Z` is more likely to put more weight on its `kth` component.
temperature: `float`-like `Tensor`. Broadcastable with `mix_loc`.
In terms of samples, smaller `temperature` means one component is more
likely to dominate. I.e., smaller `temperature` makes the VDM look more
like a standard mixture of `K` components.
distribution: `tf.Distribution`-like instance. Distribution from which `d`
iid samples are used as input to the selected affine transformation.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
a VectorDiffeomixture sample and the `distribution` is not
`FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
the gradient will be incorrect!
loc: Length-`K` list of `float`-type `Tensor`s. The `k`-th element
represents the `shift` used for the `k`-th affine transformation. If
the `k`-th item is `None`, `loc` is implicitly `0`. When specified,
must have shape `[B1, ..., Bb, d]` where `b >= 0` and `d` is the event
size.
scale: Length-`K` list of `LinearOperator`s. Each should be
positive-definite and operate on a `d`-dimensional vector space. The
`k`-th element represents the `scale` used for the `k`-th affine
        transformation. `LinearOperator`s must have shape `[B1, ..., Bb, d, d]`,
        `b >= 0`, i.e., they characterize `b`-batches of `d x d` matrices.
quadrature_size: Python `int` scalar representing number of
quadrature points. Larger `quadrature_size` means `q_N(x)` better
approximates `p(x)`.
quadrature_fn: Python callable taking `normal_loc`, `normal_scale`,
`quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
        representing the SoftmaxNormal grid and corresponding normalized
        weights.
Default value: `quadrature_scheme_softmaxnormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if `not scale or len(scale) < 2`.
ValueError: if `len(loc) != len(scale)`
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
ValueError: if `validate_args` and any not scale.is_positive_definite.
TypeError: if any scale.dtype != scale[0].dtype.
TypeError: if any loc.dtype != scale[0].dtype.
NotImplementedError: if `len(scale) != 2`.
ValueError: if `not distribution.is_scalar_batch`.
ValueError: if `not distribution.is_scalar_event`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[mix_loc, temperature]) as name:
if not scale or len(scale) < 2:
raise ValueError("Must specify list (or list-like object) of scale "
"LinearOperators, one for each component with "
"num_component >= 2.")
if loc is None:
loc = [None]*len(scale)
if len(loc) != len(scale):
raise ValueError("loc/scale must be same-length lists "
"(or same-length list-like objects).")
dtype = scale[0].dtype.base_dtype
loc = [ops.convert_to_tensor(loc_, dtype=dtype, name="loc{}".format(k))
if loc_ is not None else None
for k, loc_ in enumerate(loc)]
for k, scale_ in enumerate(scale):
if validate_args and not scale_.is_positive_definite:
raise ValueError("scale[{}].is_positive_definite = {} != True".format(
k, scale_.is_positive_definite))
if scale_.dtype.base_dtype != dtype:
raise TypeError(
"dtype mismatch; scale[{}].base_dtype=\"{}\" != \"{}\"".format(
k, scale_.dtype.base_dtype.name, dtype.name))
self._endpoint_affine = [
AffineLinearOperator(shift=loc_,
scale=scale_,
validate_args=validate_args,
name="endpoint_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(loc, scale))]
# TODO(jvdillon): Remove once we support k-mixtures.
# We make this assertion here because otherwise `grid` would need to be a
# vector not a scalar.
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
mix_loc = ops.convert_to_tensor(
mix_loc, dtype=dtype, name="mix_loc")
temperature = ops.convert_to_tensor(
temperature, dtype=dtype, name="temperature")
self._grid, probs = tuple(quadrature_fn(
mix_loc / temperature,
1. / temperature,
quadrature_size,
validate_args))
# Note: by creating the logits as `log(prob)` we ensure that
# `self.mixture_distribution.logits` is equivalent to
# `math_ops.log(self.mixture_distribution.probs)`.
self._mixture_distribution = categorical_lib.Categorical(
logits=math_ops.log(probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
self._grid = control_flow_ops.with_dependencies(
asserts, self._grid)
self._distribution = distribution
self._interpolated_affine = [
AffineLinearOperator(shift=loc_,
scale=scale_,
validate_args=validate_args,
name="interpolated_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(
interpolate_loc(self._grid, loc),
interpolate_scale(self._grid, scale)))]
[
self._batch_shape_,
self._batch_shape_tensor_,
self._event_shape_,
self._event_shape_tensor_,
] = determine_batch_event_shapes(self._grid,
self._endpoint_affine)
super(VectorDiffeomixture, self).__init__(
dtype=dtype,
# We hard-code `FULLY_REPARAMETERIZED` because when
# `validate_args=True` we verify that indeed
# `distribution.reparameterization_type == FULLY_REPARAMETERIZED`. A
# distribution which is a function of only non-trainable parameters
# also implies we can use `FULLY_REPARAMETERIZED`. However, we cannot
# easily test for that possibility thus we use `validate_args=False`
# as a "back-door" to allow users a way to use non
# `FULLY_REPARAMETERIZED` distribution. In such cases IT IS THE USERS
# RESPONSIBILITY to verify that the base distribution is a function of
# non-trainable parameters.
reparameterization_type=distribution_lib.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
distribution._graph_parents # pylint: disable=protected-access
+ [loc_ for loc_ in loc if loc_ is not None]
+ [p for scale_ in scale for p in scale_.graph_parents]),
name=name)
@property
def mixture_distribution(self):
"""Distribution used to select a convex combination of affine transforms."""
return self._mixture_distribution
@property
def distribution(self):
"""Base scalar-event, scalar-batch distribution."""
return self._distribution
@property
def grid(self):
"""Grid of mixing probabilities, one for each grid point."""
return self._grid
@property
def endpoint_affine(self):
"""Affine transformation for each of `K` components."""
return self._endpoint_affine
@property
def interpolated_affine(self):
"""Affine transformation for each convex combination of `K` components."""
return self._interpolated_affine
def _batch_shape_tensor(self):
return self._batch_shape_tensor_
def _batch_shape(self):
return self._batch_shape_
def _event_shape_tensor(self):
return self._event_shape_tensor_
def _event_shape(self):
return self._event_shape_
def _sample_n(self, n, seed=None):
x = self.distribution.sample(
sample_shape=concat_vectors(
[n],
self.batch_shape_tensor(),
self.event_shape_tensor()),
seed=seed) # shape: [n, B, e]
x = [aff.forward(x) for aff in self.endpoint_affine]
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
batch_size = self.batch_shape.num_elements()
if batch_size is None:
      batch_size = math_ops.reduce_prod(self.batch_shape_tensor())
mix_batch_size = self.mixture_distribution.batch_shape.num_elements()
if mix_batch_size is None:
mix_batch_size = math_ops.reduce_prod(
self.mixture_distribution.batch_shape_tensor())
ids = self.mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
[batch_size // mix_batch_size])),
seed=distribution_util.gen_new_seed(
seed, "vector_diffeomixture"))
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = array_ops.reshape(ids, shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
np.int32([-1]))))
# Stride `components * quadrature_size` for `batch_size` number of times.
stride = self.grid.shape.with_rank_at_least(
2)[-2:].num_elements()
if stride is None:
      stride = math_ops.reduce_prod(
          array_ops.shape(self.grid)[-2:])
offset = math_ops.range(start=0,
limit=batch_size * stride,
delta=stride,
dtype=ids.dtype)
weight = array_ops.gather(
array_ops.reshape(self.grid, shape=[-1]),
ids + offset)
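    # Here `weight` is, per draw and batch member, the simplex coordinate that
    # the sampled quadrature point assigns to endpoint 0, i.e.,
    # `grid[b, 0, ids]`; endpoint 1 implicitly receives `1 - weight`.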
# At this point, weight flattened all batch dims into one.
# We also need to append a singleton to broadcast with event dims.
if self.batch_shape.is_fully_defined():
new_shape = [-1] + self.batch_shape.as_list() + [1]
else:
new_shape = array_ops.concat(
([-1], self.batch_shape_tensor(), [1]), axis=0)
weight = array_ops.reshape(weight, shape=new_shape)
if len(x) != 2:
# We actually should have already triggered this exception. However as a
# policy we're putting this exception wherever we exploit the bimixture
# assumption.
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(x)))
# Alternatively:
# x = weight * x[0] + (1. - weight) * x[1]
x = weight * (x[0] - x[1]) + x[1]
return x
def _log_prob(self, x):
# By convention, we always put the grid points right-most.
y = array_ops.stack(
[aff.inverse(x) for aff in self.interpolated_affine],
axis=-1)
log_prob = math_ops.reduce_sum(self.distribution.log_prob(y), axis=-2)
# Because the affine transformation has a constant Jacobian, it is the case
# that `affine.fldj(x) = -affine.ildj(x)`. This is not true in general.
fldj = array_ops.stack([
aff.forward_log_det_jacobian(
x,
event_ndims=array_ops.rank(self.event_shape_tensor())
) for aff in self.interpolated_affine], axis=-1)
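    # log q_N(x) = logsumexp_n[log w_n + sum_d log p(y_{n,d}) - fldj_n], i.e.,
    # a log-mixture over the N quadrature points (recall the logits were
    # created as `log(probs)` in `__init__`).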
return math_ops.reduce_logsumexp(
self.mixture_distribution.logits - fldj + log_prob, axis=-1)
def _mean(self):
p = self._expand_mix_distribution_probs()
m = self._expand_base_distribution_mean()
mean = None
for k, aff in enumerate(self.interpolated_affine):
# aff.forward is going to do this:
# y = array_ops.squeeze(aff.scale.matmul(m), axis=[-1])
# if aff.shift is not None:
# y += aff.shift
mean = add(mean, p[..., k] * aff.forward(m))
return mean
def _covariance(self):
# Law of total variance:
#
# Cov[Z] = E[Cov[Z | V]] + Cov[E[Z | V]]
#
# where,
#
# E[Cov[Z | V]] = sum_i mix_prob[i] Scale[i]
# Cov[E[Z | V]] = sum_i mix_prob[i] osquare(loc[i])
# - osquare(sum_i mix_prob[i] loc[i])
#
# osquare(x) = x.transpose @ x
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=False),
self._covariance_of_mean_given_quadrature_component(diag_only=False))
def _variance(self):
# Equivalent to: tf.linalg.tensor_diag_part(self._covariance()),
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=True),
self._covariance_of_mean_given_quadrature_component(diag_only=True))
def _mean_of_covariance_given_quadrature_component(self, diag_only):
p = self.mixture_distribution.probs
# To compute E[Cov(Z|V)], we'll add matrices within three categories:
# scaled-identity, diagonal, and full. Then we'll combine these at the end.
scale_identity_multiplier = None
diag = None
full = None
for k, aff in enumerate(self.interpolated_affine):
s = aff.scale # Just in case aff.scale has side-effects, we'll call once.
if (s is None
or isinstance(s, linop_identity_lib.LinearOperatorIdentity)):
scale_identity_multiplier = add(scale_identity_multiplier,
p[..., k, array_ops.newaxis])
elif isinstance(s, linop_identity_lib.LinearOperatorScaledIdentity):
scale_identity_multiplier = add(
scale_identity_multiplier,
(p[..., k, array_ops.newaxis] * math_ops.square(s.multiplier)))
elif isinstance(s, linop_diag_lib.LinearOperatorDiag):
diag = add(diag, (p[..., k, array_ops.newaxis] *
math_ops.square(s.diag_part())))
else:
x = (p[..., k, array_ops.newaxis, array_ops.newaxis] *
s.matmul(s.to_dense(), adjoint_arg=True))
if diag_only:
x = array_ops.matrix_diag_part(x)
full = add(full, x)
# We must now account for the fact that the base distribution might have a
# non-unity variance. Recall that, since X ~ iid Law(X_0),
# `Cov(SX+m) = S Cov(X) S.T = S S.T Diag(Var(X_0))`.
# We can scale by `Var(X)` (vs `Cov(X)`) since X corresponds to `d` iid
# samples from a scalar-event distribution.
v = self.distribution.variance()
if scale_identity_multiplier is not None:
scale_identity_multiplier *= v
if diag is not None:
diag *= v[..., array_ops.newaxis]
if full is not None:
full *= v[..., array_ops.newaxis]
if diag_only:
# Apparently we don't need the full matrix, just the diagonal.
r = add(diag, full)
if r is None and scale_identity_multiplier is not None:
ones = array_ops.ones(self.event_shape_tensor(), dtype=self.dtype)
return scale_identity_multiplier[..., array_ops.newaxis] * ones
return add(r, scale_identity_multiplier)
# `None` indicates we don't know if the result is positive-definite.
is_positive_definite = (True if all(aff.scale.is_positive_definite
for aff in self.endpoint_affine)
else None)
to_add = []
if diag is not None:
to_add.append(linop_diag_lib.LinearOperatorDiag(
diag=diag,
is_positive_definite=is_positive_definite))
if full is not None:
to_add.append(linop_full_lib.LinearOperatorFullMatrix(
matrix=full,
is_positive_definite=is_positive_definite))
if scale_identity_multiplier is not None:
to_add.append(linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=self.event_shape_tensor()[0],
multiplier=scale_identity_multiplier,
is_positive_definite=is_positive_definite))
return (linop_add_lib.add_operators(to_add)[0].to_dense()
if to_add else None)
def _covariance_of_mean_given_quadrature_component(self, diag_only):
square = math_ops.square if diag_only else vec_osquare
p = self._expand_mix_distribution_probs()
if not diag_only:
p = p[..., array_ops.newaxis, :] # Assuming event.ndims=1.
m = self._expand_base_distribution_mean()
cov_e_z_given_v = None
e_z_given_v = self._mean()
for k, aff in enumerate(self.interpolated_affine):
y = aff.forward(m)
cov_e_z_given_v = add(cov_e_z_given_v,
p[..., k] * square(y - e_z_given_v))
return cov_e_z_given_v
def _expand_base_distribution_mean(self):
"""Ensures `self.distribution.mean()` has `[batch, event]` shape."""
single_draw_shape = concat_vectors(self.batch_shape_tensor(),
self.event_shape_tensor())
m = array_ops.reshape(
self.distribution.mean(), # A scalar.
shape=array_ops.ones_like(single_draw_shape,
dtype=dtypes.int32))
m = array_ops.tile(m, multiples=single_draw_shape)
m.set_shape(self.batch_shape.concatenate(self.event_shape))
return m
def _expand_mix_distribution_probs(self):
p = self.mixture_distribution.probs # [B, deg]
deg = tensor_shape.dimension_value(p.shape.with_rank_at_least(1)[-1])
if deg is None:
deg = array_ops.shape(p)[-1]
event_ndims = self.event_shape.ndims
if event_ndims is None:
event_ndims = array_ops.shape(self.event_shape_tensor())[0]
expand_shape = array_ops.concat([
self.mixture_distribution.batch_shape_tensor(),
array_ops.ones([event_ndims], dtype=dtypes.int32),
[deg],
], axis=0)
return array_ops.reshape(p, shape=expand_shape)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def maybe_check_quadrature_param(param, name, validate_args):
"""Helper which checks validity of `loc` and `scale` init args."""
with ops.name_scope(name="check_" + name, values=[param]):
assertions = []
if param.shape.ndims is not None:
if param.shape.ndims == 0:
raise ValueError("Mixing params must be a (batch of) vector; "
"{}.rank={} is not at least one.".format(
name, param.shape.ndims))
elif validate_args:
assertions.append(check_ops.assert_rank_at_least(
param, 1,
message=("Mixing params must be a (batch of) vector; "
"{}.rank is not at least one.".format(
name))))
# TODO(jvdillon): Remove once we support k-mixtures.
if param.shape.with_rank_at_least(1)[-1] is not None:
if tensor_shape.dimension_value(param.shape[-1]) != 1:
raise NotImplementedError("Currently only bimixtures are supported; "
"{}.shape[-1]={} is not 1.".format(
name,
tensor_shape.dimension_value(
param.shape[-1])))
elif validate_args:
assertions.append(check_ops.assert_equal(
array_ops.shape(param)[-1], 1,
message=("Currently only bimixtures are supported; "
"{}.shape[-1] is not 1.".format(name))))
if assertions:
return control_flow_ops.with_dependencies(assertions, param)
return param
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def determine_batch_event_shapes(grid, endpoint_affine):
"""Helper to infer batch_shape and event_shape."""
with ops.name_scope(name="determine_batch_event_shapes"):
# grid # shape: [B, k, q]
# endpoint_affine # len=k, shape: [B, d, d]
batch_shape = grid.shape[:-2]
batch_shape_tensor = array_ops.shape(grid)[:-2]
event_shape = None
event_shape_tensor = None
def _set_event_shape(shape, shape_tensor):
if event_shape is None:
return shape, shape_tensor
return (array_ops.broadcast_static_shape(event_shape, shape),
array_ops.broadcast_dynamic_shape(
event_shape_tensor, shape_tensor))
for aff in endpoint_affine:
if aff.shift is not None:
batch_shape = array_ops.broadcast_static_shape(
batch_shape, aff.shift.shape[:-1])
batch_shape_tensor = array_ops.broadcast_dynamic_shape(
batch_shape_tensor, array_ops.shape(aff.shift)[:-1])
event_shape, event_shape_tensor = _set_event_shape(
aff.shift.shape[-1:], array_ops.shape(aff.shift)[-1:])
if aff.scale is not None:
batch_shape = array_ops.broadcast_static_shape(
batch_shape, aff.scale.batch_shape)
batch_shape_tensor = array_ops.broadcast_dynamic_shape(
batch_shape_tensor, aff.scale.batch_shape_tensor())
event_shape, event_shape_tensor = _set_event_shape(
tensor_shape.TensorShape([aff.scale.range_dimension]),
aff.scale.range_dimension_tensor()[array_ops.newaxis])
return batch_shape, batch_shape_tensor, event_shape, event_shape_tensor
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def interpolate_loc(grid, loc):
"""Helper which interpolates between two locs."""
if len(loc) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(loc)))
deg = tensor_shape.dimension_value(grid.shape.with_rank_at_least(1)[-1])
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with ops.name_scope("interpolate_loc", values=[grid, loc]):
if loc is None or loc[0] is None and loc[1] is None:
return [None]*deg
# shape: [B, 1, k, deg]
w = grid[..., array_ops.newaxis, :, :]
loc = [x[..., array_ops.newaxis] # shape: [B, e, 1]
if x is not None else None for x in loc]
if loc[0] is None:
x = w[..., 1, :] * loc[1] # shape: [B, e, deg]
elif loc[1] is None:
x = w[..., 0, :] * loc[0] # shape: [B, e, deg]
else:
delta = loc[0] - loc[1]
x = w[..., 0, :] * delta + loc[1] # shape: [B, e, deg]
return [x[..., k] for k in range(deg)] # list(shape:[B, e])
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def interpolate_scale(grid, scale):
"""Helper which interpolates between two scales."""
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
deg = tensor_shape.dimension_value(grid.shape.with_rank_at_least(1)[-1])
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with ops.name_scope("interpolate_scale", values=[grid]):
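    # For each quadrature point q, build the operator
    # sum_k grid[..., k, q] * scale[k], a convex combination of the two
    # component scales.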
return [linop_add_lib.add_operators([
linop_scale(grid[..., k, q], s)
for k, s in enumerate(scale)
])[0] for q in range(deg)]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def linop_scale(w, op):
# We assume w > 0. (This assumption only relates to the is_* attributes.)
with ops.name_scope("linop_scale", values=[w]):
# TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
# special case combinations here. Once it does, this function can be
# replaced by:
# return linop_composition_lib.LinearOperatorComposition([
# scaled_identity(w), op])
def scaled_identity(w):
return linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=op.range_dimension_tensor(),
multiplier=w,
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, linop_identity_lib.LinearOperatorIdentity):
return scaled_identity(w)
if isinstance(op, linop_identity_lib.LinearOperatorScaledIdentity):
return scaled_identity(w * op.multiplier)
if isinstance(op, linop_diag_lib.LinearOperatorDiag):
return linop_diag_lib.LinearOperatorDiag(
diag=w[..., array_ops.newaxis] * op.diag_part(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, linop_tril_lib.LinearOperatorLowerTriangular):
return linop_tril_lib.LinearOperatorLowerTriangular(
tril=w[..., array_ops.newaxis, array_ops.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
raise NotImplementedError(
"Unsupported Linop type ({})".format(type(op).__name__))
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [distribution_util.static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def add(x, y):
"""Adds inputs; interprets `None` as zero."""
if x is None:
return y
if y is None:
return x
return x + y
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def vec_osquare(x):
"""Computes the outer-product of a (batch of) vector, i.e., x.T x."""
return x[..., :, array_ops.newaxis] * x[..., array_ops.newaxis, :]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def softmax(x, axis, name=None):
"""Equivalent to tf.nn.softmax but works around b/70297725."""
with ops.name_scope(name, "softmax", [x, axis]):
x = ops.convert_to_tensor(x, name="x")
ndims = (x.shape.ndims if x.shape.ndims is not None
else array_ops.rank(x, name="ndims"))
axis = ops.convert_to_tensor(axis, dtype=dtypes.int32, name="axis")
axis_ = tensor_util.constant_value(axis)
if axis_ is not None:
axis = np.int(ndims + axis_ if axis_ < 0 else axis_)
else:
axis = array_ops.where_v2(axis < 0, ndims + axis, axis)
return nn_ops.softmax(x, axis=axis)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/vector_diffeomixture.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"InverseGamma",
"InverseGammaWithSoftplusConcentrationRate",
]
class InverseGamma(distribution.Distribution):
"""InverseGamma distribution.
The `InverseGamma` distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `rate` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
Z = Gamma(alpha) beta**-alpha
```
where:
* `concentration = alpha`,
* `rate = beta`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative distribution function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
```
where `GammaInc` is the [upper incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
  When `concentration > 2` (so that the mean and stddev both exist), the
  parameters can be intuited via their relationship to mean and stddev,
  ```none
  concentration = alpha = (mean / stddev)**2 + 2
  rate = beta = mean * ((mean / stddev)**2 + 1)
  ```
Distribution parameters are automatically broadcast in all functions; see
examples for details.
WARNING: This distribution may draw 0-valued samples for small concentration
values. See note in `tf.random.gamma` docstring.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
dist = tfd.InverseGamma(concentration=3.0, rate=2.0)
dist2 = tfd.InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
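  A hedged usage sketch (TF1 graph mode; values are evaluated in a session):
  ```python
  dist.prob([0.5, 1.0])   # elementwise pdf, shape [2]
  dist.cdf([0.5, 1.0])    # CDF via the upper incomplete gamma function
  dist.mean()             # rate / (concentration - 1); needs concentration > 1
  ```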
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="InverseGamma"):
"""Construct InverseGamma with `concentration` and `rate` parameters.
The parameters `concentration` and `rate` must be shaped in a way that
supports broadcasting (e.g. `concentration + rate` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
rate: Floating point tensor, the inverse scale params of the
distribution(s). Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
] if validate_args else []):
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(InverseGamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._rate],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("concentration", "rate"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.concentration),
array_ops.shape(self.rate))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.concentration.get_shape(),
self.rate.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
@distribution_util.AppendDocstring(
"""Note: See `tf.random.gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
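    # If Y ~ Gamma(concentration, rate=rate), then 1 / Y follows the
    # InverseGamma density used by this class, so we draw a Gamma and invert.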
return 1. / random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
beta=self.rate,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _cdf(self, x):
x = self._maybe_assert_valid_sample(x)
# Note that igammac returns the upper regularized incomplete gamma
# function Q(a, x), which is what we want for the CDF.
return math_ops.igammac(self.concentration, self.rate / x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return -(1. + self.concentration) * math_ops.log(x) - self.rate / x
def _log_normalization(self):
return (math_ops.lgamma(self.concentration)
- self.concentration * math_ops.log(self.rate))
def _entropy(self):
return (self.concentration
+ math_ops.log(self.rate)
+ math_ops.lgamma(self.concentration)
- ((1. + self.concentration) *
math_ops.digamma(self.concentration)))
@distribution_util.AppendDocstring(
"""The mean of an inverse gamma distribution is
`rate / (concentration - 1)`, when `concentration > 1`, and `NaN`
otherwise. If `self.allow_nan_stats` is `False`, an exception will be
raised rather than returning `NaN`""")
def _mean(self):
mean = self.rate / (self.concentration - 1.)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where_v2(self.concentration > 1., mean, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype), self.concentration,
message="mean undefined when any concentration <= 1"),
], mean)
@distribution_util.AppendDocstring(
"""Variance for inverse gamma is defined only for `concentration > 2`. If
`self.allow_nan_stats` is `False`, an exception will be raised rather
than returning `NaN`.""")
def _variance(self):
var = (
math_ops.square(self.rate) / math_ops.squared_difference(
self.concentration, 1.) / (self.concentration - 2.))
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where_v2(self.concentration > 2., var, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
constant_op.constant(2., dtype=self.dtype),
self.concentration,
message="variance undefined when any concentration <= 2"),
], var)
@distribution_util.AppendDocstring(
"""The mode of an inverse gamma distribution is `rate / (concentration +
1)`.""")
def _mode(self):
return self.rate / (1. + self.concentration)
def _maybe_assert_valid_sample(self, x):
check_ops.assert_same_float_dtype(
tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x),
], x)
class InverseGammaWithSoftplusConcentrationRate(InverseGamma):
"""`InverseGamma` with softplus of `concentration` and `rate`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="InverseGammaWithSoftplusConcentrationRate"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/inverse_gamma.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The OneHotCategorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class OneHotCategorical(distribution.Distribution):
"""OneHotCategorical distribution.
The categorical distribution is parameterized by the log-probabilities
of a set of classes. The difference between OneHotCategorical and Categorical
distributions is that OneHotCategorical is a discrete distribution over
one-hot bit vectors whereas Categorical is a discrete distribution over
positive integers. OneHotCategorical is equivalent to Categorical except
Categorical has event_dim=() while OneHotCategorical has event_dim=K, where
K is the number of classes.
This class provides methods to create indexed batches of OneHotCategorical
distributions. If the provided `logits` or `probs` is rank 2 or higher, for
every fixed set of leading dimensions, the last dimension represents one
single OneHotCategorical distribution. When calling distribution
functions (e.g. `dist.prob(x)`), `logits` and `x` are broadcast to the
same shape (if possible). In all cases, the last dimension of `logits,x`
represents single OneHotCategorical distributions.
#### Examples
  Creates a 3-class distribution in which the 2nd class is the most likely to
  be drawn from.
```python
p = [0.1, 0.5, 0.4]
dist = OneHotCategorical(probs=p)
```
Creates a 3-class distribution, with the 2nd class the most likely to be
drawn from, using logits.
```python
logits = [-2, 2, 0]
dist = OneHotCategorical(logits=logits)
```
  Creates a 3-class distribution in which the 3rd class is the most likely to
  be drawn.
```python
# counts is a scalar.
p = [0.1, 0.4, 0.5]
dist = OneHotCategorical(probs=p)
dist.prob([0,1,0]) # Shape []
# p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match.
samples = [[0,1,0], [1,0,0]]
dist.prob(samples) # Shape [2]
```
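  A hedged sketch of the event shape: with `K = 3` classes, each draw is a
  length-3 one-hot vector.
  ```python
  dist = OneHotCategorical(probs=[0.1, 0.5, 0.4])
  dist.sample(7)  # shape [7, 3]; each row is one-hot over the 3 classes.
  ```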
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(
self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="OneHotCategorical"):
"""Initialize OneHotCategorical distributions using class log-probabilities.
Args:
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities of a
set of Categorical distributions. The first `N - 1` dimensions index
into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities of a set
of Categorical distributions. The first `N - 1` dimensions index into a
batch of independent distributions and the last dimension represents a
vector of probabilities for each class. Only one of `logits` or `probs`
should be passed in.
dtype: The type of the event samples (default: int32).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
name=name, logits=logits, probs=probs, validate_args=validate_args,
multidimensional=True)
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
with ops.name_scope(name="event_size"):
self._event_size = array_ops.shape(self._logits)[-1]
super(OneHotCategorical, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs],
name=name)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of coordinatewise probabilities."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self.logits)[:-1]
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.logits)[-1:]
def _event_shape(self):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
logits = self.logits
if logits.get_shape().ndims == 2:
logits_2d = logits
else:
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
samples = random_ops.multinomial(logits_2d, n, seed=seed)
samples = array_ops.transpose(samples)
samples = array_ops.one_hot(samples, self.event_size, dtype=self.dtype)
ret = array_ops.reshape(samples, sample_shape)
return ret
def _log_prob(self, x):
x = self._assert_valid_sample(x)
# broadcast logits or x if need be.
logits = self.logits
if (not x.get_shape().is_fully_defined() or
not logits.get_shape().is_fully_defined() or
x.get_shape() != logits.get_shape()):
logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
x = array_ops.ones_like(logits, dtype=x.dtype) * x
logits_shape = array_ops.shape(math_ops.reduce_sum(logits, -1))
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
x_2d = array_ops.reshape(x, [-1, self.event_size])
ret = -nn_ops.softmax_cross_entropy_with_logits(labels=x_2d,
logits=logits_2d)
# Reshape back to user-supplied batch and sample dims prior to 2D reshape.
ret = array_ops.reshape(ret, logits_shape)
return ret
def _entropy(self):
return -math_ops.reduce_sum(
nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
def _mode(self):
ret = math_ops.argmax(self.logits, axis=self._batch_rank)
ret = array_ops.one_hot(ret, self.event_size, dtype=self.dtype)
ret.set_shape(self.logits.get_shape())
return ret
def _covariance(self):
p = self.probs
ret = -math_ops.matmul(p[..., None], p[..., None, :])
return array_ops.matrix_set_diag(ret, self._variance())
def _variance(self):
return self.probs * (1. - self.probs)
def _assert_valid_sample(self, x):
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_non_positive(x),
check_ops.assert_near(
array_ops.zeros([], dtype=self.dtype),
math_ops.reduce_logsumexp(x, axis=[-1])),
], x)
@kullback_leibler.RegisterKL(OneHotCategorical, OneHotCategorical)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.
Args:
a: instance of a OneHotCategorical distribution object.
b: instance of a OneHotCategorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_categorical_categorical", values=[
a.logits, b.logits]):
# sum(p ln(p / q))
return math_ops.reduce_sum(
nn_ops.softmax(a.logits) * (nn_ops.log_softmax(a.logits)
- nn_ops.log_softmax(b.logits)),
axis=-1)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/onehot_categorical.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The RelaxedBernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import logistic
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
# Bijectors must be directly imported because `remove_undocumented` prevents
# individual file imports.
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class RelaxedBernoulli(transformed_distribution.TransformedDistribution):
"""RelaxedBernoulli distribution with temperature and logits parameters.
The RelaxedBernoulli is a distribution over the unit interval (0,1), which
continuously approximates a Bernoulli. The degree of approximation is
controlled by a temperature: as the temperature goes to 0 the
RelaxedBernoulli becomes discrete with a distribution described by the
`logits` or `probs` parameters, as the temperature goes to infinity the
RelaxedBernoulli becomes the constant distribution that is identically 0.5.
The RelaxedBernoulli distribution is a reparameterized continuous
distribution that is the binary special case of the RelaxedOneHotCategorical
distribution (Maddison et al., 2016; Jang et al., 2016). For details on the
binary special case see the appendix of Maddison et al. (2016) where it is
referred to as BinConcrete. If you use this distribution, please cite both
papers.
Some care needs to be taken for loss functions that depend on the
log-probability of RelaxedBernoullis, because computing log-probabilities of
  the RelaxedBernoulli can suffer from underflow issues. In many cases, loss
functions such as these are invariant under invertible transformations of
the random variables. The KL divergence, found in the variational autoencoder
loss, is an example. Because RelaxedBernoullis are sampled by a Logistic
random variable followed by a `tf.sigmoid` op, one solution is to treat
the Logistic as the random variable and `tf.sigmoid` as downstream. The
  KL divergence between two Logistics, each always followed by a `tf.sigmoid`
  op, is then equivalent to the KL divergence between the corresponding
  RelaxedBernoullis.
See Maddison et al., 2016 for more details where this distribution is called
the BinConcrete.
An alternative approach is to evaluate Bernoulli log probability or KL
directly on relaxed samples, as done in Jang et al., 2016. In this case,
guarantees on the loss are usually violated. For instance, using a Bernoulli
KL in a relaxed ELBO is no longer a lower bound on the log marginal
probability of the observation. Thus care and early stopping are important.
#### Examples
Creates three continuous distributions, which approximate 3 Bernoullis with
probabilities (0.1, 0.5, 0.4). Samples from these distributions will be in
the unit interval (0,1).
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = RelaxedBernoulli(temperature, probs=p)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1).
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
  Creates three continuous distributions whose sigmoid-transformed samples
  approximate 3 Bernoullis with logits (-2, 2, 0).
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = Logistic(logits/temperature, 1./temperature)
samples = dist.sample()
sigmoid_samples = tf.sigmoid(samples)
# sigmoid_samples has the same distribution as samples from
# RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1). Because the temperature is very low, samples from
these distributions are almost discrete, usually taking values very close to 0
or 1.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1). Because the temperature is very high, samples from
these distributions are usually close to the (0.5, 0.5, 0.5) vector.
```python
temperature = 100
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with
Gumbel-Softmax. 2016.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
temperature,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="RelaxedBernoulli"):
"""Construct RelaxedBernoulli distributions.
Args:
temperature: An 0-D `Tensor`, representing the temperature
of a set of RelaxedBernoulli distributions. The temperature should be
positive.
logits: An N-D `Tensor` representing the log-odds
of a positive event. Each entry in the `Tensor` parametrizes
an independent RelaxedBernoulli distribution where the probability of an
event is sigmoid(logits). Only one of `logits` or `probs` should be
passed in.
probs: An N-D `Tensor` representing the probability of a positive event.
Each entry in the `Tensor` parameterizes an independent Bernoulli
distribution. Only one of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If both `probs` and `logits` are passed, or if neither.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs, temperature]) as name:
with ops.control_dependencies([check_ops.assert_positive(temperature)]
if validate_args else []):
self._temperature = array_ops.identity(temperature, name="temperature")
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits, probs=probs, validate_args=validate_args)
super(RelaxedBernoulli, self).__init__(
distribution=logistic.Logistic(
self._logits / self._temperature,
1. / self._temperature,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name + "/Logistic"),
bijector=Sigmoid(validate_args=validate_args),
validate_args=validate_args,
name=name)
self._parameters = parameters
@staticmethod
def _param_shapes(sample_shape):
return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def temperature(self):
"""Distribution parameter for the location."""
return self._temperature
@property
def logits(self):
"""Log-odds of `1`."""
return self._logits
@property
def probs(self):
"""Probability of `1`."""
return self._probs
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
#### Examples
```python
# Create a mixture of two Gaussians:
import tensorflow_probability as tfp
tfd = tfp.distributions
mix = 0.3
bimix_gauss = tfd.Mixture(
cat=tfd.Categorical(probs=[mix, 1.-mix]),
components=[
tfd.Normal(loc=-1., scale=0.1),
tfd.Normal(loc=+1., scale=0.5),
])
# Plot the PDF.
import matplotlib.pyplot as plt
x = tf.linspace(-2., 3., int(1e4)).eval()
plt.plot(x, bimix_gauss.prob(x).eval());
```
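  A hedged follow-up sketch, sampling and scoring the mixture defined above:
  ```python
  samples = bimix_gauss.sample(5)         # shape: [5]
  log_p = bimix_gauss.log_prob(samples)   # shape: [5]
  ```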
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
use_static_graph=False,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
of `distributions`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: Python `bool`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
use_static_graph: Calls to `sample` will not rely on dynamic tensor
indexing, allowing for some static graph compilation optimizations, but
at the expense of sampling all underlying distributions in the mixture.
(Possibly useful when running on TPUs).
Default value: `False` (i.e., use dynamic indexing).
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
parameters = dict(locals())
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with ops.name_scope(name, values=[cat.logits]) as name:
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
self._use_static_graph = use_static_graph
if use_static_graph and static_num_components is None:
raise ValueError("Number of categories must be known statically when "
                 "`use_static_graph=True`.")
# We let the Mixture distribution access _graph_parents since it's arguably
# more like a base class.
graph_parents = self._cat._graph_parents # pylint: disable=protected-access
for c in self._components:
graph_parents += c._graph_parents # pylint: disable=protected-access
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(self.event_shape.ndims):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return math_ops.add_n(partial_means)
def _stddev(self):
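# The mixture standard deviation follows the law of total variance,
#   Var[X] = E[Var[X | c]] + Var[E[X | c]],
# which `distribution_utils.mixture_stddev` evaluates from the per-component
# means/stddevs and the mixture weights (flattened over the batch below).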
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
distribution_devs = [d.stddev() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
stacked_means = array_ops.stack(distribution_means, axis=-1)
stacked_devs = array_ops.stack(distribution_devs, axis=-1)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) *
array_ops.ones_like(stacked_means))
batched_dev = distribution_utils.mixture_stddev(
array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
array_ops.reshape(stacked_means, [-1, len(self.components)]),
array_ops.reshape(stacked_devs, [-1, len(self.components)]))
# I.e. re-shape to list(batch_shape) + list(event_shape).
return array_ops.reshape(batched_dev,
array_ops.shape(broadcasted_cat_probs)[:-1])
def _log_prob(self, x):
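# log prob(x) = logsumexp_i [ log c_i + log p_i(x) ], i.e., the log of the
# mixture density sum_i c_i p_i(x), evaluated stably via reduce_logsumexp.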
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.stack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _log_cdf(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_cdfs = [
cat_lp + d_lcdf
for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
]
concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
return mixture_log_cdf
def _sample_n(self, n, seed=None):
if self._use_static_graph:
# This sampling approach is almost the same as the approach used by
# `MixtureSameFamily`. The differences are due to having a list of
# `Distribution` objects rather than a single object, and maintaining
# random seed management that is consistent with the non-static code path.
samples = []
cat_samples = self.cat.sample(n, seed=seed)
for c in range(self.num_components):
seed = distribution_util.gen_new_seed(seed, "mixture")
samples.append(self.components[c].sample(n, seed=seed))
x = array_ops.stack(
samples, -self._static_event_shape.ndims - 1) # [n, B, k, E]
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=cat_samples, # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = distribution_utils.pad_mixture_dimensions(
mask, self, self._cat,
self._static_event_shape.ndims) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask,
axis=-1 - self._static_event_shape.ndims) # [n, B, E]
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
# Suppose batch indices are, e.g., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [0 1] and [0 1 0 1]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 0, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 1, 0, 1
# (samples 0, 1, 2, 3).
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
seed = distribution_util.gen_new_seed(seed, "mixture")
samples_class_c = self.components[c].sample(n_class, seed=seed)
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat([[n_class * batch_size], event_shape], 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat([samples_shape,
self.event_shape_tensor()], 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the joint model distribution, \\( q \\) is the variational
distribution,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
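A hypothetical usage sketch, reusing `bimix_gauss` from the class docstring
above:
```python
h_lb = bimix_gauss.entropy_lower_bound()
# ==> scalar Tensor equal to 0.3 * H[Normal(-1, 0.1)] + 0.7 * H[Normal(1, 0.5)],
#     which is always <= the true (intractable) mixture entropy.
```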
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return math_ops.add_n(partial_entropies)
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/mixture.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Autoregressive distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Autoregressive(distribution_lib.Distribution):
"""Autoregressive distributions.
The Autoregressive distribution enables learning (often) richer multivariate
distributions by repeatedly applying a [diffeomorphic](
https://en.wikipedia.org/wiki/Diffeomorphism) transformation (such as
implemented by `Bijector`s). Regarding terminology,
"Autoregressive models decompose the joint density as a product of
conditionals, and model each conditional in turn. Normalizing flows
transform a base density (e.g. a standard Gaussian) into the target density
by an invertible transformation with tractable Jacobian." [(Papamakarios et
al., 2017)][1]
In other words, the "autoregressive property" is equivalent to the
decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. For example,
the `masked_autoregressive_default_template` helper (used to build a
`shift_and_log_scale_fn` for `MaskedAutoregressiveFlow`) achieves this
property by zeroing out weights in its `masked_dense` layers.
Practically speaking the autoregressive property means that there exists a
permutation of the event coordinates such that each coordinate is a
diffeomorphic function of only preceding coordinates
[(van den Oord et al., 2016)][2].
#### Mathematical Details
The probability function is
```none
prob(x; fn, n) = fn(x).prob(x)
```
And a sample is generated by
```none
x = fn(...fn(fn(x0).sample()).sample()).sample()
```
where the ellipses (`...`) represent `n-2` composed calls to `fn`, `fn`
constructs a `tfp.distributions.Distribution`-like instance, and `x0` is a
fixed initializing `Tensor`.
#### Examples
```python
# (This sketch assumes TensorFlow Probability; `tfp.bijectors.Affine` and
# `tfp.math.fill_triangular` stand in for the contrib helpers referenced in
# the original example.)
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
def normal_fn(event_size):
  n = event_size * (event_size + 1) // 2
  p = tf.Variable(tfd.Normal(loc=0., scale=1.).sample(n))
  affine = tfb.Affine(scale_tril=tfp.math.fill_triangular(0.25 * p))
  def _fn(samples):
    scale = tf.exp(affine.forward(samples))
    return tfd.Independent(
        tfd.Normal(loc=0., scale=scale, validate_args=True),
        reinterpreted_batch_ndims=1)
  return _fn
batch_and_event_shape = [3, 2, 4]
sample0 = tf.zeros(batch_and_event_shape)
ar = tfd.Autoregressive(normal_fn(batch_and_event_shape[-1]), sample0)
x = ar.sample([6, 5])
# ==> x.shape = [6, 5, 3, 2, 4]
prob_x = ar.prob(x)
# ==> prob_x.shape = [6, 5, 3, 2]
```
#### References
[1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
[2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,
Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with
PixelCNN Decoders. In _Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.05328
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
distribution_fn,
sample0=None,
num_steps=None,
validate_args=False,
allow_nan_stats=True,
name="Autoregressive"):
"""Construct an `Autoregressive` distribution.
Args:
distribution_fn: Python `callable` which constructs a
`tfp.distributions.Distribution`-like instance from a `Tensor` (e.g.,
`sample0`). The function must respect the "autoregressive property",
i.e., there exists a permutation of the event coordinates such that each
coordinate is a diffeomorphic function of only preceding coordinates.
sample0: Initial input to `distribution_fn`; used to
build the distribution in `__init__` which in turn specifies this
distribution's properties, e.g., `event_shape`, `batch_shape`, `dtype`.
If unspecified, then `distribution_fn` should be default constructable.
num_steps: Number of times `distribution_fn` is composed from samples,
e.g., `num_steps=2` implies
`distribution_fn(distribution_fn(sample0).sample(n)).sample()`.
validate_args: Python `bool`. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Default value: "Autoregressive".
Raises:
ValueError: if `num_steps` and
`distribution_fn(sample0).event_shape.num_elements()` are both `None`.
ValueError: if `num_steps < 1`.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._distribution_fn = distribution_fn
self._sample0 = sample0
self._distribution0 = (distribution_fn() if sample0 is None
else distribution_fn(sample0))
if num_steps is None:
num_steps = self._distribution0.event_shape.num_elements()
if num_steps is None:
raise ValueError("distribution_fn must generate a distribution "
"with fully known `event_shape`.")
if num_steps < 1:
raise ValueError("num_steps ({}) must be at least 1.".format(num_steps))
self._num_steps = num_steps
super(Autoregressive, self).__init__(
dtype=self._distribution0.dtype,
reparameterization_type=self._distribution0.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=self._distribution0._graph_parents, # pylint: disable=protected-access
name=name)
@property
def distribution_fn(self):
return self._distribution_fn
@property
def sample0(self):
return self._sample0
@property
def num_steps(self):
return self._num_steps
@property
def distribution0(self):
return self._distribution0
def _batch_shape(self):
return self.distribution0.batch_shape
def _batch_shape_tensor(self):
return self.distribution0.batch_shape_tensor()
def _event_shape(self):
return self.distribution0.event_shape
def _event_shape_tensor(self):
return self.distribution0.event_shape_tensor()
def _sample_n(self, n, seed=None):
if seed is None:
seed = distribution_util.gen_new_seed(
seed=np.random.randint(2**32 - 1),
salt="autoregressive")
samples = self.distribution0.sample(n, seed=seed)
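# Each pass re-conditions `distribution_fn` on the current samples; after
# `num_steps` passes every event coordinate has been resampled conditioned
# on (a permutation of) the preceding coordinates, so `samples` follows the
# autoregressive model.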
for _ in range(self._num_steps):
samples = self.distribution_fn(samples).sample(seed=seed)
return samples
def _log_prob(self, value):
return self.distribution_fn(value).log_prob(value)
def _prob(self, value):
return self.distribution_fn(value).prob(value)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/autoregressive.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SinhArcsinh transformation of a distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.util import deprecation
__all__ = [
"SinhArcsinh",
]
class SinhArcsinh(transformed_distribution.TransformedDistribution):
"""The SinhArcsinh transformation of a distribution on `(-inf, inf)`.
This distribution models a random variable, making use of
a `SinhArcsinh` transformation (which has adjustable tailweight and skew),
a rescaling, and a shift.
The `SinhArcsinh` transformation of the Normal is described in great depth in
[Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).
Here we use a slightly different parameterization, in terms of `tailweight`
and `skewness`. Additionally we allow for distributions other than Normal,
and control over `scale` as well as a "shift" parameter `loc`.
#### Mathematical Details
Given random variable `Z`, we define the SinhArcsinh
transformation of `Z`, `Y`, parameterized by
`(loc, scale, skewness, tailweight)`, via the relation:
```
Y := loc + scale * F(Z) * (2 / F_0(2))
F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
```
This distribution is similar to the location-scale transformation
`L(Z) := loc + scale * Z` in the following ways:
* If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then
`Y = L(Z)` exactly.
* `loc` is used in both to shift the result by a constant factor.
* The multiplication of `scale` by `2 / F_0(2)` ensures that if `skewness = 0`
`P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.
Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond
`loc + 2 * scale` are the same.
This distribution is different than `loc + scale * Z` due to the
reshaping done by `F`:
* Positive (negative) `skewness` leads to positive (negative) skew.
* positive skew means the mode of `F(Z)` is "tilted" to the right.
* positive skew means positive values of `F(Z)` become more likely, and
negative values become less likely.
* Larger (smaller) `tailweight` leads to fatter (thinner) tails.
* Fatter tails mean larger values of `|F(Z)|` become more likely.
* `tailweight < 1` leads to a distribution that is "flat" around `Y = loc`,
and a very steep drop-off in the tails.
* `tailweight > 1` leads to a distribution more peaked at the mode with
heavier tails.
To see the argument about the tails, note that for `|Z| >> 1` and
`|Z| >> (|skewness| * tailweight)**tailweight`, we have
`Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.
To see the argument regarding multiplying `scale` by `2 / F_0(2)`,
```
P[(Y - loc) / scale <= 2] = P[F(Z) * (2 / F_0(2)) <= 2]
= P[F(Z) <= F_0(2)]
= P[Z <= 2] (if F = F_0).
```
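#### Examples
A minimal usage sketch (assuming the TensorFlow Probability port,
`tfp.distributions.SinhArcsinh`, which mirrors this class):
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# A right-skewed, heavier-tailed relative of Normal(loc=0., scale=1.).
dist = tfd.SinhArcsinh(loc=0., scale=1., skewness=0.5, tailweight=1.5)
samples = dist.sample(1000)
log_prob_at_zero = dist.log_prob(0.)
```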
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
skewness=None,
tailweight=None,
distribution=None,
validate_args=False,
allow_nan_stats=True,
name="SinhArcsinh"):
"""Construct SinhArcsinh distribution on `(-inf, inf)`.
Arguments `(loc, scale, skewness, tailweight)` must have broadcastable shape
(indexing batch dimensions). They must all have the same `dtype`.
Args:
loc: Floating-point `Tensor`.
scale: `Tensor` of same `dtype` as `loc`.
skewness: Skewness parameter. Default is `0.0` (no skew).
tailweight: Tailweight parameter. Default is `1.0` (unchanged tailweight).
distribution: `tf.Distribution`-like instance. Distribution that is
transformed to produce this distribution.
Default is `tfp.distributions.Normal(0., 1.)`.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
a `SinhArcsinh` sample and `distribution` is not
`FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
the gradient will be incorrect!
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name,
values=[loc, scale, skewness, tailweight]) as name:
loc = ops.convert_to_tensor(loc, name="loc")
dtype = loc.dtype
scale = ops.convert_to_tensor(scale, name="scale", dtype=dtype)
tailweight = 1. if tailweight is None else tailweight
has_default_skewness = skewness is None
skewness = 0. if skewness is None else skewness
tailweight = ops.convert_to_tensor(
tailweight, name="tailweight", dtype=dtype)
skewness = ops.convert_to_tensor(skewness, name="skewness", dtype=dtype)
batch_shape = distribution_util.get_broadcast_shape(
loc, scale, tailweight, skewness)
# Recall, with Z a random variable,
# Y := loc + C * F(Z),
# F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
# F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
# C := 2 * scale / F_0(2)
if distribution is None:
distribution = normal.Normal(
loc=array_ops.zeros([], dtype=dtype),
scale=array_ops.ones([], dtype=dtype),
allow_nan_stats=allow_nan_stats)
else:
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
loc = control_flow_ops.with_dependencies(asserts, loc)
# Make the SAS bijector, 'F'.
f = bijectors.SinhArcsinh(
skewness=skewness, tailweight=tailweight)
if has_default_skewness:
f_noskew = f
else:
f_noskew = bijectors.SinhArcsinh(
skewness=skewness.dtype.as_numpy_dtype(0.),
tailweight=tailweight)
# Make the AffineScalar bijector, Z --> loc + scale * Z (2 / F_0(2))
c = 2 * scale / f_noskew.forward(ops.convert_to_tensor(2, dtype=dtype))
affine = bijectors.AffineScalar(
shift=loc,
scale=c,
validate_args=validate_args)
bijector = bijectors.Chain([affine, f])
super(SinhArcsinh, self).__init__(
distribution=distribution,
bijector=bijector,
batch_shape=batch_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
self._loc = loc
self._scale = scale
self._tailweight = tailweight
self._skewness = skewness
@property
def loc(self):
  """The `loc` in `Y := loc + scale * F(Z) * (2 / F_0(2))`."""
return self._loc
@property
def scale(self):
  """The `scale` in `Y := loc + scale * F(Z) * (2 / F_0(2))`."""
return self._scale
@property
def tailweight(self):
  """Controls the tail decay. `tailweight > 1` means heavier tails than Normal."""
return self._tailweight
@property
def skewness(self):
"""Controls the skewness. `Skewness > 0` means right skew."""
return self._skewness
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Geometric distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Geometric(distribution.Distribution):
"""Geometric distribution.
The Geometric distribution is parameterized by `p`, the probability of a
positive event. It represents the probability that in `k + 1` Bernoulli trials,
the first `k` trials fail before a success is seen.
The pmf of this distribution is:
#### Mathematical Details
```none
pmf(k; p) = (1 - p)**k * p
```
where:
* `p` is the success probability, `0 < p <= 1`, and,
* `k` is a non-negative integer.
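#### Examples
A minimal usage sketch (assuming the TensorFlow Probability port,
`tfp.distributions.Geometric`, which mirrors this class):
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
geom = tfd.Geometric(probs=0.5)
geom.prob(3.)   # ==> (1 - 0.5)**3 * 0.5 = 0.0625
geom.mean()     # ==> (1 - p) / p = 1.
```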
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="Geometric"):
"""Construct Geometric distributions.
Args:
logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
indicates the number of batch dimensions. Each entry represents logits
for the probability of success for independent Geometric distributions
and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
should be specified.
probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]`
where `b >= 0` indicates the number of batch dimensions. Each entry
represents the probability of success for independent Geometric
distributions and must be in the range `(0, 1]`. Only one of `logits`
or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(self._probs)] if validate_args else []):
self._probs = array_ops.identity(self._probs, name="probs")
super(Geometric, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._probs, self._logits],
name=name)
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._probs)
def _batch_shape(self):
return self.probs.get_shape()
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
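# Given U ~ Uniform(0, 1), floor(log(U) / log(1 - p)) has a Geometric(p)
# distribution (inverse-transform sampling of the number of failures before
# the first success); log1p(-probs) computes log(1 - p) stably.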
sampled = random_ops.random_uniform(
array_ops.concat([[n], array_ops.shape(self._probs)], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
return math_ops.floor(
math_ops.log(sampled) / math_ops.log1p(-self.probs))
def _cdf(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
else:
# Whether or not x is integer-form, the following is well-defined.
# However, scipy takes the floor, so we do too.
x = math_ops.floor(x)
x *= array_ops.ones_like(self.probs)
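# CDF(k; p) = 1 - (1 - p)**(k + 1), computed stably as
# -expm1((k + 1) * log1p(-p)); the CDF is 0 for x < 0.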
return array_ops.where(
x < 0.,
array_ops.zeros_like(x),
-math_ops.expm1((1. + x) * math_ops.log1p(-self.probs)))
def _log_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
else:
# For consistency with cdf, we take the floor.
x = math_ops.floor(x)
x *= array_ops.ones_like(self.probs)
probs = self.probs * array_ops.ones_like(x)
safe_domain = array_ops.where(
math_ops.equal(x, 0.),
array_ops.zeros_like(probs),
probs)
return x * math_ops.log1p(-safe_domain) + math_ops.log(probs)
def _entropy(self):
probs = self._probs
if self.validate_args:
probs = control_flow_ops.with_dependencies(
[check_ops.assert_less(
probs,
constant_op.constant(1., probs.dtype),
message="Entropy is undefined when logits = inf or probs = 1.")],
probs)
# Claim: entropy(p) = softplus(s)/p - s
# where s=logits and p=probs.
#
# Proof:
#
# entropy(p)
# := -[(1-p)log(1-p) + plog(p)]/p
# = -[log(1-p) + plog(p/(1-p))]/p
# = -[-softplus(s) + ps]/p
# = softplus(s)/p - s
#
# since,
# log[1-sigmoid(s)]
# = log[1/(1+exp(s)]
# = -log[1+exp(s)]
# = -softplus(s)
#
# using the fact that,
# 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
return nn.softplus(self.logits) / probs - self.logits
def _mean(self):
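# E[X] = (1 - p) / p = exp(-logits), since logits = log(p / (1 - p)).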
return math_ops.exp(-self.logits)
def _variance(self):
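# Var[X] = (1 - p) / p**2 = E[X] / p.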
return self._mean() / self.probs
def _mode(self):
return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/geometric.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vector Student's t distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.util import deprecation
class _VectorStudentT(transformed_distribution.TransformedDistribution):
"""A vector version of Student's t-distribution on `R^k`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + 1)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) ( sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) )**k
```
where:
* `loc = mu`; a vector in `R^k`,
* `scale = Sigma`; a lower-triangular matrix in `R^{k x k}`,
* `Z` denotes the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function), and,
* `||y||**2` denotes the [squared Euclidean norm](
https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `y`.
The VectorStudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
Notice that the `scale` matrix has semantics closer to std. deviation than
covariance (but it is not std. deviation).
This distribution is an Affine transformation of iid
[Student's t-distributions](
https://en.wikipedia.org/wiki/Student%27s_t-distribution)
and should not be confused with the [Multivariate Student's t-distribution](
https://en.wikipedia.org/wiki/Multivariate_t-distribution). The
traditional Multivariate Student's t-distribution is a type of
[elliptical distribution](
https://en.wikipedia.org/wiki/Elliptical_distribution); it has PDF:
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))
```
Notice that the Multivariate Student's t-distribution uses `k` where the
Vector Student's t-distribution has a `1`. Conversely the Vector version has a
broader application of the power-`k` in the normalization constant.
#### Examples
A single instance of a "Vector Student's t-distribution" is defined by a mean
vector of length `k` and a scale matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate vector Student's t-distribution.
mu = [1., 2, 3]
chol = [[1., 0, 0.],
[1, 3, 0],
[1, 2, 3]]
vt = tfd.VectorStudentT(df=2, loc=mu, scale_tril=chol)
# Evaluate this on an observation in R^3, returning a scalar.
vt.prob([-1., 0, 1])
# Initialize a batch of two 3-variate vector Student's t-distributions.
mu = [[1., 2, 3],
[11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
vt = tfd.VectorStudentT(df=2, loc=mu, scale_tril=chol)
# Evaluate this on two observations, each in R^3, returning a length-two
# tensor.
x = [[-1, 0, 1],
[-11, 0, 11]]
vt.prob(x)
```
For more examples of how to construct the `scale` matrix, see the
`tf.contrib.distributions.bijectors.Affine` docstring.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
loc=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
allow_nan_stats=True,
name="VectorStudentT"):
"""Instantiates the vector Student's t-distributions on `R^k`.
The `batch_shape` is the broadcast between `df.batch_shape` and
`Affine.batch_shape` where `Affine` is constructed from `loc` and
`scale_*` arguments.
The `event_shape` is the event shape of `Affine.event_shape`.
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values. Must be
scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have the
same `batch_shape` implied by `loc`, `scale_*`.
loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is
applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ..., k], which represents a k x k
diagonal matrix. When `None` no diagonal term is added to `scale`.
scale_tril: Floating-point `Tensor` representing the lower triangular matrix.
`scale_tril` has shape [N1, N2, ..., k, k], which represents a k x k
lower triangular matrix. When `None` no `scale_tril` term is added to
`scale`. The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Floating-point `Tensor` representing factor matrix
with last two dimensions of shape `(k, r)`. When `None`, no rank-r
update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing the diagonal
matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which
represents an r x r Diagonal matrix. When `None` low rank updates will
take the form `scale_perturb_factor * scale_perturb_factor.T`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
graph_parents = [df, loc, scale_identity_multiplier, scale_diag,
scale_tril, scale_perturb_factor, scale_perturb_diag]
with ops.name_scope(name) as name:
with ops.name_scope("init", values=graph_parents):
# The shape of the _VectorStudentT distribution is governed by the
# relationship between df.batch_shape and affine.batch_shape. In
# pseudocode the basic procedure is:
# if df.batch_shape is scalar:
# if affine.batch_shape is not scalar:
# # broadcast distribution.sample so
# # it has affine.batch_shape.
# self.batch_shape = affine.batch_shape
# else:
# if affine.batch_shape is scalar:
# # let affine broadcasting do its thing.
# self.batch_shape = df.batch_shape
# All of the above magic is actually handled by TransformedDistribution.
# Here we really only need to collect the affine.batch_shape and decide
# what we're going to pass in to TransformedDistribution's
# (override) batch_shape arg.
affine = bijectors.Affine(
shift=loc,
scale_identity_multiplier=scale_identity_multiplier,
scale_diag=scale_diag,
scale_tril=scale_tril,
scale_perturb_factor=scale_perturb_factor,
scale_perturb_diag=scale_perturb_diag,
validate_args=validate_args)
distribution = student_t.StudentT(
df=df,
loc=array_ops.zeros([], dtype=affine.dtype),
scale=array_ops.ones([], dtype=affine.dtype))
batch_shape, override_event_shape = (
distribution_util.shapes_from_loc_and_scale(
affine.shift, affine.scale))
override_batch_shape = distribution_util.pick_vector(
distribution.is_scalar_batch(),
batch_shape,
constant_op.constant([], dtype=dtypes.int32))
super(_VectorStudentT, self).__init__(
distribution=distribution,
bijector=affine,
batch_shape=override_batch_shape,
event_shape=override_event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def df(self):
"""Degrees of freedom in these Student's t distribution(s)."""
return self.distribution.df
@property
def loc(self):
"""Locations of these Student's t distribution(s)."""
return self.bijector.shift
@property
def scale(self):
"""Dense (batch) covariance matrix, if available."""
return self.bijector.scale
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/vector_student_t.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Deterministic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
__all__ = [
"Deterministic",
"VectorDeterministic",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDeterministic(distribution.Distribution):
"""Base class for Deterministic distributions."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
atol=None,
rtol=None,
is_vector=False,
validate_args=False,
allow_nan_stats=True,
name="_BaseDeterministic"):
"""Initialize a batch of `_BaseDeterministic` distributions.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor`. The point (or batch of points) on which this
distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`,
else `Deterministic`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If `loc` is a scalar.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, atol, rtol]) as name:
loc = ops.convert_to_tensor(loc, name="loc")
if is_vector and validate_args:
msg = "Argument loc must be at least rank 1."
if loc.get_shape().ndims is not None:
if loc.get_shape().ndims < 1:
raise ValueError(msg)
else:
loc = control_flow_ops.with_dependencies(
[check_ops.assert_rank_at_least(loc, 1, message=msg)], loc)
self._loc = loc
super(_BaseDeterministic, self).__init__(
dtype=self._loc.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc],
name=name)
self._atol = self._get_tol(atol)
self._rtol = self._get_tol(rtol)
# Avoid using the large broadcast with self.loc if possible.
if rtol is None:
self._slack = self.atol
else:
self._slack = self.atol + self.rtol * math_ops.abs(self.loc)
def _get_tol(self, tol):
if tol is None:
return ops.convert_to_tensor(0, dtype=self.loc.dtype)
tol = ops.convert_to_tensor(tol, dtype=self.loc.dtype)
if self.validate_args:
tol = control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
tol, message="Argument 'tol' must be non-negative")
], tol)
return tol
@property
def loc(self):
"""Point (or batch of points) at which this distribution is supported."""
return self._loc
@property
def atol(self):
"""Absolute tolerance for comparing points to `self.loc`."""
return self._atol
@property
def rtol(self):
"""Relative tolerance for comparing points to `self.loc`."""
return self._rtol
def _entropy(self):
return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
def _mean(self):
return array_ops.identity(self.loc)
def _variance(self):
return array_ops.zeros_like(self.loc)
def _mode(self):
return self.mean()
def _sample_n(self, n, seed=None): # pylint: disable=unused-arg
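# Sampling is deterministic: tile `loc` along a new leading sample dimension
# of size `n`; the `seed` argument is ignored.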
n_static = tensor_util.constant_value(ops.convert_to_tensor(n))
if n_static is not None and self.loc.get_shape().ndims is not None:
ones = [1] * self.loc.get_shape().ndims
multiples = [n_static] + ones
else:
ones = array_ops.ones_like(array_ops.shape(self.loc))
multiples = array_ops.concat(([n], ones), axis=0)
return array_ops.tile(self.loc[array_ops.newaxis, ...], multiples=multiples)
class Deterministic(_BaseDeterministic):
"""Scalar `Deterministic` distribution on the real line.
The scalar `Deterministic` distribution is parameterized by a [batch] point
`loc` on the real line. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) and cumulative distribution function (cdf)
are
```none
pmf(x; loc) = 1, if x == loc, else 0
cdf(x; loc) = 1, if x >= loc, else 0
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single Deterministic supported at zero.
constant = tfd.Deterministic(0.)
constant.prob(0.)
==> 1.
constant.prob(2.)
==> 0.
# Initialize a [2, 2] batch of scalar constants.
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
constant = tfd.Deterministic(loc)
constant.prob(x)
==> [[1., 0.], [0., 1.]]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="Deterministic"):
"""Initialize a scalar `Deterministic` distribution.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`.
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(Deterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)
def _batch_shape(self):
return self.loc.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _prob(self, x):
return math_ops.cast(
math_ops.abs(x - self.loc) <= self._slack, dtype=self.dtype)
def _cdf(self, x):
return math_ops.cast(x >= self.loc - self._slack, dtype=self.dtype)
class VectorDeterministic(_BaseDeterministic):
"""Vector `Deterministic` distribution on `R^k`.
The `VectorDeterministic` distribution is parameterized by a [batch] point
`loc in R^k`. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) is
```none
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise.
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single VectorDeterministic supported at [0., 2.] in R^2.
constant = tfd.VectorDeterministic([0., 2.])
constant.prob([0., 2.])
==> 1.
constant.prob([0., 3.])
==> 0.
# Initialize a [3] batch of constants on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
constant = tfd.VectorDeterministic(loc)
constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]])
==> [1., 0., 0.]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="VectorDeterministic"):
"""Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`.
Note that there is only one point in `R^0`, the "point" `[]`. So if `k = 0`
then `self.prob([]) == 1`.
The `atol` and `rtol` parameters allow for some slack in `pmf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0`.
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(VectorDeterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
is_vector=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)[:-1]
def _batch_shape(self):
return self.loc.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.loc)[-1]
def _event_shape(self):
return self.loc.get_shape()[-1:]
def _prob(self, x):
if self.validate_args:
is_vector_check = check_ops.assert_rank_at_least(x, 1)
right_vec_space_check = check_ops.assert_equal(
self.event_shape_tensor(),
array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
message=
"Argument 'x' not defined in the same space R^k as this distribution")
with ops.control_dependencies([is_vector_check]):
with ops.control_dependencies([right_vec_space_check]):
x = array_ops.identity(x)
return math_ops.cast(
math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
dtype=self.dtype)
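# --- Illustrative usage sketch (added for exposition; not part of the
# original module). It demonstrates the `atol`/`rtol` slack documented above
# using the classes defined in this file; the `==>` values assume the
# returned tensors are evaluated (e.g. with `.eval()` inside a session).
def _demo_deterministic_slack():  # hypothetical helper, exposition only
  d = Deterministic(loc=2., atol=0.1)
  d.prob(2.05)  # ==> 1., since |2.05 - 2.| = 0.05 <= atol
  d.prob(2.2)   # ==> 0., since |2.2 - 2.| = 0.2 > atol
  vd = VectorDeterministic(loc=[0., 2.], rtol=0.05)
  # The slack per component is rtol * |loc| = [0., 0.1]; every component must
  # fall within its slack for the pmf to be 1.
  vd.prob([0., 2.05])  # ==> 1.
  vd.prob([0., 2.2])   # ==> 0.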
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/deterministic.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Local PRNG for amplifying seed entropy into seeds for base operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
class SeedStream(object):
"""Local PRNG for amplifying seed entropy into seeds for base operations.
Writing sampling code which correctly sets the pseudo-random number
generator (PRNG) seed is surprisingly difficult. This class serves as
a helper for the TensorFlow Probability coding pattern designed to
avoid common mistakes.
# Motivating Example
A common first-cut implementation of a sampler for the beta
distribution is to compute the ratio of a gamma with itself plus
another gamma. This code snippet tries to do that, but contains a
surprisingly common error:
```python
def broken_beta(shape, alpha, beta, seed):
x = tf.random.gamma(shape, alpha, seed=seed)
y = tf.random.gamma(shape, beta, seed=seed)
return x / (x + y)
```
The mistake is that the two gamma draws are seeded with the same
seed. This causes them to always produce the same results, which,
in turn, leads this code snippet to always return `0.5`. Because it
can happen across abstraction boundaries, this kind of error is
surprisingly easy to make when handling immutable seeds.
# Goals
TensorFlow Probability adopts a code style designed to eliminate the
above class of error, without exacerbating others. The goals of
this code style are:
- Support reproducibility of results (by encouraging seeding of all
pseudo-random operations).
- Avoid shared-write global state (by not relying on a global PRNG).
- Prevent accidental seed reuse by TF Probability implementers. This
goal is served with the local pseudo-random seed generator provided
in this module.
- Mitigate potential accidental seed reuse by TF Probability clients
(with a salting scheme).
- Prevent accidental resonances with downstream PRNGs (by hashing the
output).
## Non-goals
- Implementing a high-performance PRNG for generating large amounts of
entropy. That's the job of the underlying TensorFlow PRNG we are
seeding.
- Avoiding random seed collisions, aka "birthday attacks".
# Code pattern
```python
def random_beta(shape, alpha, beta, seed): # (a)
seed = SeedStream(seed, salt="random_beta") # (b)
x = tf.random.gamma(shape, alpha, seed=seed()) # (c)
y = tf.random.gamma(shape, beta, seed=seed()) # (c)
return x / (x + y)
```
The elements of this pattern are:
- Accept an explicit seed (line a) as an argument in all public
functions, and write the function to be deterministic (up to any
numerical issues) for fixed seed.
- Rationale: This provides the client with the ability to reproduce
results. Accepting an immutable seed rather than a mutable PRNG
object reduces code coupling, permitting different sections to be
reproducible independently.
- Use that seed only to initialize a local `SeedStream` instance (line b).
- Rationale: Avoids accidental seed reuse.
- Supply the name of the function being implemented as a salt to the
`SeedStream` instance (line b). This serves to keep the salts
unique; unique salts ensure that clients of TF Probability will see
different functions always produce independent results even if
called with the same seeds.
- Seed each callee operation with the output of a unique call to the
`SeedStream` instance (lines c). This ensures reproducibility of
results while preventing seed reuse across callee invocations.
# Why salt?
Salting the `SeedStream` instances (with unique salts) is defensive
programming against a client accidentally committing a mistake
similar to our motivating example. Consider the following situation
that might arise without salting:
```python
def tfp_foo(seed):
seed = SeedStream(seed, salt="")
foo_stuff = tf.random.normal(seed=seed())
...
def tfp_bar(seed):
seed = SeedStream(seed, salt="")
bar_stuff = tf.random.normal(seed=seed())
...
def client_baz(seed):
foo = tfp_foo(seed=seed)
bar = tfp_bar(seed=seed)
...
```
The client should have used different seeds as inputs to `foo` and
`bar`. However, because they didn't, *and because `foo` and `bar`
both sample a Gaussian internally as their first action*, the
internal `foo_stuff` and `bar_stuff` will be the same, and the
returned `foo` and `bar` will not be independent, leading to subtly
incorrect answers from the client's simulation. This kind of bug is
particularly insidious for the client, because it depends on a
Distributions implementation detail, namely the order in which `foo`
and `bar` invoke the samplers they depend on. In particular, a
Bayesflow team member can introduce such a bug in previously
(accidentally) correct client code by performing an internal
refactoring that causes this operation order alignment.
A salting discipline eliminates this problem by making sure that the
seeds seen by `foo`'s callees will differ from those seen by `bar`'s
callees, even if `foo` and `bar` are invoked with the same input
seed.
"""
def __init__(self, seed, salt):
"""Initializes a `SeedStream`.
Args:
seed: Any Python object convertible to string, supplying the
initial entropy. If `None`, operations seeded with seeds
drawn from this `SeedStream` will follow TensorFlow semantics
for not being seeded.
salt: Any Python object convertible to string, supplying
auxiliary entropy. Must be unique across the Distributions
and TensorFlow Probability code base. See class docstring for
rationale.
"""
self._seed = seed.original_seed if isinstance(seed, SeedStream) else seed
self._salt = salt
self._counter = 0
def __call__(self):
"""Returns a fresh integer usable as a seed in downstream operations.
If this `SeedStream` was initialized with `seed=None`, returns
`None`. This has the effect that downstream operations (both
`SeedStream`s and primitive TensorFlow ops) will behave as though
they were unseeded.
The returned integer is non-negative, and uniformly distributed in
the half-open interval `[0, 2**512)`. This is consistent with
TensorFlow, as TensorFlow operations internally use the residue of
the given seed modulo `2**31 - 1` (see
`tensorflow/python/framework/random_seed.py`).
Returns:
seed: A fresh integer usable as a seed in downstream operations,
or `None`.
"""
self._counter += 1
if self._seed is None:
return None
composite = str((self._seed, self._counter, self._salt)).encode("utf-8")
return int(hashlib.sha512(composite).hexdigest(), 16)
@property
def original_seed(self):
return self._seed
@property
def salt(self):
return self._salt
# Design rationales for the SeedStream class
#
# - Salts are accepted for the reason given above to supply them.
#
# - A `None` seed propagates to downstream seeds, so they exhibit
# their "unseeded" behavior.
#
# - The return value is a Python int so it can be passed directly to
# TensorFlow operations as a seed. It is large to avoid losing seed
# space needlessly (TF will internally read only the last 31 bits).
#
# - The output is hashed with a crypto-grade hash function as a form
# of defensive programming: this reliably prevents all possible
# accidental resonances with all possible downstream PRNGs. The
# specific function used is not important; SHA512 was ready to hand.
#
# - The internal state update is a simple counter because (a) given
# that the output is hashed anyway, this is enough, and (b) letting
# it be this predictable permits a future "generate many seeds in
# parallel" operation whose results would agree with running
# sequentially.
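# --- Illustrative usage sketch (added for exposition; not part of the
# original module). It exercises the properties documented above: each call
# yields a fresh seed, results are reproducible for a fixed (seed, salt)
# pair, distinct salts yield independent streams, and a `None` seed
# propagates as `None`.
def _demo_seed_stream():  # hypothetical helper, exposition only
  stream = SeedStream(seed=17, salt="demo_salt")
  first, second = stream(), stream()
  assert first != second                    # fresh seed on every call
  replay = SeedStream(seed=17, salt="demo_salt")
  assert replay() == first                  # deterministic given (seed, salt)
  other = SeedStream(seed=17, salt="other_salt")
  assert other() != first                   # salting separates the streams
  assert SeedStream(seed=None, salt="demo_salt")() is None  # unseeded case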
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/seed_stream.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution of a vectorized Exponential, with uncorrelated components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import vector_exponential_linear_operator as vector_exponential_linop
from tensorflow.python.framework import ops
from tensorflow.python.util import deprecation
__all__ = [
"VectorExponentialDiag",
]
class VectorExponentialDiag(
vector_exponential_linop.VectorExponentialLinearOperator):
"""The vectorization of the Exponential distribution on `R^k`.
The vector exponential distribution is defined over a subset of `R^k`, and
parameterized by a (batch of) length-`k` `loc` vector and a (batch of) `k x k`
`scale` matrix: `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is defined over the image of the
`scale` matrix + `loc`, applied to the positive half-space:
`Supp = {loc + scale @ x : x in R^k, x_1 > 0, ..., x_k > 0}`. On this set,
```none
pdf(y; loc, scale) = exp(-||x||_1) / Z, for y in Supp
x = inv(scale) @ (y - loc),
Z = |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||x||_1` denotes the `l1` norm of `x`, `sum_i |x_i|`.
The VectorExponential distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Exponential(rate=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorExponential` and `Vector` distributions in TensorFlow.
The `VectorExponential` is a non-standard distribution that has useful
properties.
The marginals `Y_1, ..., Y_k` are *not* Exponential random variables, due to
the fact that the sum of Exponential random variables is not Exponential.
Instead, `Y` is a vector whose components are linear combinations of
Exponential random variables. Thus, `Y` lives in the vector space generated
by `vectors` of Exponential distributions. This allows the user to decide the
mean and covariance (by setting `loc` and `scale`), while preserving some
properties of the Exponential distribution. In particular, the tails of `Y_i`
will be (up to polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Exponential random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
# The first component has pdf exp{-x}, the second 0.5 exp{-x / 2}
vex = tfd.VectorExponentialDiag(scale_diag=[1., 2.])
# Compute the pdf of an `R^2` observation; return a scalar.
vex.prob([3., 4.]).eval() # shape: []
# Initialize a 2-batch of 3-variate Vector Exponential's.
loc = [[1., 2, 3],
[1., 0, 0]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vex = tfd.VectorExponentialDiag(loc, scale_diag)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[1.9, 2.2, 3.1],
[10., 1.0, 9.0]] # shape: [2, 3]
vex.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
validate_args=False,
allow_nan_stats=True,
name="VectorExponentialDiag"):
"""Construct Vector Exponential distribution supported on a subset of `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
# LinearOperatorDiag has an assert_non_singular method that is called by
# the Bijector.
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
super(VectorExponentialDiag, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
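# --- Worked check of the density formula above (added for exposition; not
# part of the original module). With a diagonal `scale`, the density factors
# into independent shifted Exponentials; this hypothetical helper verifies
# that numerically, assuming NumPy and SciPy are available as in the tests.
def _check_diag_density_factorization():  # exposition only
  import numpy as np
  from scipy import stats
  loc = np.array([1., 0.])
  scale_diag = np.array([2., 0.5])
  y = np.array([3., 0.2])                    # a point in the support
  x = (y - loc) / scale_diag                 # x = inv(scale) @ (y - loc)
  pdf = np.exp(-np.abs(x).sum()) / np.prod(scale_diag)
  # Product of Exponential(rate = 1 / scale_diag[i]) densities shifted by loc.
  reference = np.prod(stats.expon.pdf(y, loc=loc, scale=scale_diag))
  assert np.isclose(pdf, reference)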
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/vector_exponential_diag.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conditional distribution base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class ConditionalDistribution(distribution.Distribution):
"""Distribution that supports intrinsic parameters (local latents).
Subclasses of this distribution may have additional keyword arguments passed
to their sample-based methods (i.e. `sample`, `log_prob`, etc.).
"""
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def sample(self, sample_shape=(), seed=None, name="sample",
**condition_kwargs):
return self._call_sample_n(sample_shape, seed, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_prob(self, value, name="log_prob", **condition_kwargs):
return self._call_log_prob(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def prob(self, value, name="prob", **condition_kwargs):
return self._call_prob(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
return self._call_log_cdf(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def cdf(self, value, name="cdf", **condition_kwargs):
return self._call_cdf(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
return self._call_log_survival_function(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def survival_function(self, value, name="survival_function",
**condition_kwargs):
return self._call_survival_function(value, name, **condition_kwargs)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/conditional_distribution.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalTriL",
]
class MultivariateNormalTriL(mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a matrix in `R^{k x k}`, `covariance = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = scale_tril
```
where `scale_tril` is lower-triangular `k x k` matrix with non-zero diagonal,
i.e., `tf.linalg.tensor_diag_part(scale_tril) != 0`.
Additional leading dimensions (if any) will index batches.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
Trainable (batch) lower-triangular matrices can be created with
`tfp.distributions.matrix_diag_transform()` and/or
`tfp.distributions.fill_triangular()`
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.linalg.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
mvn = tfd.MultivariateNormalTriL(
loc=mu,
scale_tril=scale)
mvn.mean().eval()
# ==> [1., 2, 3]
# Covariance agrees with cholesky(cov) parameterization.
mvn.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
# Compute the pdf of an observation in `R^3`; return a scalar.
mvn.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
tril = ... # shape: [2, 3, 3], lower triangular, non-zero diagonal.
mvn = tfd.MultivariateNormalTriL(
loc=mu,
scale_tril=tril)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
# Instantiate a "learnable" MVN.
dims = 4
with tf.compat.v1.variable_scope("model"):
mvn = tfd.MultivariateNormalTriL(
loc=tf.compat.v1.get_variable(shape=[dims], dtype=tf.float32,
name="mu"),
scale_tril=tfd.fill_triangular(
tf.compat.v1.get_variable(shape=[dims * (dims + 1) // 2],
dtype=tf.float32, name="chol_Sigma")))
```
"""
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_tril=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalTriL"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = scale_tril
```
where `scale_tril` is lower-triangular `k x k` matrix with non-zero
diagonal, i.e., `tf.linalg.tensor_diag_part(scale_tril) != 0`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_tril: Floating-point, lower-triangular `Tensor` with non-zero
diagonal elements. `scale_tril` has shape `[B1, ..., Bb, k, k]` where `b
>= 0` and `k` is the event size.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or more
of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if neither `loc` nor `scale_tril` are specified.
"""
parameters = dict(locals())
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
if loc is None and scale_tril is None:
raise ValueError("Must specify one or both of `loc`, `scale_tril`.")
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[loc, scale_tril]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
if scale_tril is None:
scale = linalg.LinearOperatorIdentity(
num_rows=distribution_util.dimension_size(loc, -1),
dtype=loc.dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
else:
# No need to validate that scale_tril is non-singular.
# LinearOperatorLowerTriangular has an assert_non_singular
# method that is called by the Bijector.
scale = linalg.LinearOperatorLowerTriangular(
scale_tril,
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=False)
super(MultivariateNormalTriL, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
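# --- Worked check (added for exposition; not part of the original module).
# The parameterization above gives `covariance = scale_tril @ scale_tril.T`,
# so the Cholesky factor of a desired covariance is the natural choice for
# `scale_tril`; this hypothetical helper verifies the relationship with NumPy.
def _check_tril_covariance():  # exposition only
  import numpy as np
  cov = np.array([[0.36, 0.12, 0.06],
                  [0.12, 0.29, -0.13],
                  [0.06, -0.13, 0.26]])
  scale_tril = np.linalg.cholesky(cov)  # lower triangular, positive diagonal
  assert np.allclose(scale_tril.dot(scale_tril.T), cov)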
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/mvn_tril.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Poisson distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"Poisson",
]
_poisson_sample_note = """
The Poisson distribution is technically only defined for non-negative integer
values. When `validate_args=True`, non-integral inputs trigger an assertion.
When `validate_args=False`, calculations are otherwise unchanged despite
integral or non-integral inputs.
When `validate_args=False`, evaluating the pmf at non-integral values
corresponds to evaluating an unnormalized distribution; these values do not
correspond to evaluations of the cdf.
"""
class Poisson(distribution.Distribution):
"""Poisson distribution.
The Poisson distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability mass function (pmf) is,
```none
pmf(k; lambda, k >= 0) = (lambda^k / k!) / Z
Z = exp(lambda).
```
where `rate = lambda` and `Z` is the normalizing constant.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
rate=None,
log_rate=None,
validate_args=False,
allow_nan_stats=True,
name="Poisson"):
"""Initialize a batch of Poisson distributions.
Args:
rate: Floating point tensor, the rate parameter. `rate` must be positive.
Must specify exactly one of `rate` and `log_rate`.
log_rate: Floating point tensor, the log of the rate parameter.
Must specify exactly one of `rate` and `log_rate`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if none or both of `rate`, `log_rate` are specified.
TypeError: if `rate` is not a float-type.
TypeError: if `log_rate` is not a float-type.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[rate]) as name:
if (rate is None) == (log_rate is None):
raise ValueError("Must specify exactly one of `rate` and `log_rate`.")
elif log_rate is None:
rate = ops.convert_to_tensor(rate, name="rate")
if not rate.dtype.is_floating:
raise TypeError("rate.dtype ({}) is a not a float-type.".format(
rate.dtype.name))
with ops.control_dependencies([check_ops.assert_positive(rate)] if
validate_args else []):
self._rate = array_ops.identity(rate, name="rate")
self._log_rate = math_ops.log(rate, name="log_rate")
else:
log_rate = ops.convert_to_tensor(log_rate, name="log_rate")
if not log_rate.dtype.is_floating:
raise TypeError("log_rate.dtype ({}) is a not a float-type.".format(
log_rate.dtype.name))
self._rate = math_ops.exp(log_rate, name="rate")
self._log_rate = ops.convert_to_tensor(log_rate, name="log_rate")
super(Poisson, self).__init__(
dtype=self._rate.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._rate],
name=name)
@property
def rate(self):
"""Rate parameter."""
return self._rate
@property
def log_rate(self):
"""Log rate parameter."""
return self._log_rate
def _batch_shape_tensor(self):
return array_ops.shape(self.rate)
def _batch_shape(self):
return self.rate.shape
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return math_ops.igammac(1. + x, self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return x * self.log_rate - math_ops.lgamma(1. + x)
def _mean(self):
return array_ops.identity(self.rate)
def _variance(self):
return array_ops.identity(self.rate)
@distribution_util.AppendDocstring(
"""Note: when `rate` is an integer, there are actually two modes: `rate`
and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
return math_ops.floor(self.rate)
def _sample_n(self, n, seed=None):
return random_ops.random_poisson(
self.rate, [n], dtype=self.dtype, seed=seed)
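# --- Worked check of the formulas used above (added for exposition; not part
# of the original module). `_log_unnormalized_prob` minus
# `_log_normalization` is the usual Poisson log-pmf, and `igammac(1 + x,
# rate)` is the standard closed form of the Poisson cdf; this hypothetical
# helper verifies both against SciPy, assumed available as in the tests.
def _check_poisson_formulas():  # exposition only
  import numpy as np
  from scipy import special, stats
  rate = 3.
  x = np.arange(6.)
  log_pmf = x * np.log(rate) - special.gammaln(1. + x) - rate
  assert np.allclose(log_pmf, stats.poisson.logpmf(x, rate))
  # The regularized upper incomplete gamma Q(1 + x, rate) is the Poisson cdf.
  assert np.allclose(special.gammaincc(1. + x, rate),
                     stats.poisson.cdf(x, rate))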
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/poisson.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Laplace distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import laplace
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"VectorLaplaceLinearOperator"
]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorLaplaceLinearOperator(
transformed_distribution.TransformedDistribution):
"""The vectorization of the Laplace distribution on `R^k`.
The vector laplace distribution is defined over `R^k`, and parameterized by
a (batch of) length-`k` `loc` vector (the means) and a (batch of) `k x k`
`scale` matrix: `covariance = 2 * scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-||y||_1) / Z,
y = inv(scale) @ (x - loc),
Z = 2**k |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||_1` denotes the `l1` norm of `y`, `sum_i |y_i|`.
The VectorLaplace distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Laplace(loc=0, scale=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorLaplace` and `Vector` distributions in TensorFlow.
The `VectorLaplace` is a non-standard distribution that has useful properties.
The marginals `Y_1, ..., Y_k` are *not* Laplace random variables, due to
the fact that the sum of Laplace random variables is not Laplace.
Instead, `Y` is a vector whose components are linear combinations of Laplace
random variables. Thus, `Y` lives in the vector space generated by `vectors`
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting `loc` and `scale`), while preserving some properties of
the Laplace distribution. In particular, the tails of `Y_i` will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate VectorLaplace with some desired covariance.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
# Divide scale by sqrt(2) so that the final covariance will be what we want.
vla = tfd.VectorLaplaceLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorLowerTriangular(scale / tf.sqrt(2.)))
# Covariance agrees with cholesky(cov) parameterization.
vla.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
# Compute the pdf of an `R^3` observation; return a scalar.
vla.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Vector Laplace's.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vla = tfd.VectorLaplaceLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
vla.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="VectorLaplaceLinearOperator"):
"""Construct Vector Laplace distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = 2 * scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(VectorLaplaceLinearOperator, self).__init__(
distribution=laplace.Laplace(
loc=array_ops.zeros([], dtype=scale.dtype),
scale=array_ops.ones([], dtype=scale.dtype)),
bijector=bijectors.AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorLaplaceLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorLaplaceLinearOperator, self)._prob(x)
def _mean(self):
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
if self.loc is None:
return array_ops.zeros(shape, self.dtype)
if has_static_shape and shape == self.loc.get_shape():
return array_ops.identity(self.loc)
# Add dummy tensor of zeros to broadcast. This is only necessary if shape
# != self.loc.shape, but we could not determine if this is the case.
return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Laplace(0, 1).
# Then this distribution is
# X = loc + LW,
# and since E[X] = loc,
# Cov(X) = E[LW W^T L^T] = L E[W W^T] L^T.
# Since E[wi wj] = 0 if i != j, and 2 if i == j, we have
# Cov(X) = 2 LL^T
if distribution_util.is_diagonal_scale(self.scale):
return 2. * array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return 2. * self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return 2. * math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
2. * self.scale.matmul(self.scale.to_dense()))
else:
return 2. * array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return np.sqrt(2) * math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense())))
else:
return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
return self._mean()
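# --- Monte Carlo check of the covariance derivation above (added for
# exposition; not part of the original module). Each standard Laplace
# component has variance 2, so `Cov(scale @ W + loc) = 2 * scale @ scale.T`;
# this hypothetical helper confirms that numerically with NumPy.
def _check_vector_laplace_covariance():  # exposition only
  rng = np.random.RandomState(0)
  scale = np.array([[0.6, 0.], [0.2, 0.5]])
  w = rng.laplace(size=(200000, 2))          # iid Laplace(0, 1) draws
  y = w.dot(scale.T)                         # rows are draws of scale @ W
  sample_cov = np.cov(y, rowvar=False)
  assert np.allclose(sample_cov, 2. * scale.dot(scale.T), atol=0.05)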
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors import AffineLinearOperator
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalLinearOperator",
]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
# TODO(b/35290280): Import in `../../__init__.py` after adding unit-tests.
class MultivariateNormalLinearOperator(
transformed_distribution.TransformedDistribution):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
mvn = tfd.MultivariateNormalLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorLowerTriangular(scale))
# Covariance agrees with cholesky(cov) parameterization.
mvn.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
# Compute the pdf of an `R^3` observation; return a scalar.
mvn.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
mvn = tfd.MultivariateNormalLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalLinearOperator"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents) as name:
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(MultivariateNormalLinearOperator, self).__init__(
distribution=normal.Normal(
loc=array_ops.zeros([], dtype=scale.dtype),
scale=array_ops.ones([], dtype=scale.dtype)),
bijector=AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(MultivariateNormalLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(MultivariateNormalLinearOperator, self)._prob(x)
def _mean(self):
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
if self.loc is None:
return array_ops.zeros(shape, self.dtype)
if has_static_shape and shape == self.loc.get_shape():
return array_ops.identity(self.loc)
# Add dummy tensor of zeros to broadcast. This is only necessary if shape
# != self.loc.shape, but we could not determine if this is the case.
return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)
def _covariance(self):
if distribution_util.is_diagonal_scale(self.scale):
return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
else:
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense())))
else:
return math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
return self._mean()
@kullback_leibler.RegisterKL(MultivariateNormalLinearOperator,
MultivariateNormalLinearOperator)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _kl_brute_force(a, b, name=None):
"""Batched KL divergence `KL(a || b)` for multivariate Normals.
With `X`, `Y` both multivariate Normals in `R^k` with means `mu_a`, `mu_b` and
covariance `C_a`, `C_b` respectively,
```
KL(a || b) = 0.5 * ( L - k + T + Q ),
L := Log[Det(C_b)] - Log[Det(C_a)]
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
```
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k**2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
a: Instance of `MultivariateNormalLinearOperator`.
b: Instance of `MultivariateNormalLinearOperator`.
name: (optional) name to use for created ops. Default "kl_mvn".
Returns:
Batchwise `KL(a || b)`.
"""
def squared_frobenius_norm(x):
"""Helper to make KL calculation slightly more readable."""
# http://mathworld.wolfram.com/FrobeniusNorm.html
# The gradient of KL[p,q] is not defined when p==q. The culprit is
# linalg_ops.norm, i.e., we cannot use the commented out code.
# return math_ops.square(linalg_ops.norm(x, ord="fro", axis=[-2, -1]))
return math_ops.reduce_sum(math_ops.square(x), axis=[-2, -1])
# TODO(b/35041439): See also b/35040945. Remove this function once LinOp
# supports something like:
# A.inverse().solve(B).norm(order='fro', axis=[-1, -2])
def is_diagonal(x):
"""Helper to identify if `LinearOperator` has only a diagonal component."""
return (isinstance(x, linalg.LinearOperatorIdentity) or
isinstance(x, linalg.LinearOperatorScaledIdentity) or
isinstance(x, linalg.LinearOperatorDiag))
with ops.name_scope(name, "kl_mvn", values=[a.loc, b.loc] +
a.scale.graph_parents + b.scale.graph_parents):
# Calculation is based on:
# http://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
# and,
# https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm
# i.e.,
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ij} (inv(B) A)_{ij}**2
# = ||inv(B) A||_F**2
# where ||.||_F is the Frobenius norm and the second equality follows from
# the cyclic permutation property.
if is_diagonal(a.scale) and is_diagonal(b.scale):
# Using `stddev` because it handles expansion of Identity cases.
b_inv_a = (a.stddev() / b.stddev())[..., array_ops.newaxis]
else:
b_inv_a = b.scale.solve(a.scale.to_dense())
kl_div = (b.scale.log_abs_determinant()
- a.scale.log_abs_determinant()
+ 0.5 * (
- math_ops.cast(a.scale.domain_dimension_tensor(), a.dtype)
+ squared_frobenius_norm(b_inv_a)
+ squared_frobenius_norm(b.scale.solve(
(b.mean() - a.mean())[..., array_ops.newaxis]))))
kl_div.set_shape(array_ops.broadcast_static_shape(
a.batch_shape, b.batch_shape))
return kl_div
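# Illustrative sketch (not part of the library): a NumPy check of the
# decomposition used by `_kl_brute_force` above,
#   KL(a || b) = 0.5 * (L - k + T + Q),
# for two dense multivariate normals. The helper name and inputs below are
# made up for this example; it is a sanity check, not part of the API.
def _mvn_kl_reference(mu_a, cov_a, mu_b, cov_b):
  """Reference KL(a || b) for full-covariance Gaussians (NumPy)."""
  import numpy as np  # local import, used only by this sketch
  k = mu_a.shape[-1]
  log_det_term = np.linalg.slogdet(cov_b)[1] - np.linalg.slogdet(cov_a)[1]  # L
  trace_term = np.trace(np.linalg.solve(cov_b, cov_a))                      # T
  diff = mu_b - mu_a
  quad_term = diff @ np.linalg.solve(cov_b, diff)                           # Q
  return 0.5 * (log_det_term - k + trace_term + quad_term)
# Example: _mvn_kl_reference(np.zeros(2), np.eye(2),
#                            np.array([1., 0.]), 2. * np.eye(2))  # ~= 0.443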
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/mvn_linear_operator.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Logistic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
class Logistic(distribution.Distribution):
"""The Logistic distribution with location `loc` and `scale` parameters.
#### Mathematical details
The cumulative distribution function (cdf) of this distribution is:
```none
cdf(x; mu, sigma) = 1 / (1 + exp(-(x - mu) / sigma))
```
where `loc = mu` and `scale = sigma`.
The Logistic distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Logistic(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Logistic distribution.
dist = tfd.Logistic(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Logistics.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tfd.Logistic(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
# Arguments are broadcast when possible.
# Define a batch of two scalar valued Logistics.
# Both have mean 1, but different scales.
dist = tfd.Logistic(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Logistic"):
"""Construct Logistic distributions with mean and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s). Must
contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Logistic, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return -nn_ops.softplus(-self._z(x))
def _cdf(self, x):
return math_ops.sigmoid(self._z(x))
def _log_survival_function(self, x):
return -nn_ops.softplus(self._z(x))
def _survival_function(self, x):
return math_ops.sigmoid(-self._z(x))
def _log_unnormalized_prob(self, x):
z = self._z(x)
return - z - 2. * nn_ops.softplus(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 2 + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(3)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
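# Illustrative sketch (not part of the library): the inverse-CDF sampling used
# by `_sample_n` above, written with NumPy. Since cdf(x) = sigmoid((x - loc) /
# scale), the quantile of a uniform draw `u` is loc + scale * (log(u) -
# log1p(-u)). The helper name and inputs are made up for this example.
def _sample_logistic_numpy(loc, scale, n, seed=0):
  rng = np.random.default_rng(seed)
  u = rng.uniform(low=np.finfo(np.float64).tiny, high=1., size=n)
  return loc + scale * (np.log(u) - np.log1p(-u))
# The empirical stddev should approach scale * pi / sqrt(3):
# _sample_logistic_numpy(0., 3., 200000).std()  # ~= 5.44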
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/logistic.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Kumaraswamy distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import uniform
from tensorflow.python.util import deprecation
__all__ = [
"Kumaraswamy",
]
_kumaraswamy_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1]`. It must have a shape compatible with `self.batch_shape()`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _harmonic_number(x):
"""Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: Floating-point `Tensor` input.
Returns:
z: The analytic continuation of the harmonic number for the input.
"""
one = array_ops.ones([], dtype=x.dtype)
return math_ops.digamma(x + one) - math_ops.digamma(one)
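# Illustrative sketch (not part of the library): for a positive integer `n`,
# the continuation above reduces to the ordinary harmonic number
# 1 + 1/2 + ... + 1/n. A hypothetical helper for checking this with SciPy:
def _harmonic_number_integer_check(n):
  from scipy.special import digamma  # local import, used only by this sketch
  analytic = float(digamma(n + 1.) - digamma(1.))
  partial_sum = sum(1. / k for k in range(1, n + 1))
  return analytic, partial_sum
# _harmonic_number_integer_check(5)  # ==> (2.2833..., 2.2833...)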
class Kumaraswamy(transformed_distribution.TransformedDistribution):
"""Kumaraswamy distribution.
The Kumaraswamy distribution is defined over the `(0, 1)` interval using
parameters
`concentration1` (aka "alpha") and `concentration0` (aka "beta"). It has a
shape similar to the Beta distribution, but is reparameterizable.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta) = alpha * beta * x**(alpha - 1) * (1 - x**alpha)**(beta - 1)
```
where:
* `concentration1 = alpha`,
* `concentration0 = beta`,
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Examples
```python
# Create a batch of three Kumaraswamy distributions.
alpha = [1, 2, 3]
beta = [1, 2, 3]
dist = Kumaraswamy(alpha, beta)
dist.sample([4, 5]) # Shape [4, 5, 3]
# `x` has three batch entries, each with two samples.
x = [[.1, .4, .5],
[.2, .3, .5]]
# Calculate the probability of each pair of samples under the corresponding
# distribution in `dist`.
dist.prob(x) # Shape [2, 3]
```
```python
# Create batch_shape=[2, 3] via parameter broadcast:
alpha = [[1.], [2]] # Shape [2, 1]
beta = [3., 4, 5] # Shape [3]
dist = Kumaraswamy(alpha, beta)
# alpha broadcast as: [[1., 1, 1,],
# [2, 2, 2]]
# beta broadcast as: [[3., 4, 5],
# [3, 4, 5]]
# batch_shape [2, 3]
dist.sample([4, 5]) # Shape [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # Shape [2, 3]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
concentration1=None,
concentration0=None,
validate_args=False,
allow_nan_stats=True,
name="Kumaraswamy"):
"""Initialize a batch of Kumaraswamy distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka "alpha". Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicating mean
number of failures; aka "beta". Otherwise has same semantics as
`concentration1`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
with ops.name_scope(name, values=[concentration1, concentration0]) as name:
concentration1 = ops.convert_to_tensor(
concentration1, name="concentration1")
concentration0 = ops.convert_to_tensor(
concentration0, name="concentration0")
super(Kumaraswamy, self).__init__(
distribution=uniform.Uniform(
low=array_ops.zeros([], dtype=concentration1.dtype),
high=array_ops.ones([], dtype=concentration1.dtype),
allow_nan_stats=allow_nan_stats),
bijector=bijectors.Kumaraswamy(
concentration1=concentration1, concentration0=concentration0,
validate_args=validate_args),
batch_shape=distribution_util.get_broadcast_shape(
concentration1, concentration0),
name=name)
self._reparameterization_type = distribution.FULLY_REPARAMETERIZED
@property
def concentration1(self):
"""Concentration parameter associated with a `1` outcome."""
return self.bijector.concentration1
@property
def concentration0(self):
"""Concentration parameter associated with a `0` outcome."""
return self.bijector.concentration0
def _entropy(self):
a = self.concentration1
b = self.concentration0
# Entropy of Kumaraswamy(a, b) is
#   (1 - 1/b) + (1 - 1/a) * H(b) - log(a) - log(b),
# where H is the harmonic-number continuation defined above.
return (1 - 1. / b) + (
    1 - 1. / a) * _harmonic_number(b) - math_ops.log(a) - math_ops.log(b)
def _moment(self, n):
"""Compute the n'th (uncentered) moment."""
total_concentration = self.concentration1 + self.concentration0
expanded_concentration1 = array_ops.ones_like(
total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = array_ops.ones_like(
total_concentration, dtype=self.dtype) * self.concentration0
beta_arg0 = 1 + n / expanded_concentration1
beta_arg = array_ops.stack([beta_arg0, expanded_concentration0], -1)
log_moment = math_ops.log(expanded_concentration0) + special_math_ops.lbeta(
beta_arg)
return math_ops.exp(log_moment)
def _mean(self):
return self._moment(1)
def _variance(self):
# TODO(b/72696533): Investigate a more numerically stable version.
return self._moment(2) - math_ops.square(self._moment(1))
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when `concentration1 <= 1` or
`concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
is used for undefined modes. If `self.allow_nan_stats` is `False` an
exception is raised when one or more modes are undefined.""")
def _mode(self):
a = self.concentration1
b = self.concentration0
mode = ((a - 1) / (a * b - 1))**(1. / a)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype),
name="nan")
is_defined = (self.concentration1 > 1.) & (self.concentration0 > 1.)
return array_ops.where_v2(is_defined, mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], dtype=self.concentration1.dtype),
self.concentration1,
message="Mode undefined for concentration1 <= 1."),
check_ops.assert_less(
array_ops.ones([], dtype=self.concentration0.dtype),
self.concentration0,
message="Mode undefined for concentration0 <= 1.")
], mode)
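# Illustrative sketch (not part of the library): NumPy versions of two facts
# used above -- sampling by pushing Uniform(0, 1) draws through the Kumaraswamy
# inverse CDF (the same idea as the Uniform-plus-bijector construction), and
# the raw moment E[X**n] = concentration0 * Beta(1 + n / concentration1,
# concentration0) computed by `_moment`. Helper names and inputs are made up
# for this example.
def _kumaraswamy_sample_numpy(a, b, n_samples, seed=0):
  u = np.random.default_rng(seed).uniform(size=n_samples)
  return (1. - (1. - u) ** (1. / b)) ** (1. / a)  # inverts cdf(x) = 1 - (1 - x**a)**b

def _kumaraswamy_moment_numpy(a, b, n):
  from scipy.special import beta as beta_fn  # local import for this sketch
  return b * beta_fn(1. + n / a, b)
# _kumaraswamy_sample_numpy(2., 3., 200000).mean()  # ~= _kumaraswamy_moment_numpy(2., 3., 1)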
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/kumaraswamy.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution of a vectorized Laplace, with uncorrelated components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import vector_laplace_linear_operator as vector_laplace_linop
from tensorflow.python.framework import ops
from tensorflow.python.util import deprecation
__all__ = [
"VectorLaplaceDiag",
]
class VectorLaplaceDiag(
vector_laplace_linop.VectorLaplaceLinearOperator):
"""The vectorization of the Laplace distribution on `R^k`.
The vector laplace distribution is defined over `R^k`, and parameterized by
a (batch of) length-`k` `loc` vector (the means) and a (batch of) `k x k`
`scale` matrix: `covariance = 2 * scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-||y||_1) / Z,
y = inv(scale) @ (x - loc),
Z = 2**k |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||_1` denotes the `l1` norm of `y`, `sum_i |y_i|`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The VectorLaplace distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Laplace(loc=0, scale=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorLaplace` and `Vector` distributions in TensorFlow.
The `VectorLaplace` is a non-standard distribution that has useful properties.
The marginals `Y_1, ..., Y_k` are *not* Laplace random variables, due to
the fact that the sum of Laplace random variables is not Laplace.
Instead, `Y` is a vector whose components are linear combinations of Laplace
random variables. Thus, `Y` lives in the vector space generated by `vectors`
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting `loc` and `scale`), while preserving some properties of
the Laplace distribution. In particular, the tails of `Y_i` will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 2-variate VectorLaplace.
vla = tfd.VectorLaplaceDiag(
loc=[1., -1],
scale_diag=[1, 2.])
vla.mean().eval()
# ==> [1., -1]
vla.stddev().eval()
# ==> [1., 2] * sqrt(2)
# Evaluate this on an observation in `R^2`, returning a scalar.
vla.prob([-1., 0]).eval() # shape: []
# Initialize a 3-batch, 2-variate scaled-identity VectorLaplace.
vla = tfd.VectorLaplaceDiag(
loc=[1., -1],
scale_identity_multiplier=[1, 2., 3])
vla.mean().eval() # shape: [3, 2]
# ==> [[1., -1]
# [1, -1],
# [1, -1]]
vla.stddev().eval() # shape: [3, 2]
# ==> sqrt(2) * [[1., 1],
# [2, 2],
# [3, 3]]
# Evaluate this on an observation in `R^2`, returning a length-3 vector.
vla.prob([-1., 0]).eval() # shape: [3]
# Initialize a 2-batch of 3-variate VectorLaplace's.
vla = tfd.VectorLaplaceDiag(
loc=[[1., 2, 3],
     [11, 22, 33]],  # shape: [2, 3]
scale_diag=[[1., 2, 3],
[0.5, 1, 1.5]]) # shape: [2, 3]
# Evaluate this on a two observations, each in `R^3`, returning a length-2
# vector.
x = [[-1., 0, 1],
[-11, 0, 11.]] # shape: [2, 3].
vla.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
validate_args=False,
allow_nan_stats=True,
name="VectorLaplaceDiag"):
"""Construct Vector Laplace distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by the last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = 2 * scale @ scale.T`.
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified (i.e.,
  neither `loc` nor `scale_diag` is given), since the event size cannot
  be inferred.
"""
parameters = dict(locals())
with ops.name_scope(name):
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
# LinearOperatorDiag has an assert_non_singular method that is called by
# the Bijector.
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
super(VectorLaplaceDiag, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
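# Illustrative sketch (not part of the library): the construction from the
# class docstring, Y = scale @ X + loc with X_i ~ Laplace(0, 1), written with
# NumPy for a diagonal `scale`. A standard Laplace has variance 2, which is why
# `covariance = 2 * scale @ scale.T`. Helper name and inputs are made up for
# this example.
def _vector_laplace_diag_sample_numpy(loc, scale_diag, n_samples, seed=0):
  import numpy as np  # local import, used only by this sketch
  rng = np.random.default_rng(seed)
  x = rng.laplace(loc=0., scale=1., size=(n_samples,) + np.shape(loc))
  return np.asarray(loc) + x * np.asarray(scale_diag)
# y = _vector_laplace_diag_sample_numpy([1., -1.], [1., 2.], 200000)
# y.std(axis=0)  # ~= [1., 2.] * sqrt(2), matching `vla.stddev()` above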
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/vector_laplace_diag.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Half Normal distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
__all__ = [
"HalfNormal",
]
class HalfNormal(distribution.Distribution):
"""The Half Normal distribution with scale `scale`.
#### Mathematical details
The half normal is a transformation of a centered normal distribution.
If some random variable `X` has normal distribution,
```none
X ~ Normal(0.0, scale)
Y = |X|
```
Then `Y` will have half normal distribution. The probability density
function (pdf) is:
```none
pdf(x; scale, x > 0) = sqrt(2) / (scale * sqrt(pi)) * exp(- 1/2 * (x / scale) ** 2)
```
Where `scale = sigma` is the standard deviation of the underlying normal
distribution.
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar HalfNormal distribution.
dist = tfd.HalfNormal(scale=3.0)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued HalfNormals.
# The first has scale 11.0, the second 22.0
dist = tfd.HalfNormal(scale=[11.0, 22.0])
# Evaluate the pdf of the first distribution on 1.0, and the second on 1.5,
# returning a length two tensor.
dist.prob([1.0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
scale,
validate_args=False,
allow_nan_stats=True,
name="HalfNormal"):
"""Construct HalfNormals with scale `scale`.
Args:
scale: Floating point tensor; the scales of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._scale = array_ops.identity(scale, name="scale")
super(HalfNormal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return {"scale": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def scale(self):
"""Distribution parameter for the scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.shape(self.scale)
def _batch_shape(self):
return self.scale.shape
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
sampled = random_ops.random_normal(
shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=seed)
return math_ops.abs(sampled * self.scale)
def _prob(self, x):
coeff = np.sqrt(2) / self.scale / np.sqrt(np.pi)
pdf = coeff * math_ops.exp(- 0.5 * (x / self.scale) ** 2)
return pdf * math_ops.cast(x >= 0, self.dtype)
def _cdf(self, x):
truncated_x = nn.relu(x)
return math_ops.erf(truncated_x / self.scale / np.sqrt(2.0))
def _entropy(self):
return 0.5 * math_ops.log(np.pi * self.scale ** 2.0 / 2.0) + 0.5
def _mean(self):
return self.scale * np.sqrt(2.0) / np.sqrt(np.pi)
def _quantile(self, p):
return np.sqrt(2.0) * self.scale * special_math.erfinv(p)
def _mode(self):
return array_ops.zeros(self.batch_shape_tensor())
def _variance(self):
return self.scale ** 2.0 * (1.0 - 2.0 / np.pi)
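# Illustrative sketch (not part of the library): the closed forms above can be
# cross-checked against SciPy's half-normal, which uses the same `scale`
# parameterization. The helper below is hypothetical and exists only for this
# comparison.
def _half_normal_scipy_reference(scale):
  from scipy import stats  # local import, used only by this sketch
  dist = stats.halfnorm(scale=scale)
  return {"mean": dist.mean(),        # scale * sqrt(2 / pi)
          "variance": dist.var(),     # scale**2 * (1 - 2 / pi)
          "entropy": dist.entropy()}  # 0.5 * log(pi * scale**2 / 2) + 0.5
# _half_normal_scipy_reference(3.0)
# # ==> {'mean': ~2.394, 'variance': ~3.270, 'entropy': ~1.824}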
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/half_normal.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing moving statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
__all__ = [
"assign_moving_mean_variance",
"assign_log_moving_mean_exp",
"moving_mean_variance",
]
def assign_moving_mean_variance(
mean_var, variance_var, value, decay, name=None):
"""Compute exponentially weighted moving {mean,variance} of a streaming value.
The exponentially weighted moving `mean_var` and `variance_var`, updated with
each new `value`, are given by the following recurrence relations:
```python
variance_var = decay * (variance_var + (1-decay) * (value - mean_var)**2)
mean_var = decay * mean_var + (1 - decay) * value
```
Note: `mean_var` is updated *after* `variance_var`, i.e., `variance_var` uses
the lag-1 mean.
For derivation justification, see [Finch (2009; Eq. 143)][1].
Args:
mean_var: `float`-like `Variable` representing the exponentially weighted
moving mean. Same shape as `variance_var` and `value`.
variance_var: `float`-like `Variable` representing the
exponentially weighted moving variance. Same shape as `mean_var` and
`value`.
value: `float`-like `Tensor`. Same shape as `mean_var` and `variance_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
name: Optional name of the returned operation.
Returns:
mean_var: `Variable` representing the `value`-updated exponentially weighted
moving mean.
variance_var: `Variable` representing the `value`-updated
exponentially weighted moving variance.
Raises:
TypeError: if `mean_var` does not have float type `dtype`.
TypeError: if `mean_var`, `variance_var`, `value`, `decay` have different
`base_dtype`.
#### References
[1]: Tony Finch. Incremental calculation of weighted mean and variance.
_Technical Report_, 2009.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
with ops.name_scope(name, "assign_moving_mean_variance",
[variance_var, mean_var, value, decay]):
with ops.colocate_with(variance_var):
with ops.colocate_with(mean_var):
base_dtype = mean_var.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"mean_var.base_dtype({}) does not have float type "
"`dtype`.".format(base_dtype.name))
if base_dtype != variance_var.dtype.base_dtype:
raise TypeError(
"mean_var.base_dtype({}) != variance_var.base_dtype({})".format(
base_dtype.name,
variance_var.dtype.base_dtype.name))
value = ops.convert_to_tensor(value, dtype=base_dtype, name="value")
decay = ops.convert_to_tensor(decay, dtype=base_dtype, name="decay")
delta = value - mean_var
with ops.control_dependencies([delta]):
mean_var = state_ops.assign_add(
mean_var,
(1. - decay) * delta)
variance_var = state_ops.assign_sub(
variance_var,
(1. - decay) * (variance_var - decay * math_ops.square(delta)))
return mean_var, variance_var
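# Illustrative sketch (not part of the library): the same recurrence with plain
# Python scalars, useful for sanity-checking the update order (the variance
# update sees the lag-1 mean, exactly as documented above). The helper name is
# made up for this example.
def _moving_mean_variance_step(mean, variance, value, decay):
  variance = decay * (variance + (1. - decay) * (value - mean) ** 2)
  mean = decay * mean + (1. - decay) * value
  return mean, variance
# Feeding i.i.d. draws with mean 3 and variance 4 through this step with
# decay=0.999 drives (mean, variance) toward roughly (3, 4) after burn-in.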
def assign_log_moving_mean_exp(
log_mean_exp_var, log_value, decay, name=None):
"""Compute the log of the exponentially weighted moving mean of the exp.
If `log_value` is a draw from a stationary random variable, this function
approximates `log(E[exp(log_value)])`, i.e., a weighted log-sum-exp. More
precisely, a `tf.Variable`, `log_mean_exp_var`, is updated by `log_value`
using the following identity:
```none
log_mean_exp_var =
= log(decay exp(log_mean_exp_var) + (1 - decay) exp(log_value))
= log(exp(log_mean_exp_var + log(decay)) + exp(log_value + log1p(-decay)))
= log_mean_exp_var
+ log( exp(log_mean_exp_var - log_mean_exp_var + log(decay))
+ exp(log_value - log_mean_exp_var + log1p(-decay)))
= log_mean_exp_var
+ log_sum_exp([log(decay), log_value - log_mean_exp_var + log1p(-decay)]).
```
In addition to numerical stability, this formulation is advantageous because
`log_mean_exp_var` can be updated in a lock-free manner, i.e., using
`assign_add`. (Note: the updates are not thread-safe; it's just that the
update to the tf.Variable is presumed efficient due to being lock-free.)
Args:
log_mean_exp_var: `float`-like `Variable` representing the log of the
exponentially weighted moving mean of the exp. Same shape as `log_value`.
log_value: `float`-like `Tensor` representing a new (streaming) observation.
Same shape as `log_mean_exp_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
name: Optional name of the returned operation.
Returns:
log_mean_exp_var: A reference to the input 'Variable' tensor with the
`log_value`-updated log of the exponentially weighted moving mean of exp.
Raises:
TypeError: if `log_mean_exp_var` does not have float type `dtype`.
TypeError: if `log_mean_exp_var`, `log_value`, `decay` have different
`base_dtype`.
"""
with ops.name_scope(name, "assign_log_moving_mean_exp",
[log_mean_exp_var, log_value, decay]):
# We want to update the variable in a numerically stable and lock-free way.
# To do this, observe that variable `x` updated by `v` is:
# x = log(w exp(x) + (1-w) exp(v))
# = log(exp(x + log(w)) + exp(v + log1p(-w)))
# = x + log(exp(x - x + log(w)) + exp(v - x + log1p(-w)))
# = x + lse([log(w), v - x + log1p(-w)])
with ops.colocate_with(log_mean_exp_var):
base_dtype = log_mean_exp_var.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"log_mean_exp_var.base_dtype({}) does not have float type "
"`dtype`.".format(base_dtype.name))
log_value = ops.convert_to_tensor(log_value, dtype=base_dtype,
name="log_value")
decay = ops.convert_to_tensor(decay, dtype=base_dtype, name="decay")
delta = (log_value - log_mean_exp_var)[array_ops.newaxis, ...]
x = array_ops.concat([
math_ops.log(decay) * array_ops.ones_like(delta),
delta + math_ops.log1p(-decay)
], axis=0)
x = math_ops.reduce_logsumexp(x, axis=0)
return log_mean_exp_var.assign_add(x)
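# Illustrative sketch (not part of the library): one step of the log-sum-exp
# identity from the docstring above, written with NumPy/SciPy scalars. It
# updates x toward log(decay * exp(x) + (1 - decay) * exp(v)) without leaving
# log-space. The helper name is made up for this example.
def _log_moving_mean_exp_step(log_mean_exp, log_value, decay):
  import numpy as np  # local imports, used only by this sketch
  from scipy.special import logsumexp
  return log_mean_exp + logsumexp(
      [np.log(decay), log_value - log_mean_exp + np.log1p(-decay)])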
def moving_mean_variance(value, decay, collections=None, name=None):
"""Compute exponentially weighted moving {mean,variance} of a streaming value.
The exponentially-weighted moving `mean_var` and `variance_var` are updated
by `value` according to the following recurrence:
```python
variance_var = decay * (variance_var + (1-decay) * (value - mean_var)**2)
mean_var = decay * mean_var + (1 - decay) * value
```
Note: `mean_var` is updated *after* `variance_var`, i.e., `variance_var` uses
the lag-`1` mean.
For derivation justification, see [Finch (2009; Eq. 143)][1].
Unlike `assign_moving_mean_variance`, this function handles
variable creation.
Args:
value: `float`-like `Tensor`. Same shape as `mean_var` and `variance_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
collections: Python list of graph-collections keys to which the internal
variables `mean_var` and `variance_var` are added.
Default value is `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name of the returned operation.
Returns:
mean_var: `Variable` representing the `value`-updated exponentially weighted
moving mean.
variance_var: `Variable` representing the `value`-updated
exponentially weighted moving variance.
Raises:
TypeError: if `value_var` does not have float type `dtype`.
TypeError: if `value`, `decay` have different `base_dtype`.
#### References
[1]: Tony Finch. Incremental calculation of weighted mean and variance.
_Technical Report_, 2009.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
with variable_scope.variable_scope(
name, "moving_mean_variance", [value, decay]):
value = ops.convert_to_tensor(value, name="value")
base_dtype = value.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"value.base_dtype({}) does not have float type `dtype`.".format(
base_dtype.name))
decay = ops.convert_to_tensor(decay, dtype=base_dtype, name="decay")
variance_var = variable_scope.get_variable(
"moving_variance",
shape=value.shape,
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
mean_var = variable_scope.get_variable(
"moving_mean",
shape=value.shape,
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
return assign_moving_mean_variance(
mean_var, variance_var, value, decay)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/moving_stats.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The PoissonLogNormalQuadratureCompound distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_lib
from tensorflow.python.util import deprecation
__all__ = [
"PoissonLogNormalQuadratureCompound",
"quadrature_scheme_lognormal_gauss_hermite",
"quadrature_scheme_lognormal_quantiles",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None): # pylint: disable=unused-argument
"""Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associate with each `grid` value.
"""
with ops.name_scope(name, "vector_diffeomixture_quadrature_gauss_hermite",
[loc, scale]):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
grid = grid.astype(loc.dtype.as_numpy_dtype)
probs = probs.astype(loc.dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = ops.convert_to_tensor(probs, name="probs", dtype=loc.dtype)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
grid = (loc[..., array_ops.newaxis]
+ np.sqrt(2.) * scale[..., array_ops.newaxis] * grid)
return grid, probs
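# Illustrative sketch (not part of the library): the same Gauss-Hermite change
# of variables in NumPy, used to approximate E[f(Z)] for Z ~ Normal(loc,
# scale**2). With f = exp this should approach exp(loc + scale**2 / 2), the
# mean of the corresponding LogNormal. Helper name and inputs are made up for
# this example.
def _gauss_hermite_expectation_numpy(f, loc, scale, quadrature_size):
  grid, weights = np.polynomial.hermite.hermgauss(quadrature_size)
  weights = weights / weights.sum()          # normalize to probabilities
  z = loc + np.sqrt(2.) * scale * grid       # same substitution as above
  return np.sum(weights * f(z))
# _gauss_hermite_expectation_numpy(np.exp, 0.5, 1.0, 20)  # ~= e = 2.718...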
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_lognormal_quantiles(
loc, scale, quadrature_size,
validate_args=False, name=None):
"""Use LogNormal quantiles to form quadrature on positive-reals.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associate with each `grid` value.
"""
with ops.name_scope(name, "quadrature_scheme_lognormal_quantiles",
[loc, scale]):
# Create a LogNormal distribution.
dist = transformed_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=loc, scale=scale),
bijector=Exp(),
validate_args=validate_args)
batch_ndims = dist.batch_shape.ndims
if batch_ndims is None:
batch_ndims = array_ops.shape(dist.batch_shape_tensor())[0]
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = array_ops.zeros([], dtype=dist.dtype)
edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
# Expand edges so its broadcast across batch dims.
edges = array_ops.reshape(edges, shape=array_ops.concat([
[-1], array_ops.ones([batch_ndims], dtype=dtypes.int32)], axis=0))
quantiles = dist.quantile(edges)
# Cyclically permute left by one.
perm = array_ops.concat([
math_ops.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = array_ops.transpose(quantiles, perm)
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = array_ops.fill(
dims=[quadrature_size],
value=1. / math_ops.cast(quadrature_size, dist.dtype))
return grid, probs
class PoissonLogNormalQuadratureCompound(distribution_lib.Distribution):
"""`PoissonLogNormalQuadratureCompound` distribution.
The `PoissonLogNormalQuadratureCompound` is an approximation to a
Poisson-LogNormal [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,
```none
p(k|loc, scale)
= int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)
approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }
```
By default, the `grid` is chosen as quantiles of the `LogNormal` distribution
parameterized by `loc`, `scale` and the `prob` vector is
`[1. / quadrature_size]*quadrature_size`.
In the non-approximation case, a draw from the LogNormal prior represents the
Poisson rate parameter. Unfortunately, the non-approximate distribution lacks
an analytical probability density function (pdf). Therefore the
`PoissonLogNormalQuadratureCompound` class implements an approximation based
on [quadrature](https://en.wikipedia.org/wiki/Numerical_integration).
Note: although the `PoissonLogNormalQuadratureCompound` is approximately the
Poisson-LogNormal compound distribution, it is itself a valid distribution.
Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are
all mutually consistent.
#### Mathematical Details
The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal
[compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution). Using
variable-substitution and [numerical quadrature](
https://en.wikipedia.org/wiki/Numerical_integration) (default:
based on `LogNormal` quantiles) we can redefine the distribution to be a
parameter-less convex combination of `deg` different Poisson samples.
That is, defined over positive integers, this distribution is parameterized
by a (batch of) `loc` and `scale` scalars.
The probability density function (pdf) is,
```none
pdf(k | loc, scale, deg)
= sum{ prob[d] Poisson(k | lambda=exp(grid[d]))
: d=0, ..., deg-1 }
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create two batches of PoissonLogNormalQuadratureCompounds, one with
# prior `loc = 0.` and another with `loc = -0.5`. In both cases `scale = 1.`
pln = tfd.PoissonLogNormalQuadratureCompound(
loc=[0., -0.5],
scale=1.,
quadrature_size=10,
validate_args=True)
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
quadrature_size=8,
quadrature_fn=quadrature_scheme_lognormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="PoissonLogNormalQuadratureCompound"):
"""Constructs the PoissonLogNormalQuadratureCompound`.
Note: `probs` returned by (optional) `quadrature_fn` are presumed to be
either a length-`quadrature_size` vector or a batch of vectors in 1-to-1
correspondence with the returned `grid`. (I.e., broadcasting is only
partially supported.)
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
quadrature_fn: Python callable taking `loc`, `scale`,
`quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
        representing the LogNormal grid and corresponding normalized weights.
Default value: `quadrature_scheme_lognormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `quadrature_grid` and `quadrature_probs` have different base
`dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
if loc is not None:
loc = ops.convert_to_tensor(loc, name="loc")
if scale is not None:
scale = ops.convert_to_tensor(
scale, dtype=None if loc is None else loc.dtype, name="scale")
self._quadrature_grid, self._quadrature_probs = tuple(quadrature_fn(
loc, scale, quadrature_size, validate_args))
dt = self._quadrature_grid.dtype
if dt.base_dtype != self._quadrature_probs.dtype.base_dtype:
raise TypeError("Quadrature grid dtype ({}) does not match quadrature "
"probs dtype ({}).".format(
dt.name, self._quadrature_probs.dtype.name))
self._distribution = poisson_lib.Poisson(
log_rate=self._quadrature_grid,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._mixture_distribution = categorical_lib.Categorical(
logits=math_ops.log(self._quadrature_probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._loc = loc
self._scale = scale
self._quadrature_size = quadrature_size
super(PoissonLogNormalQuadratureCompound, self).__init__(
dtype=dt,
reparameterization_type=distribution_lib.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[loc, scale],
name=name)
@property
def mixture_distribution(self):
"""Distribution which randomly selects a Poisson with quadrature param."""
return self._mixture_distribution
@property
def distribution(self):
"""Base Poisson parameterized by a quadrature grid."""
return self._distribution
@property
def loc(self):
"""Location parameter of the LogNormal prior."""
return self._loc
@property
def scale(self):
"""Scale parameter of the LogNormal prior."""
return self._scale
@property
def quadrature_size(self):
return self._quadrature_size
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
self.distribution.batch_shape_tensor(),
array_ops.shape(self.mixture_distribution.logits))[:-1]
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.distribution.batch_shape,
self.mixture_distribution.logits.shape)[:-1]
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
batch_size = self.batch_shape.num_elements()
if batch_size is None:
batch_size = math_ops.reduce_prod(self.batch_shape_tensor())
# We need to "sample extra" from the mixture distribution if it doesn't
# already specify a probs vector for each batch coordinate.
# We only support this kind of reduced broadcasting, i.e., there is exactly
# one probs vector for all batch dims or one for each.
ids = self._mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.mixture_distribution.is_scalar_batch(),
[batch_size],
np.int32([]))),
seed=distribution_util.gen_new_seed(
seed, "poisson_lognormal_quadrature_compound"))
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = array_ops.reshape(ids, shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
np.int32([-1]))))
# Stride `quadrature_size` for `batch_size` number of times.
offset = math_ops.range(start=0,
limit=batch_size * self._quadrature_size,
delta=self._quadrature_size,
dtype=ids.dtype)
ids += offset
rate = array_ops.gather(
array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
rate = array_ops.reshape(
rate, shape=concat_vectors([n], self.batch_shape_tensor()))
return random_ops.random_poisson(
lam=rate, shape=[], dtype=self.dtype, seed=seed)
def _log_prob(self, x):
return math_ops.reduce_logsumexp(
(self.mixture_distribution.logits
+ self.distribution.log_prob(x[..., array_ops.newaxis])),
axis=-1)
def _mean(self):
return math_ops.exp(
math_ops.reduce_logsumexp(
self.mixture_distribution.logits + self.distribution.log_rate,
axis=-1))
def _variance(self):
return math_ops.exp(self._log_variance())
def _stddev(self):
return math_ops.exp(0.5 * self._log_variance())
def _log_variance(self):
# Following calculation is based on law of total variance:
#
# Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
#
# where,
#
# Z|v ~ interpolate_affine[v](distribution)
# V ~ mixture_distribution
#
# thus,
#
# E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
# Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
v = array_ops.stack([
# log(self.distribution.variance()) = log(Var[d]) = log(rate[d])
self.distribution.log_rate,
# log((Mean[d] - Mean)**2)
2. * math_ops.log(
math_ops.abs(self.distribution.mean()
- self._mean()[..., array_ops.newaxis])),
], axis=-1)
return math_ops.reduce_logsumexp(
self.mixture_distribution.logits[..., array_ops.newaxis] + v,
axis=[-2, -1])
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [distribution_util.static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
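# Illustrative sketch (not part of the library): a minimal NumPy/SciPy
# approximation of the compound pmf
#   p(k) = int LogNormal(l; loc, scale) Poisson(k | l) dl
# using equal-weight quadrature over quantile midpoints, here formed in
# log-rate space for simplicity. The helper is hypothetical and is not the
# class's exact scheme; it only illustrates the quadrature idea.
def _approx_poisson_lognormal_pmf_numpy(k, loc, scale, quadrature_size=10):
  from scipy import stats  # local import, used only by this sketch
  edges = np.linspace(0., 1., quadrature_size + 3)[1:-1]      # omit {0, 1}
  log_rate_edges = stats.norm(loc, scale).ppf(edges)          # log-rate quantiles
  log_rate_grid = (log_rate_edges[:-1] + log_rate_edges[1:]) / 2.
  probs = np.full(quadrature_size, 1. / quadrature_size)
  return np.sum(probs * stats.poisson(mu=np.exp(log_rate_grid)).pmf(k))
# Monte-Carlo reference for comparison:
#   rates = np.exp(np.random.default_rng(0).normal(loc, scale, size=200000))
#   np.mean(stats.poisson(mu=rates).pmf(k))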
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/poisson_lognormal.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Independent distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.util import deprecation
class Independent(distribution_lib.Distribution):
"""Independent distribution from batch of distributions.
This distribution is useful for regarding a collection of independent,
non-identical distributions as a single random variable. For example, the
`Independent` distribution composed of a collection of `Bernoulli`
distributions might define a distribution over an image (where each
`Bernoulli` is a distribution over each pixel).
More precisely, a collection of `B` (independent) `E`-variate random variables
  (rv) `{X_1, ..., X_B}` can be regarded as a `[B, E]`-variate random variable
`(X_1, ..., X_B)` with probability
`p(x_1, ..., x_B) = p_1(x_1) * ... * p_B(x_B)` where `p_b(X_b)` is the
probability of the `b`-th rv. More generally `B, E` can be arbitrary shapes.
Similarly, the `Independent` distribution specifies a distribution over `[B,
E]`-shaped events. It operates by reinterpreting the rightmost batch dims as
part of the event dimensions. The `reinterpreted_batch_ndims` parameter
controls the number of batch dims which are absorbed as event dims;
`reinterpreted_batch_ndims < len(batch_shape)`. For example, the `log_prob`
function entails a `reduce_sum` over the rightmost `reinterpreted_batch_ndims`
after calling the base distribution's `log_prob`. In other words, since the
batch dimension(s) index independent distributions, the resultant multivariate
will have independent components.
#### Mathematical Details
The probability function is,
```none
prob(x; reinterpreted_batch_ndims) = tf.reduce_prod(
dist.prob(x),
axis=-1-range(reinterpreted_batch_ndims))
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Make independent distribution from a 2-batch Normal.
ind = tfd.Independent(
distribution=tfd.Normal(loc=[-1., 1], scale=[0.1, 0.5]),
reinterpreted_batch_ndims=1)
# All batch dims have been "absorbed" into event dims.
ind.batch_shape # ==> []
ind.event_shape # ==> [2]
# Make independent distribution from a 2-batch bivariate Normal.
ind = tfd.Independent(
distribution=tfd.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]],
scale_identity_multiplier=[1., 0.5]),
reinterpreted_batch_ndims=1)
# All batch dims have been "absorbed" into event dims.
ind.batch_shape # ==> []
ind.event_shape # ==> [2, 2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(
self, distribution, reinterpreted_batch_ndims=None,
validate_args=False, name=None):
"""Construct a `Independent` distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
reinterpreted_batch_ndims: Scalar, integer number of rightmost batch dims
which will be regarded as event dims. When `None` all but the first
batch axis (batch axis 0) will be transferred to event dimensions
(analogous to `tf.compat.v1.layers.flatten`).
validate_args: Python `bool`. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: The name for ops managed by the distribution.
Default value: `Independent + distribution.name`.
Raises:
ValueError: if `reinterpreted_batch_ndims` exceeds
`distribution.batch_ndims`
"""
parameters = dict(locals())
name = name or "Independent" + distribution.name
self._distribution = distribution
with ops.name_scope(name) as name:
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = self._get_default_reinterpreted_batch_ndims(
distribution)
reinterpreted_batch_ndims = ops.convert_to_tensor(
reinterpreted_batch_ndims,
dtype=dtypes.int32,
name="reinterpreted_batch_ndims")
self._reinterpreted_batch_ndims = reinterpreted_batch_ndims
self._static_reinterpreted_batch_ndims = tensor_util.constant_value(
reinterpreted_batch_ndims)
if self._static_reinterpreted_batch_ndims is not None:
self._reinterpreted_batch_ndims = self._static_reinterpreted_batch_ndims
super(Independent, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
graph_parents=(
[reinterpreted_batch_ndims] +
distribution._graph_parents), # pylint: disable=protected-access
name=name)
self._runtime_assertions = self._make_runtime_assertions(
distribution, reinterpreted_batch_ndims, validate_args)
@property
def distribution(self):
return self._distribution
@property
def reinterpreted_batch_ndims(self):
return self._reinterpreted_batch_ndims
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
batch_shape = self.distribution.batch_shape_tensor()
dim0 = tensor_shape.dimension_value(
batch_shape.shape.with_rank_at_least(1)[0])
batch_ndims = (dim0
if dim0 is not None
else array_ops.shape(batch_shape)[0])
return batch_shape[:batch_ndims - self.reinterpreted_batch_ndims]
def _batch_shape(self):
batch_shape = self.distribution.batch_shape
if (self._static_reinterpreted_batch_ndims is None
or batch_shape.ndims is None):
return tensor_shape.TensorShape(None)
d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
return batch_shape[:d]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
batch_shape = self.distribution.batch_shape_tensor()
dim0 = tensor_shape.dimension_value(
batch_shape.shape.with_rank_at_least(1)[0])
batch_ndims = (dim0
if dim0 is not None
else array_ops.shape(batch_shape)[0])
return array_ops.concat([
batch_shape[batch_ndims - self.reinterpreted_batch_ndims:],
self.distribution.event_shape_tensor(),
], axis=0)
def _event_shape(self):
batch_shape = self.distribution.batch_shape
if (self._static_reinterpreted_batch_ndims is None
or batch_shape.ndims is None):
return tensor_shape.TensorShape(None)
d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
return batch_shape[d:].concatenate(self.distribution.event_shape)
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.sample(sample_shape=n, seed=seed)
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
return self._reduce_sum(self.distribution.log_prob(x))
def _entropy(self):
with ops.control_dependencies(self._runtime_assertions):
return self._reduce_sum(self.distribution.entropy())
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.mean()
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.variance()
def _stddev(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.stddev()
def _mode(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.mode()
def _make_runtime_assertions(
self, distribution, reinterpreted_batch_ndims, validate_args):
assertions = []
static_reinterpreted_batch_ndims = tensor_util.constant_value(
reinterpreted_batch_ndims)
batch_ndims = distribution.batch_shape.ndims
if batch_ndims is not None and static_reinterpreted_batch_ndims is not None:
if static_reinterpreted_batch_ndims > batch_ndims:
raise ValueError("reinterpreted_batch_ndims({}) cannot exceed "
"distribution.batch_ndims({})".format(
static_reinterpreted_batch_ndims, batch_ndims))
elif validate_args:
batch_shape = distribution.batch_shape_tensor()
dim0 = tensor_shape.dimension_value(
batch_shape.shape.with_rank_at_least(1)[0])
batch_ndims = (
dim0
if dim0 is not None
else array_ops.shape(batch_shape)[0])
assertions.append(check_ops.assert_less_equal(
reinterpreted_batch_ndims, batch_ndims,
message=("reinterpreted_batch_ndims cannot exceed "
"distribution.batch_ndims")))
return assertions
def _reduce_sum(self, stat):
if self._static_reinterpreted_batch_ndims is None:
range_ = math_ops.range(self._reinterpreted_batch_ndims)
else:
range_ = np.arange(self._static_reinterpreted_batch_ndims)
return math_ops.reduce_sum(stat, axis=-1-range_)
def _get_default_reinterpreted_batch_ndims(self, distribution):
"""Computes the default value for reinterpreted_batch_ndim __init__ arg."""
ndims = distribution.batch_shape.ndims
if ndims is None:
which_maximum = math_ops.maximum
ndims = array_ops.shape(distribution.batch_shape_tensor())[0]
else:
which_maximum = np.maximum
return which_maximum(0, ndims - 1)
@kullback_leibler.RegisterKL(Independent, Independent)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _kl_independent(a, b, name="kl_independent"):
"""Batched KL divergence `KL(a || b)` for Independent distributions.
We can leverage the fact that
```
KL(Independent(a) || Independent(b)) = sum(KL(a || b))
```
where the sum is over the `reinterpreted_batch_ndims`.
Args:
a: Instance of `Independent`.
b: Instance of `Independent`.
name: (optional) name to use for created ops. Default "kl_independent".
Returns:
Batchwise `KL(a || b)`.
Raises:
ValueError: If the event space for `a` and `b`, or their underlying
distributions don't match.
"""
p = a.distribution
q = b.distribution
# The KL between any two (non)-batched distributions is a scalar.
# Given that the KL between two factored distributions is the sum, i.e.
  # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
if a.event_shape.is_fully_defined() and b.event_shape.is_fully_defined():
if a.event_shape == b.event_shape:
if p.event_shape == q.event_shape:
num_reduce_dims = a.event_shape.ndims - p.event_shape.ndims
reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
return math_ops.reduce_sum(
kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
else:
raise NotImplementedError("KL between Independents with different "
"event shapes not supported.")
else:
raise ValueError("Event shapes do not match.")
else:
with ops.control_dependencies([
check_ops.assert_equal(a.event_shape_tensor(), b.event_shape_tensor()),
check_ops.assert_equal(p.event_shape_tensor(), q.event_shape_tensor())
]):
      num_reduce_dims = (
          array_ops.shape(a.event_shape_tensor())[0] -
          array_ops.shape(p.event_shape_tensor())[0])
      reduce_dims = math_ops.range(-num_reduce_dims, 0, 1)
return math_ops.reduce_sum(
kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
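# A minimal, illustrative NumPy/SciPy sketch (not used by this module) of the
# two identities relied on above: `Independent` sums the base distribution's
# `log_prob` over the reinterpreted batch dims, and
# `KL(Independent(a) || Independent(b)) = sum(KL(a || b))`. All numbers below
# are made-up toy values; the check uses diagonal normals.
if __name__ == "__main__":
  import numpy as _np
  from scipy import stats as _stats

  loc_a, scale_a = _np.array([-1., 1.]), _np.array([0.1, 0.5])
  loc_b, scale_b = _np.array([0., 2.]), _np.array([0.2, 0.4])
  x = _np.array([-0.9, 1.3])

  # Independent(Normal(loc_a, scale_a), reinterpreted_batch_ndims=1).log_prob(x)
  # is the sum of per-component normal log-densities ...
  indep_log_prob = _np.sum(_stats.norm(loc_a, scale_a).logpdf(x))
  # ... which equals the diagonal multivariate normal log-density.
  mvn_log_prob = _stats.multivariate_normal(loc_a, _np.diag(scale_a ** 2)).logpdf(x)
  _np.testing.assert_allclose(indep_log_prob, mvn_log_prob)

  # KL between the two Independents is the sum of component-wise normal KLs.
  kl_components = (_np.log(scale_b / scale_a)
                   + (scale_a ** 2 + (loc_a - loc_b) ** 2) / (2. * scale_b ** 2)
                   - 0.5)
  print("KL(Independent(a) || Independent(b)) =", _np.sum(kl_components))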
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/independent.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Relaxed OneHotCategorical distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class ExpRelaxedOneHotCategorical(distribution.Distribution):
"""ExpRelaxedOneHotCategorical distribution with temperature and logits.
An ExpRelaxedOneHotCategorical distribution is a log-transformed
RelaxedOneHotCategorical distribution. The RelaxedOneHotCategorical is a
distribution over random probability vectors, vectors of positive real
values that sum to one, which continuously approximates a OneHotCategorical.
The degree of approximation is controlled by a temperature: as the temperature
goes to 0 the RelaxedOneHotCategorical becomes discrete with a distribution
  described by the logits; as the temperature goes to infinity the
RelaxedOneHotCategorical becomes the constant distribution that is identically
the constant vector of (1/event_size, ..., 1/event_size).
Because computing log-probabilities of the RelaxedOneHotCategorical can
suffer from underflow issues, this class is one solution for loss
functions that depend on log-probabilities, such as the KL Divergence found
in the variational autoencoder loss. The KL divergence between two
distributions is invariant under invertible transformations, so evaluating
KL divergences of ExpRelaxedOneHotCategorical samples, which are always
followed by a `tf.exp` op, is equivalent to evaluating KL divergences of
RelaxedOneHotCategorical samples. See the appendix of Maddison et al., 2016
for more mathematical details, where this distribution is called the
ExpConcrete.
#### Examples
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution. If those samples
are followed by a `tf.exp` op, then they are distributed as a relaxed onehot
categorical.
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = ExpRelaxedOneHotCategorical(temperature, probs=p)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
# RelaxedOneHotCategorical(temperature, probs=p)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
  # RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. Because the temperature is very low, samples from
this distribution are almost discrete, with one component almost 0 and the
others very negative. The 2nd class is the most likely to be the largest
component in samples drawn from this distribution.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
  # RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. Because the temperature is very high, samples from
this distribution are usually close to the (-log(3), -log(3), -log(3)) vector.
The 2nd class is still the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 10
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
  # RelaxedOneHotCategorical(temperature, logits=logits)
```
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(
self,
temperature,
logits=None,
probs=None,
dtype=None,
validate_args=False,
allow_nan_stats=True,
name="ExpRelaxedOneHotCategorical"):
"""Initialize ExpRelaxedOneHotCategorical using class log-probabilities.
Args:
      temperature: A 0-D `Tensor`, representing the temperature
of a set of ExpRelaxedCategorical distributions. The temperature should
be positive.
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of ExpRelaxedCategorical distributions. The first
`N - 1` dimensions index into a batch of independent distributions and
the last dimension represents a vector of logits for each class. Only
one of `logits` or `probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of ExpRelaxedCategorical distributions. The first
`N - 1` dimensions index into a batch of independent distributions and
the last dimension represents a vector of probabilities for each
class. Only one of `logits` or `probs` should be passed in.
dtype: The type of the event samples (default: inferred from
logits/probs).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs, temperature]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
name=name, logits=logits, probs=probs, validate_args=validate_args,
multidimensional=True)
if dtype is None:
dtype = self._logits.dtype
if not validate_args:
temperature = math_ops.cast(temperature, dtype)
with ops.control_dependencies([check_ops.assert_positive(temperature)]
if validate_args else []):
self._temperature = array_ops.identity(temperature, name="temperature")
self._temperature_2d = array_ops.reshape(temperature, [-1, 1],
name="temperature_2d")
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
with ops.name_scope(name="event_size"):
self._event_size = array_ops.shape(self._logits)[-1]
super(ExpRelaxedOneHotCategorical, self).__init__(
dtype=dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs,
self._temperature],
name=name)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def temperature(self):
"""Batchwise temperature tensor of a RelaxedCategorical."""
return self._temperature
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of probabilities summing to one."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._logits)[:-1]
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.logits)[-1:]
def _event_shape(self):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
logits = self.logits * array_ops.ones(sample_shape, dtype=self.dtype)
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.shape(logits_2d),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
gumbel = -math_ops.log(-math_ops.log(uniform))
noisy_logits = math_ops.div(gumbel + logits_2d, self._temperature_2d)
samples = nn_ops.log_softmax(noisy_logits)
ret = array_ops.reshape(samples, sample_shape)
return ret
def _log_prob(self, x):
x = self._assert_valid_sample(x)
# broadcast logits or x if need be.
logits = self.logits
if (not x.get_shape().is_fully_defined() or
not logits.get_shape().is_fully_defined() or
x.get_shape() != logits.get_shape()):
logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
x = array_ops.ones_like(logits, dtype=x.dtype) * x
logits_shape = array_ops.shape(math_ops.reduce_sum(logits, axis=[-1]))
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
x_2d = array_ops.reshape(x, [-1, self.event_size])
# compute the normalization constant
k = math_ops.cast(self.event_size, x.dtype)
log_norm_const = (math_ops.lgamma(k)
+ (k - 1.)
* math_ops.log(self.temperature))
# compute the unnormalized density
log_softmax = nn_ops.log_softmax(logits_2d - x_2d * self._temperature_2d)
log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keepdims=False)
# combine unnormalized density with normalization constant
log_prob = log_norm_const + log_unnorm_prob
# Reshapes log_prob to be consistent with shape of user-supplied logits
ret = array_ops.reshape(log_prob, logits_shape)
return ret
def _assert_valid_sample(self, x):
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_non_positive(x),
check_ops.assert_near(
array_ops.zeros([], dtype=self.dtype),
math_ops.reduce_logsumexp(x, axis=[-1])),
], x)
class RelaxedOneHotCategorical(
transformed_distribution.TransformedDistribution):
"""RelaxedOneHotCategorical distribution with temperature and logits.
The RelaxedOneHotCategorical is a distribution over random probability
vectors, vectors of positive real values that sum to one, which continuously
approximates a OneHotCategorical. The degree of approximation is controlled by
a temperature: as the temperature goes to 0 the RelaxedOneHotCategorical
becomes discrete with a distribution described by the `logits` or `probs`
  parameters; as the temperature goes to infinity the RelaxedOneHotCategorical
becomes the constant distribution that is identically the constant vector of
(1/event_size, ..., 1/event_size).
The RelaxedOneHotCategorical distribution was concurrently introduced as the
Gumbel-Softmax (Jang et al., 2016) and Concrete (Maddison et al., 2016)
distributions for use as a reparameterized continuous approximation to the
`Categorical` one-hot distribution. If you use this distribution, please cite
both papers.
#### Examples
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = RelaxedOneHotCategorical(temperature, probs=p)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. Because the temperature is very low, samples from
this distribution are almost discrete, with one component almost 1 and the
others nearly 0. The 2nd class is the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. Because the temperature is very high, samples from
this distribution are usually close to the (1/3, 1/3, 1/3) vector. The 2nd
class is still the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 10
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with
Gumbel-Softmax. 2016.
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(
self,
temperature,
logits=None,
probs=None,
dtype=None,
validate_args=False,
allow_nan_stats=True,
name="RelaxedOneHotCategorical"):
"""Initialize RelaxedOneHotCategorical using class log-probabilities.
Args:
      temperature: A 0-D `Tensor`, representing the temperature
of a set of RelaxedOneHotCategorical distributions. The temperature
should be positive.
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of RelaxedOneHotCategorical distributions. The first
`N - 1` dimensions index into a batch of independent distributions and
the last dimension represents a vector of logits for each class. Only
one of `logits` or `probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of RelaxedOneHotCategorical distributions. The first `N - 1`
dimensions index into a batch of independent distributions and the last
dimension represents a vector of probabilities for each class. Only one
of `logits` or `probs` should be passed in.
dtype: The type of the event samples (default: inferred from
logits/probs).
validate_args: Unused in this distribution.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
"""
dist = ExpRelaxedOneHotCategorical(temperature,
logits=logits,
probs=probs,
dtype=dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
super(RelaxedOneHotCategorical, self).__init__(dist,
bijectors.Exp(),
name=name)
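# A minimal, illustrative NumPy sketch (not used by this module) of the
# reparameterized sampler in `ExpRelaxedOneHotCategorical._sample_n`: add
# Gumbel noise to the logits, divide by the temperature and apply log-softmax.
# The exp of the result is a RelaxedOneHotCategorical (Concrete) sample. The
# temperature, logits and seed below are made-up toy values.
if __name__ == "__main__":
  import numpy as _np

  rng = _np.random.RandomState(0)
  temperature = 0.5
  logits = _np.array([-2., 2., 0.])
  # Uniform variates from the open interval (0, 1), as in `_sample_n`.
  uniform = rng.uniform(low=_np.finfo(_np.float64).tiny, high=1.,
                        size=logits.shape)
  gumbel = -_np.log(-_np.log(uniform))
  noisy = (gumbel + logits) / temperature
  log_sample = noisy - _np.log(_np.sum(_np.exp(noisy)))  # log-softmax
  sample = _np.exp(log_sample)
  # The exp'd sample lies on the simplex: positive entries that sum to one.
  _np.testing.assert_allclose(sample.sum(), 1.)
  print("ExpConcrete sample:", log_sample, "exp of sample:", sample)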
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Negative Binomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class NegativeBinomial(distribution.Distribution):
"""NegativeBinomial distribution.
The NegativeBinomial distribution is related to the experiment of performing
Bernoulli trials in sequence. Given a Bernoulli trial with probability `p` of
success, the NegativeBinomial distribution represents the distribution over
the number of successes `s` that occur until we observe `f` failures.
The probability mass function (pmf) is,
```none
pmf(s; f, p) = p**s (1 - p)**f / Z
Z = s! (f - 1)! / (s + f - 1)!
```
where:
* `total_count = f`,
* `probs = p`,
  * `Z` is the normalizing constant, and,
* `n!` is the factorial of `n`.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="NegativeBinomial"):
"""Construct NegativeBinomial distributions.
Args:
total_count: Non-negative floating-point `Tensor` with shape
broadcastable to `[B1,..., Bb]` with `b >= 0` and the same dtype as
`probs` or `logits`. Defines this as a batch of `N1 x ... x Nm`
different Negative Binomial distributions. In practice, this represents
the number of negative Bernoulli trials to stop at (the `total_count`
of failures), but this is still a valid distribution when
`total_count` is a non-integer.
logits: Floating-point `Tensor` with shape broadcastable to
`[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
Each entry represents logits for the probability of success for
independent Negative Binomial distributions and must be in the open
interval `(-inf, inf)`. Only one of `logits` or `probs` should be
specified.
probs: Positive floating-point `Tensor` with shape broadcastable to
`[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
Each entry represents the probability of success for independent
Negative Binomial distributions and must be in the open interval
`(0, 1)`. Only one of `logits` or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(total_count)] if validate_args else []):
self._total_count = array_ops.identity(total_count)
super(NegativeBinomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count, self._probs, self._logits],
name=name)
@property
def total_count(self):
"""Number of negative trials."""
return self._total_count
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.total_count),
array_ops.shape(self.probs))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.total_count.get_shape(),
self.probs.get_shape())
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
# Here we use the fact that if:
# lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
# then X ~ Poisson(lam) is Negative Binomially distributed.
rate = random_ops.random_gamma(
shape=[n],
alpha=self.total_count,
beta=math_ops.exp(-self.logits),
dtype=self.dtype,
seed=seed)
return random_ops.random_poisson(
rate,
shape=[],
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, "negative_binom"))
def _cdf(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return math_ops.betainc(self.total_count, 1. + x,
math_ops.sigmoid(-self.logits))
def _log_prob(self, x):
return (self._log_unnormalized_prob(x)
- self._log_normalization(x))
def _log_unnormalized_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return (self.total_count * math_ops.log_sigmoid(-self.logits)
+ x * math_ops.log_sigmoid(self.logits))
def _log_normalization(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return (-math_ops.lgamma(self.total_count + x)
+ math_ops.lgamma(1. + x)
+ math_ops.lgamma(self.total_count))
def _mean(self):
return self.total_count * math_ops.exp(self.logits)
def _mode(self):
adjusted_count = array_ops.where_v2(1. < self.total_count,
self.total_count - 1.,
array_ops.zeros_like(self.total_count))
return math_ops.floor(adjusted_count * math_ops.exp(self.logits))
def _variance(self):
return self._mean() / math_ops.sigmoid(-self.logits)
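# A minimal, illustrative NumPy sketch (not used by this module) of the
# Gamma-Poisson mixture in `_sample_n`, compared against the analytic moments
# returned by `_mean` and `_variance`. The `total_count`, `logits`, seed and
# sample count are made-up toy values, and the Monte-Carlo comparison is only
# approximate.
if __name__ == "__main__":
  import numpy as _np

  rng = _np.random.RandomState(0)
  total_count, logits = 5., 0.3
  probs = 1. / (1. + _np.exp(-logits))  # sigmoid(logits)
  # lam ~ Gamma(concentration=total_count, rate=exp(-logits)); NumPy's gamma
  # sampler takes a scale argument, i.e. scale = 1 / rate = exp(logits).
  lam = rng.gamma(shape=total_count, scale=_np.exp(logits), size=200000)
  x = rng.poisson(lam)
  analytic_mean = total_count * _np.exp(logits)     # `_mean`
  analytic_variance = analytic_mean / (1. - probs)  # `_variance`
  print("sample mean/variance:  ", x.mean(), x.var())
  print("analytic mean/variance:", analytic_mean, analytic_variance)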
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/negative_binomial.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing distributions and/or bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_ops
__all__ = [
"DiscreteScalarDistributionTestHelpers",
"VectorDistributionTestHelpers",
]
class DiscreteScalarDistributionTestHelpers(object):
"""DiscreteScalarDistributionTestHelpers."""
def run_test_sample_consistent_log_prob(self,
sess_run_fn,
dist,
num_samples=int(1e5),
num_threshold=int(1e3),
seed=42,
batch_size=None,
rtol=1e-2,
atol=0.):
"""Tests that sample/log_prob are consistent with each other.
"Consistency" means that `sample` and `log_prob` correspond to the same
distribution.
    Note: this test only verifies a necessary condition for consistency--it
    does not verify sufficiency, hence does not prove that `sample` and
    `log_prob` are truly consistent.
Args:
sess_run_fn: Python `callable` taking `list`-like of `Tensor`s and
returning a list of results after running one "step" of TensorFlow
computation, typically set to `sess.run`.
dist: Distribution instance or object which implements `sample`,
`log_prob`, `event_shape_tensor` and `batch_shape_tensor`.
num_samples: Python `int` scalar indicating the number of Monte-Carlo
samples to draw from `dist`.
num_threshold: Python `int` scalar indicating the number of samples a
bucket must contain before being compared to the probability.
        Default value: 1e3; must be at least 1. Warning: setting this too high
        will cause the test to falsely pass, while setting it too low will
        cause it to falsely fail.
seed: Python `int` indicating the seed to use when sampling from `dist`.
In general it is not recommended to use `None` during a test as this
increases the likelihood of spurious test failure.
batch_size: Hint for unpacking result of samples. Default: `None` means
batch_size is inferred.
rtol: Python `float`-type indicating the admissible relative error between
analytical and sample statistics.
atol: Python `float`-type indicating the admissible absolute error between
analytical and sample statistics.
Raises:
ValueError: if `num_threshold < 1`.
"""
if num_threshold < 1:
raise ValueError(
"num_threshold({}) must be at least 1.".format(num_threshold))
# Histogram only supports vectors so we call it once per batch coordinate.
y = dist.sample(num_samples, seed=seed)
y = array_ops.reshape(y, shape=[num_samples, -1])
if batch_size is None:
batch_size = math_ops.reduce_prod(dist.batch_shape_tensor())
batch_dims = array_ops.shape(dist.batch_shape_tensor())[0]
edges_expanded_shape = 1 + array_ops.pad([-2], paddings=[[0, batch_dims]])
for b, x in enumerate(array_ops.unstack(y, num=batch_size, axis=1)):
counts, edges = self.histogram(x)
edges = array_ops.reshape(edges, edges_expanded_shape)
probs = math_ops.exp(dist.log_prob(edges))
probs = array_ops.reshape(probs, shape=[-1, batch_size])[:, b]
[counts_, probs_] = sess_run_fn([counts, probs])
valid = counts_ > num_threshold
probs_ = probs_[valid]
counts_ = counts_[valid]
self.assertAllClose(probs_, counts_ / num_samples, rtol=rtol, atol=atol)
def run_test_sample_consistent_mean_variance(self,
sess_run_fn,
dist,
num_samples=int(1e5),
seed=24,
rtol=1e-2,
atol=0.):
"""Tests that sample/mean/variance are consistent with each other.
"Consistency" means that `sample`, `mean`, `variance`, etc all correspond
to the same distribution.
Args:
sess_run_fn: Python `callable` taking `list`-like of `Tensor`s and
returning a list of results after running one "step" of TensorFlow
computation, typically set to `sess.run`.
dist: Distribution instance or object which implements `sample`,
`log_prob`, `event_shape_tensor` and `batch_shape_tensor`.
num_samples: Python `int` scalar indicating the number of Monte-Carlo
samples to draw from `dist`.
seed: Python `int` indicating the seed to use when sampling from `dist`.
In general it is not recommended to use `None` during a test as this
increases the likelihood of spurious test failure.
rtol: Python `float`-type indicating the admissible relative error between
analytical and sample statistics.
atol: Python `float`-type indicating the admissible absolute error between
analytical and sample statistics.
"""
x = math_ops.cast(dist.sample(num_samples, seed=seed), dtypes.float32)
sample_mean = math_ops.reduce_mean(x, axis=0)
sample_variance = math_ops.reduce_mean(
math_ops.square(x - sample_mean), axis=0)
sample_stddev = math_ops.sqrt(sample_variance)
[sample_mean_, sample_variance_, sample_stddev_, mean_, variance_,
stddev_] = sess_run_fn([
sample_mean,
sample_variance,
sample_stddev,
dist.mean(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(mean_, sample_mean_, rtol=rtol, atol=atol)
self.assertAllClose(variance_, sample_variance_, rtol=rtol, atol=atol)
self.assertAllClose(stddev_, sample_stddev_, rtol=rtol, atol=atol)
def histogram(self, x, value_range=None, nbins=None, name=None):
"""Return histogram of values.
    Given the tensor `x`, this operation returns a rank 1 histogram
    counting the number of entries in `x` that fell into every bin. The
bins are equal width and determined by the arguments `value_range` and
`nbins`.
Args:
x: 1D numeric `Tensor` of items to count.
      value_range: Shape [2] `Tensor`. `x <= value_range[0]` will be
        mapped to `hist[0]`, `x >= value_range[1]` will be mapped to
`hist[-1]`. Must be same dtype as `x`.
nbins: Scalar `int32 Tensor`. Number of histogram bins.
name: Python `str` name prefixed to Ops created by this class.
Returns:
counts: 1D `Tensor` of counts, i.e.,
`counts[i] = sum{ edges[i-1] <= values[j] < edges[i] : j }`.
edges: 1D `Tensor` characterizing intervals used for counting.
"""
with ops.name_scope(name, "histogram", [x]):
x = ops.convert_to_tensor(x, name="x")
if value_range is None:
value_range = [math_ops.reduce_min(x), 1 + math_ops.reduce_max(x)]
value_range = ops.convert_to_tensor(value_range, name="value_range")
lo = value_range[0]
hi = value_range[1]
if nbins is None:
nbins = math_ops.cast(hi - lo, dtypes.int32)
delta = (hi - lo) / math_ops.cast(
nbins, dtype=value_range.dtype.base_dtype)
edges = math_ops.range(
start=lo, limit=hi, delta=delta, dtype=x.dtype.base_dtype)
counts = histogram_ops.histogram_fixed_width(
x, value_range=value_range, nbins=nbins)
return counts, edges
class VectorDistributionTestHelpers(object):
"""VectorDistributionTestHelpers helps test vector-event distributions."""
def run_test_sample_consistent_log_prob(self,
sess_run_fn,
dist,
num_samples=int(1e5),
radius=1.,
center=0.,
seed=42,
rtol=1e-2,
atol=0.):
"""Tests that sample/log_prob are mutually consistent.
"Consistency" means that `sample` and `log_prob` correspond to the same
distribution.
The idea of this test is to compute the Monte-Carlo estimate of the volume
enclosed by a hypersphere, i.e., the volume of an `n`-ball. While we could
choose an arbitrary function to integrate, the hypersphere's volume is nice
because it is intuitive, has an easy analytical expression, and works for
`dimensions > 1`.
Technical Details:
Observe that:
```none
int_{R**d} dx [x in Ball(radius=r, center=c)]
= E_{p(X)}[ [X in Ball(r, c)] / p(X) ]
= lim_{m->infty} m**-1 sum_j^m [x[j] in Ball(r, c)] / p(x[j]),
where x[j] ~iid p(X)
```
Thus, for fixed `m`, the above is approximately true when `sample` and
`log_prob` are mutually consistent.
Furthermore, the above calculation has the analytical result:
`pi**(d/2) r**d / Gamma(1 + d/2)`.
    Note: this test only verifies a necessary condition for consistency--it
    does not verify sufficiency, hence does not prove that `sample` and
    `log_prob` are truly consistent. For this reason we recommend testing
    several different hyperspheres (assuming the hypersphere is supported by
    the distribution). Furthermore, we gain additional trust in this test when
    `sample` is also tested against the first and second moments
    (`run_test_sample_consistent_mean_covariance`); it is unlikely that a
    "best-effort" implementation of `log_prob` would incorrectly pass both
    tests for different hyperspheres.
For a discussion on the analytical result (second-line) see:
https://en.wikipedia.org/wiki/Volume_of_an_n-ball.
For a discussion of importance sampling (fourth-line) see:
https://en.wikipedia.org/wiki/Importance_sampling.
Args:
sess_run_fn: Python `callable` taking `list`-like of `Tensor`s and
returning a list of results after running one "step" of TensorFlow
computation, typically set to `sess.run`.
dist: Distribution instance or object which implements `sample`,
`log_prob`, `event_shape_tensor` and `batch_shape_tensor`. The
distribution must have non-zero probability of sampling every point
enclosed by the hypersphere.
num_samples: Python `int` scalar indicating the number of Monte-Carlo
samples to draw from `dist`.
      radius: Python `float`-type indicating the radius of the `n`-ball whose
        volume we're computing.
      center: Python floating-type vector (or scalar) indicating the center of
        the `n`-ball whose volume we're computing. When scalar, the value is
broadcast to all event dims.
seed: Python `int` indicating the seed to use when sampling from `dist`.
In general it is not recommended to use `None` during a test as this
increases the likelihood of spurious test failure.
rtol: Python `float`-type indicating the admissible relative error between
actual- and approximate-volumes.
atol: Python `float`-type indicating the admissible absolute error between
actual- and approximate-volumes. In general this should be zero since a
typical radius implies a non-zero volume.
"""
def actual_hypersphere_volume(dims, radius):
# https://en.wikipedia.org/wiki/Volume_of_an_n-ball
      # Using tf.math.lgamma because we'd otherwise have to use SciPy, which is
# not a required dependency of core.
radius = np.asarray(radius)
dims = math_ops.cast(dims, dtype=radius.dtype)
return math_ops.exp((dims / 2.) * np.log(np.pi) -
math_ops.lgamma(1. + dims / 2.) +
dims * math_ops.log(radius))
def is_in_ball(x, radius, center):
return math_ops.cast(
linalg_ops.norm(x - center, axis=-1) <= radius, dtype=x.dtype)
def monte_carlo_hypersphere_volume(dist, num_samples, radius, center):
# https://en.wikipedia.org/wiki/Importance_sampling
x = dist.sample(num_samples, seed=seed)
      x = array_ops.identity(x)  # Invalidate bijector caching.
return math_ops.reduce_mean(
math_ops.exp(-dist.log_prob(x)) * is_in_ball(x, radius, center),
axis=0)
# Build graph.
with ops.name_scope(
"run_test_sample_consistent_log_prob",
values=[num_samples, radius, center] + dist._graph_parents): # pylint: disable=protected-access
batch_shape = dist.batch_shape_tensor()
actual_volume = actual_hypersphere_volume(
dims=dist.event_shape_tensor()[0], radius=radius)
sample_volume = monte_carlo_hypersphere_volume(
dist, num_samples=num_samples, radius=radius, center=center)
init_op = variables_ops.global_variables_initializer()
# Execute graph.
sess_run_fn(init_op)
[batch_shape_, actual_volume_,
sample_volume_] = sess_run_fn([batch_shape, actual_volume, sample_volume])
# Check results.
self.assertAllClose(
np.tile(actual_volume_, reps=batch_shape_),
sample_volume_,
rtol=rtol,
atol=atol)
def run_test_sample_consistent_mean_covariance(self,
sess_run_fn,
dist,
num_samples=int(1e5),
seed=24,
rtol=1e-2,
atol=0.1,
cov_rtol=None,
cov_atol=None):
"""Tests that sample/mean/covariance are consistent with each other.
"Consistency" means that `sample`, `mean`, `covariance`, etc all correspond
to the same distribution.
Args:
sess_run_fn: Python `callable` taking `list`-like of `Tensor`s and
returning a list of results after running one "step" of TensorFlow
computation, typically set to `sess.run`.
dist: Distribution instance or object which implements `sample`,
`log_prob`, `event_shape_tensor` and `batch_shape_tensor`.
num_samples: Python `int` scalar indicating the number of Monte-Carlo
samples to draw from `dist`.
seed: Python `int` indicating the seed to use when sampling from `dist`.
In general it is not recommended to use `None` during a test as this
increases the likelihood of spurious test failure.
rtol: Python `float`-type indicating the admissible relative error between
analytical and sample statistics.
atol: Python `float`-type indicating the admissible absolute error between
analytical and sample statistics.
cov_rtol: Python `float`-type indicating the admissible relative error
between analytical and sample covariance. Default: rtol.
cov_atol: Python `float`-type indicating the admissible absolute error
between analytical and sample covariance. Default: atol.
"""
x = dist.sample(num_samples, seed=seed)
sample_mean = math_ops.reduce_mean(x, axis=0)
sample_covariance = math_ops.reduce_mean(
_vec_outer_square(x - sample_mean), axis=0)
sample_variance = array_ops.matrix_diag_part(sample_covariance)
sample_stddev = math_ops.sqrt(sample_variance)
[
sample_mean_, sample_covariance_, sample_variance_, sample_stddev_,
mean_, covariance_, variance_, stddev_
] = sess_run_fn([
sample_mean,
sample_covariance,
sample_variance,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(mean_, sample_mean_, rtol=rtol, atol=atol)
self.assertAllClose(
covariance_,
sample_covariance_,
rtol=cov_rtol or rtol,
atol=cov_atol or atol)
self.assertAllClose(variance_, sample_variance_, rtol=rtol, atol=atol)
self.assertAllClose(stddev_, sample_stddev_, rtol=rtol, atol=atol)
def _vec_outer_square(x, name=None):
"""Computes the outer-product of a vector, i.e., x.T x."""
with ops.name_scope(name, "vec_osquare", [x]):
return x[..., :, array_ops.newaxis] * x[..., array_ops.newaxis, :]
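# A minimal, illustrative NumPy/SciPy sketch (not used by this module) of the
# importance-sampling estimate of the n-ball volume described in
# `VectorDistributionTestHelpers.run_test_sample_consistent_log_prob`, using a
# standard bivariate normal. The radius, center, seed and sample count are
# made-up toy values, and the Monte-Carlo estimate is only approximate.
if __name__ == "__main__":
  import numpy as _np
  from scipy import stats as _stats
  from scipy.special import gammaln as _gammaln

  rng = _np.random.RandomState(42)
  dims, radius, center, num_samples = 2, 1., 0., int(1e5)
  dist = _stats.multivariate_normal(mean=_np.zeros(dims))
  x = dist.rvs(size=num_samples, random_state=rng)
  in_ball = (_np.linalg.norm(x - center, axis=-1) <= radius).astype(_np.float64)
  # E_p[ 1{X in Ball(radius, center)} / p(X) ] estimates the ball's volume.
  sample_volume = _np.mean(_np.exp(-dist.logpdf(x)) * in_ball)
  actual_volume = _np.exp((dims / 2.) * _np.log(_np.pi)
                          - _gammaln(1. + dims / 2.)
                          + dims * _np.log(radius))
  print("monte-carlo volume:", sample_volume, "actual volume:", actual_volume)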
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/test_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Cauchy distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
__all__ = [
"Cauchy",
]
class Cauchy(distribution.Distribution):
"""The Cauchy distribution with location `loc` and scale `scale`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = 1 / (pi scale (1 + z**2))
z = (x - loc) / scale
```
where `loc` is the location, and `scale` is the scale.
The Cauchy distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e.
`Y ~ Cauchy(loc, scale)` is equivalent to,
```none
X ~ Cauchy(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Cauchy distribution.
dist = tfd.Cauchy(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Cauchy distributions.
dist = tfd.Cauchy(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
# Arguments are broadcast when possible.
# Define a batch of two scalar valued Cauchy distributions.
# Both have median 1, but different scales.
dist = tfd.Cauchy(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Cauchy"):
"""Construct Cauchy distributions.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the modes of the distribution(s).
      scale: Floating point tensor; the scales of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)]
if validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Cauchy, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"),
([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(self.loc.shape, self.scale.shape)
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
probs = random_ops.random_uniform(
shape=shape, minval=0., maxval=1., dtype=self.dtype, seed=seed)
return self._quantile(probs)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _cdf(self, x):
return math_ops.atan(self._z(x)) / np.pi + 0.5
def _log_cdf(self, x):
return math_ops.log1p(2 / np.pi * math_ops.atan(self._z(x))) - np.log(2)
def _log_unnormalized_prob(self, x):
return -math_ops.log1p(math_ops.square(self._z(x)))
def _log_normalization(self):
return np.log(np.pi) + math_ops.log(self.scale)
def _entropy(self):
h = np.log(4 * np.pi) + math_ops.log(self.scale)
return h * array_ops.ones_like(self.loc)
def _quantile(self, p):
return self.loc + self.scale * math_ops.tan(np.pi * (p - 0.5))
def _mode(self):
return self.loc * array_ops.ones_like(self.scale)
def _z(self, x):
"""Standardize input `x`."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
def _inv_z(self, z):
"""Reconstruct input `x` from a its normalized version."""
with ops.name_scope("reconstruct", values=[z]):
return z * self.scale + self.loc
def _mean(self):
if self.allow_nan_stats:
return array_ops.fill(self.batch_shape_tensor(),
self.dtype.as_numpy_dtype(np.nan))
else:
raise ValueError("`mean` is undefined for Cauchy distribution.")
def _stddev(self):
if self.allow_nan_stats:
return array_ops.fill(self.batch_shape_tensor(),
self.dtype.as_numpy_dtype(np.nan))
else:
raise ValueError("`stddev` is undefined for Cauchy distribution.")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/cauchy.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Conditional Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import conditional_distribution
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
# pylint: disable=protected-access
_concat_vectors = transformed_distribution._concat_vectors
# pylint: enable=protected-access
__all__ = [
"ConditionalTransformedDistribution",
]
_condition_kwargs_dict = {
"bijector_kwargs": ("Python dictionary of arg names/values "
"forwarded to the bijector."),
"distribution_kwargs": ("Python dictionary of arg names/values "
"forwarded to the distribution."),
}
class ConditionalTransformedDistribution(
conditional_distribution.ConditionalDistribution,
transformed_distribution.TransformedDistribution):
"""A TransformedDistribution that allows intrinsic conditioning."""
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _sample_n(self, n, seed=None,
bijector_kwargs=None,
distribution_kwargs=None):
sample_shape = _concat_vectors(
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty))
distribution_kwargs = distribution_kwargs or {}
x = self.distribution.sample(sample_shape=sample_shape,
seed=seed,
**distribution_kwargs)
x = self._maybe_rotate_dims(x)
# We'll apply the bijector in the `_call_sample_n` function.
return x
def _call_sample_n(self, sample_shape, seed, name,
bijector_kwargs=None,
distribution_kwargs=None):
# We override `_call_sample_n` rather than `_sample_n` so we can ensure that
# the result of `self.bijector.forward` is not modified (and thus caching
# works).
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
# First, generate samples. We will possibly generate extra samples in the
# event that we need to reinterpret the samples as part of the
# event_shape.
x = self._sample_n(n, seed, bijector_kwargs, distribution_kwargs)
# Next, we reshape `x` into its final form. We do this prior to the call
# to the bijector to ensure that the bijector caching works.
batch_event_shape = array_ops.shape(x)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
x = array_ops.reshape(x, final_shape)
# Finally, we apply the bijector's forward transformation. For caching to
# work, it is imperative that this is the last modification to the
# returned result.
bijector_kwargs = bijector_kwargs or {}
y = self.bijector.forward(x, **bijector_kwargs)
y = self._set_sample_static_shape(y, sample_shape)
return y
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
# For caching to work, it is imperative that the bijector is the first to
# modify the input.
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **bijector_kwargs)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_log_prob_for_one_fiber(y, x, ildj,
distribution_kwargs)
lp_on_fibers = [
self._finish_log_prob_for_one_fiber(y, x_i, ildj_i, distribution_kwargs)
for x_i, ildj_i in zip(x, ildj)]
return math_ops.reduce_logsumexp(array_ops.stack(lp_on_fibers), axis=0)
def _finish_log_prob_for_one_fiber(self, y, x, ildj, distribution_kwargs):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
return math_ops.cast(ildj, log_prob.dtype) + log_prob
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **bijector_kwargs)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_prob_for_one_fiber(y, x, ildj, distribution_kwargs)
prob_on_fibers = [
self._finish_prob_for_one_fiber(y, x_i, ildj_i, distribution_kwargs)
for x_i, ildj_i in zip(x, ildj)]
return sum(prob_on_fibers)
def _finish_prob_for_one_fiber(self, y, x, ildj, distribution_kwargs):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
return math_ops.exp(math_ops.cast(ildj, prob.dtype)) * prob
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_cdf is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("cdf is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_survival_function is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("survival_function is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _quantile(self, value, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("quantile is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("quantile is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
# x_q is the "qth quantile" of X iff q = P[X <= x_q]. Now, since X =
# g^{-1}(Y), q = P[X <= x_q] = P[g^{-1}(Y) <= x_q] = P[Y <= g(x_q)],
# implies the qth quantile of Y is g(x_q).
inv_cdf = self.distribution.quantile(value, **distribution_kwargs)
return self.bijector.forward(inv_cdf, **bijector_kwargs)
def _maybe_get_static_event_ndims(self):
if self.event_shape.ndims is not None:
return self.event_shape.ndims
event_ndims = array_ops.size(self.event_shape_tensor())
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if event_ndims_ is not None:
return event_ndims_
return event_ndims
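# --- Illustrative sketch (not part of the original module) ------------------
# `_quantile` above relies on the pushforward identity: if Y = g(X) with g
# monotone increasing, then quantile_Y(q) = g(quantile_X(q)).  A minimal
# NumPy/SciPy check with g = exp and X ~ Normal(0, 1), so Y is LogNormal
# (assumes scipy is available; neither library is used elsewhere in this file):
def _np_quantile_pushforward_sketch():
  import numpy as np
  from scipy import stats
  q = np.array([0.1, 0.5, 0.9])
  x_q = stats.norm.ppf(q)  # quantiles of the base distribution X
  y_q = np.exp(x_q)        # pushed forward through the bijector g = exp
  assert np.allclose(y_q, stats.lognorm.ppf(q, s=1.))
  return y_q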
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/conditional_transformed_distribution.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gumbel distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
class _Gumbel(distribution.Distribution):
"""The scalar Gumbel distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) of this distribution is,
```none
pdf(x; mu, sigma) = exp(-(x - mu) / sigma - exp(-(x - mu) / sigma)) / sigma
```
where `loc = mu` and `scale = sigma`.
The cumulative distribution function of this distribution is,
```cdf(x; mu, sigma) = exp(-exp(-(x - mu) / sigma))```
The Gumbel distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Gumbel(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Gumbel distribution.
dist = tfd.Gumbel(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Gumbels.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tfd.Gumbel(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Gumbels.
# Both have mean 1, but different scales.
dist = tfd.Gumbel(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Gumbel"):
"""Construct Gumbel distributions with location and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s).
scale must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(_Gumbel, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
sampled = -math_ops.log(-math_ops.log(uniform))
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return -math_ops.exp(-self._z(x))
def _cdf(self, x):
return math_ops.exp(-math_ops.exp(-self._z(x)))
def _log_unnormalized_prob(self, x):
z = self._z(x)
return - z - math_ops.exp(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 1 + math_ops.log(scale) + np.euler_gamma
def _mean(self):
return self.loc + self.scale * np.euler_gamma
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(6)
def _mode(self):
return self.loc * array_ops.ones_like(self.scale)
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
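# --- Illustrative sketch (not part of the original module) ------------------
# `_sample_n` above is inverse-CDF sampling: with U ~ Uniform(0, 1),
# loc + scale * (-log(-log(U))) has cdf exp(-exp(-(x - loc) / scale)).  A
# minimal check against scipy.stats.gumbel_r (assumes scipy is available):
def _np_gumbel_sampler_sketch(loc=1., scale=3.):
  from scipy import stats
  u = np.linspace(0.05, 0.95, 5)
  x = loc + scale * (-np.log(-np.log(u)))  # inverse-CDF transform of u
  assert np.allclose(stats.gumbel_r.cdf(x, loc=loc, scale=scale), u)
  return x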
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/gumbel.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing statistics of samples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import util
from tensorflow.python.ops.signal import fft_ops
__all__ = [
"auto_correlation",
"percentile",
]
# TODO(langmore) Write separate versions of this for real/complex dtype, taking
# advantage of optimized real-fft ops.
def auto_correlation(
x,
axis=-1,
max_lags=None,
center=True,
normalize=True,
name="auto_correlation"):
"""Auto correlation along one axis.
Given a `1-D` wide sense stationary (WSS) sequence `X`, the auto correlation
`RXX` may be defined as (with `E` expectation and `Conj` complex conjugate)
```
RXX[m] := E{ W[m] Conj(W[0]) } = E{ W[0] Conj(W[-m]) },
W[n] := (X[n] - MU) / S,
MU := E{ X[0] },
S**2 := E{ (X[0] - MU) Conj(X[0] - MU) }.
```
This function takes the viewpoint that `x` is (along one axis) a finite
sub-sequence of a realization of (WSS) `X`, and then uses `x` to produce an
estimate of `RXX[m]` as follows:
After extending `x` from length `L` to `inf` by zero padding, the auto
correlation estimate `rxx[m]` is computed for `m = 0, 1, ..., max_lags` as
```
rxx[m] := (L - m)**-1 sum_n w[n + m] Conj(w[n]),
w[n] := (x[n] - mu) / s,
mu := L**-1 sum_n x[n],
s**2 := L**-1 sum_n (x[n] - mu) Conj(x[n] - mu)
```
The error in this estimate is proportional to `1 / sqrt(len(x) - m)`, so users
often set `max_lags` small enough so that the entire output is meaningful.
Note that since `mu` is an imperfect estimate of `E{ X[0] }`, and we divide by
`len(x) - m` rather than `len(x) - m - 1`, our estimate of auto correlation
contains a slight bias, which goes to zero as `len(x) - m --> infinity`.
Args:
x: `float32` or `complex64` `Tensor`.
axis: Python `int`. The axis number along which to compute correlation.
Other dimensions index different batch members.
max_lags: Positive `int` tensor. The maximum value of `m` to consider
(in equation above). If `max_lags >= x.shape[axis]`, we effectively
re-set `max_lags` to `x.shape[axis] - 1`.
center: Python `bool`. If `False`, do not subtract the mean estimate `mu`
from `x[n]` when forming `w[n]`.
normalize: Python `bool`. If `False`, do not divide by the variance
estimate `s**2` when forming `w[n]`.
name: `String` name to prepend to created ops.
Returns:
`rxx`: `Tensor` of same `dtype` as `x`. `rxx.shape[i] = x.shape[i]` for
`i != axis`, and `rxx.shape[axis] = max_lags + 1`.
Raises:
TypeError: If `x` is not a supported type.
"""
# Implementation details:
# Extend length N / 2 1-D array x to length N by zero padding onto the end.
# Then, set
# F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
# It is not hard to see that
# F[x]_k Conj(F[x]_k) = F[R]_k, where
# R_m := sum_n x_n Conj(x_{(n - m) mod N}).
# One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].
# Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
# based version of estimating RXX.
# Note that this is a special case of the Wiener-Khinchin Theorem.
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# Rotate dimensions of x in order to put axis at the rightmost dim.
# FFT op requires this.
rank = util.prefer_static_rank(x)
if axis < 0:
axis = rank + axis
shift = rank - 1 - axis
# Suppose x.shape[axis] = T, so there are T "time" steps.
# ==> x_rotated.shape = B + [T],
# where B is x_rotated's batch shape.
x_rotated = util.rotate_transpose(x, shift)
if center:
x_rotated -= math_ops.reduce_mean(x_rotated, axis=-1, keepdims=True)
# x_len = N / 2 from above explanation. The length of x along axis.
# Get a value for x_len that works in all cases.
x_len = util.prefer_static_shape(x_rotated)[-1]
# TODO(langmore) Investigate whether this zero padding helps or hurts. At
# the moment is necessary so that all FFT implementations work.
# Zero pad to the next power of 2 greater than 2 * x_len, which equals
# 2**(ceil(Log_2(2 * x_len))). Note: Log_2(X) = Log_e(X) / Log_e(2).
x_len_float64 = math_ops.cast(x_len, np.float64)
target_length = math_ops.pow(
np.float64(2.),
math_ops.ceil(math_ops.log(x_len_float64 * 2) / np.log(2.)))
pad_length = math_ops.cast(target_length - x_len_float64, np.int32)
# We should have:
# x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
# = B + [T + pad_length]
x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)
dtype = x.dtype
if not dtype.is_complex:
if not dtype.is_floating:
raise TypeError("Argument x must have either float or complex dtype"
" found: {}".format(dtype))
x_rotated_pad = math_ops.complex(x_rotated_pad,
dtype.real_dtype.as_numpy_dtype(0.))
# Autocorrelation is IFFT of power-spectral density (up to some scaling).
fft_x_rotated_pad = fft_ops.fft(x_rotated_pad)
spectral_density = fft_x_rotated_pad * math_ops.conj(fft_x_rotated_pad)
# shifted_product is R[m] from above detailed explanation.
# It is the inner product sum_n X[n] * Conj(X[n - m]).
shifted_product = fft_ops.ifft(spectral_density)
# Cast back to real-valued if x was real to begin with.
shifted_product = math_ops.cast(shifted_product, dtype)
# Figure out if we can deduce the final static shape, and set max_lags.
# Use x_rotated as a reference, because it has the time dimension in the far
# right, and was created before we performed all sorts of crazy shape
# manipulations.
know_static_shape = True
if not x_rotated.shape.is_fully_defined():
know_static_shape = False
if max_lags is None:
max_lags = x_len - 1
else:
max_lags = ops.convert_to_tensor(max_lags, name="max_lags")
max_lags_ = tensor_util.constant_value(max_lags)
if max_lags_ is None or not know_static_shape:
know_static_shape = False
max_lags = math_ops.minimum(x_len - 1, max_lags)
else:
max_lags = min(x_len - 1, max_lags_)
# Chop off the padding.
# We allow users to provide a huge max_lags, but cut it off here.
# shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]
shifted_product_chopped = shifted_product[..., :max_lags + 1]
# If possible, set shape.
if know_static_shape:
chopped_shape = x_rotated.shape.as_list()
chopped_shape[-1] = min(x_len, max_lags + 1)
shifted_product_chopped.set_shape(chopped_shape)
# Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]). The
# other terms were zeros arising only due to zero padding.
# `denominator = (N / 2 - m)` (defined below) is the proper term to
# divide by to make this an unbiased estimate of the expectation
# E[X[n] Conj(X[n - m])].
x_len = math_ops.cast(x_len, dtype.real_dtype)
max_lags = math_ops.cast(max_lags, dtype.real_dtype)
denominator = x_len - math_ops.range(0., max_lags + 1.)
denominator = math_ops.cast(denominator, dtype)
shifted_product_rotated = shifted_product_chopped / denominator
if normalize:
shifted_product_rotated /= shifted_product_rotated[..., :1]
# Transpose dimensions back to those of x.
return util.rotate_transpose(shifted_product_rotated, -shift)
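# --- Illustrative sketch (not part of the original function) ----------------
# A plain-NumPy version of the estimator above for a single 1-D float series:
# standardize, zero pad to a power of two, take |FFT|**2, inverse FFT, then
# divide lag m by (L - m).  Sketch of the technique only, not a drop-in
# replacement for `auto_correlation`.
def _np_auto_correlation_sketch(x, max_lags):
  x = np.asarray(x, dtype=np.float64)
  length = x.shape[0]
  w = (x - x.mean()) / x.std()                    # center and normalize
  n_fft = int(2 ** np.ceil(np.log2(2 * length)))  # pad to a power of two
  f = np.fft.fft(w, n=n_fft)
  r = np.fft.ifft(f * np.conj(f)).real[:max_lags + 1]
  return r / (length - np.arange(max_lags + 1))   # unbiased denominator (L - m)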
# TODO(langmore) To make equivalent to numpy.percentile:
# Make work with a sequence of floats or single float for 'q'.
# Make work with "linear", "midpoint" interpolation. (linear should be default)
def percentile(x,
q,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None):
"""Compute the `q`-th percentile of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'lower' interpolation
x = [1., 2., 3., 4.]
percentile(x, q=30., interpolation='lower')
==> 1.0
# Get 100th percentile (maximum). By default, this is computed over every dim
x = [[1., 2.]
[3., 4.]]
percentile(x, q=100.)
==> 4.0
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
x = [[1., 2.]
[3., 4.]]
percentile(x, q=100., axis=[0])
==> [3., 4.]
```
Compare to `numpy.percentile`.
Args:
x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar `Tensor` in `[0, 100]`. The percentile.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values.
The axis that hold independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {"lower", "higher", "nearest"}. Default: "nearest"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1.
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity.
If False, and arguments are incorrect, correct behavior is not guaranteed.
name: A Python string name to give this `Op`. Default is "percentile"
Returns:
A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
`axis` is `None`, a scalar.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
"""
name = name or "percentile"
allowed_interpolations = {"lower", "higher", "nearest"}
if interpolation is None:
interpolation = "nearest"
else:
if interpolation not in allowed_interpolations:
raise ValueError("Argument 'interpolation' must be in %s. Found %s" %
(allowed_interpolations, interpolation))
with ops.name_scope(name, values=[x, q]):
x = ops.convert_to_tensor(x, name="x")
# Double is needed here and below, else we get the wrong index if the array
# is huge along axis.
q = math_ops.cast(q, dtypes.float64, name="q")
_get_static_ndims(q, expect_ndims=0)
if validate_args:
q = control_flow_ops.with_dependencies([
check_ops.assert_rank(q, 0),
check_ops.assert_greater_equal(q, math_ops.cast(0., dtypes.float64)),
check_ops.assert_less_equal(q, math_ops.cast(100., dtypes.float64))
], q)
if axis is None:
y = array_ops.reshape(x, [-1])
else:
axis = ops.convert_to_tensor(axis, name="axis")
check_ops.assert_integer(axis)
axis_ndims = _get_static_ndims(
axis, expect_static=True, expect_ndims_no_more_than=1)
axis_const = tensor_util.constant_value(axis)
if axis_const is None:
raise ValueError(
"Expected argument 'axis' to be statically available. Found: %s" %
axis)
axis = axis_const
if axis_ndims == 0:
axis = [axis]
axis = [int(a) for a in axis]
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative(axis, x_ndims)
y = _move_dims_to_flat_end(x, axis, x_ndims)
frac_at_q_or_above = 1. - q / 100.
d = math_ops.cast(array_ops.shape(y)[-1], dtypes.float64)
if interpolation == "lower":
index = math_ops.ceil((d - 1) * frac_at_q_or_above)
elif interpolation == "higher":
index = math_ops.floor((d - 1) * frac_at_q_or_above)
elif interpolation == "nearest":
index = math_ops.round((d - 1) * frac_at_q_or_above)
# If d is gigantic, then we would have d == d - 1, even in double... So
# let's use max/min to avoid out of bounds errors.
d = array_ops.shape(y)[-1]
# d - 1 will be distinct from d in int32.
index = clip_ops.clip_by_value(math_ops.cast(index, dtypes.int32), 0, d - 1)
# Sort everything, not just the top 'k' entries, which allows multiple calls
# to sort only once (under the hood) and use CSE.
sorted_y = _sort_tensor(y)
# result.shape = B
result = sorted_y[..., index]
result.set_shape(y.get_shape()[:-1])
if keep_dims:
if axis is None:
# ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
ones_vec = array_ops.ones(
shape=[_get_best_effort_ndims(x)], dtype=dtypes.int32)
result *= array_ops.ones(ones_vec, dtype=x.dtype)
else:
result = _insert_back_keep_dims(result, axis)
return result
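# --- Illustrative sketch (not part of the original function) ----------------
# The core index arithmetic of `percentile` above, for a 1-D vector and the
# default 'nearest' interpolation, in plain NumPy.  The sort is descending to
# mirror `_sort_tensor`, which uses `top_k`:
def _np_percentile_nearest_sketch(x, q):
  y = np.sort(np.asarray(x, dtype=np.float64))[::-1]  # descending, like top_k
  d = y.shape[0]
  frac_at_q_or_above = 1. - q / 100.
  index = int(np.clip(np.round((d - 1) * frac_at_q_or_above), 0, d - 1))
  return y[index]  # e.g. x = [1., 2., 3., 4.], q = 30. gives 2.0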
def _get_static_ndims(x,
expect_static=False,
expect_ndims=None,
expect_ndims_no_more_than=None,
expect_ndims_at_least=None):
"""Get static number of dimensions and assert that some expectations are met.
This function returns the number of dimensions "ndims" of x, as a Python int.
The optional expect arguments are used to check the ndims of x, but this is
only done if the static ndims of x is not None.
Args:
x: A Tensor.
expect_static: Expect `x` to have statically defined `ndims`.
expect_ndims: Optional Python integer. If provided, assert that x has
number of dimensions equal to this.
expect_ndims_no_more_than: Optional Python integer. If provided, assert
that x has no more than this many dimensions.
expect_ndims_at_least: Optional Python integer. If provided, assert that
x has at least this many dimensions.
Returns:
ndims: A Python integer.
Raises:
ValueError: If any of the expectations above are violated.
"""
ndims = x.get_shape().ndims
if ndims is None:
shape_const = tensor_util.constant_value(array_ops.shape(x))
if shape_const is not None:
ndims = len(shape_const)
if ndims is None:
if expect_static:
raise ValueError(
"Expected argument 'x' to have statically defined 'ndims'. Found: " %
x)
return
if expect_ndims is not None:
ndims_message = ("Expected argument 'x' to have ndims %s. Found tensor %s"
% (expect_ndims, x))
if ndims != expect_ndims:
raise ValueError(ndims_message)
if expect_ndims_at_least is not None:
ndims_at_least_message = (
"Expected argument 'x' to have ndims >= %d. Found tensor %s" % (
expect_ndims_at_least, x))
if ndims < expect_ndims_at_least:
raise ValueError(ndims_at_least_message)
if expect_ndims_no_more_than is not None:
ndims_no_more_than_message = (
"Expected argument 'x' to have ndims <= %d. Found tensor %s" % (
expect_ndims_no_more_than, x))
if ndims > expect_ndims_no_more_than:
raise ValueError(ndims_no_more_than_message)
return ndims
def _get_best_effort_ndims(x,
expect_ndims=None,
expect_ndims_at_least=None,
expect_ndims_no_more_than=None):
"""Get static ndims if possible. Fallback on `tf.rank(x)`."""
ndims_static = _get_static_ndims(
x,
expect_ndims=expect_ndims,
expect_ndims_at_least=expect_ndims_at_least,
expect_ndims_no_more_than=expect_ndims_no_more_than)
if ndims_static is not None:
return ndims_static
return array_ops.rank(x)
def _insert_back_keep_dims(x, axis):
"""Insert the dims in `axis` back as singletons after being removed.
Args:
x: `Tensor`.
axis: Python list of integers.
Returns:
`Tensor` with same values as `x`, but additional singleton dimensions.
"""
for i in sorted(axis):
x = array_ops.expand_dims(x, axis=i)
return x
def _make_static_axis_non_negative(axis, ndims):
"""Convert possibly negatively indexed axis to non-negative.
Args:
axis: Iterable over Python integers.
ndims: Number of dimensions into which axis indexes.
Returns:
A list of non-negative Python integers.
Raises:
ValueError: If values in `axis` are too big/small to index into `ndims`.
"""
non_negative_axis = []
for d in axis:
if d >= 0:
if d >= ndims:
raise ValueError("dim %d not in the interval [0, %d]." % (d, ndims - 1))
non_negative_axis.append(d)
else:
if d < -1 * ndims:
raise ValueError(
"Negatively indexed dim %d not in the interval [-%d, -1]" % (d,
ndims))
non_negative_axis.append(ndims + d)
return non_negative_axis
def _move_dims_to_flat_end(x, axis, x_ndims):
"""Move dims corresponding to `axis` in `x` to the end, then flatten.
Args:
x: `Tensor` with shape `[B0,B1,...,Bb]`.
axis: Python list of indices into dimensions of `x`.
x_ndims: Python integer holding number of dimensions in `x`.
Returns:
`Tensor` with value from `x` and dims in `axis` moved to end into one single
dimension.
"""
# Suppose x.shape = [a, b, c, d]
# Suppose axis = [1, 3]
# front_dims = [0, 2] in example above.
front_dims = sorted(set(range(x_ndims)).difference(axis))
# x_permed.shape = [a, c, b, d]
x_permed = array_ops.transpose(x, perm=front_dims + list(axis))
if x.get_shape().is_fully_defined():
x_shape = x.get_shape().as_list()
# front_shape = [a, c], end_shape = [b * d]
front_shape = [x_shape[i] for i in front_dims]
end_shape = [np.prod([x_shape[i] for i in axis])]
full_shape = front_shape + end_shape
else:
front_shape = array_ops.shape(x_permed)[:x_ndims - len(axis)]
end_shape = [-1]
full_shape = array_ops.concat([front_shape, end_shape], axis=0)
return array_ops.reshape(x_permed, shape=full_shape)
def _sort_tensor(tensor):
"""Use `top_k` to sort a `Tensor` along the last dimension."""
sorted_, _ = nn_ops.top_k(tensor, k=array_ops.shape(tensor)[-1])
return sorted_
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/sample_stats.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-dimensional (Vector) SinhArcsinh transformation of a distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.util import deprecation
__all__ = [
"VectorSinhArcsinhDiag",
]
class VectorSinhArcsinhDiag(transformed_distribution.TransformedDistribution):
"""The (diagonal) SinhArcsinh transformation of a distribution on `R^k`.
This distribution models a random vector `Y = (Y1,...,Yk)`, making use of
a `SinhArcsinh` transformation (which has adjustable tailweight and skew),
a rescaling, and a shift.
The `SinhArcsinh` transformation of the Normal is described in great depth in
[Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).
Here we use a slightly different parameterization, in terms of `tailweight`
and `skewness`. Additionally we allow for distributions other than Normal,
and control over `scale` as well as a "shift" parameter `loc`.
#### Mathematical Details
Given iid random vector `Z = (Z1,...,Zk)`, we define the VectorSinhArcsinhDiag
transformation of `Z`, `Y`, parameterized by
`(loc, scale, skewness, tailweight)`, via the relation (with `@` denoting
matrix multiplication):
```
Y := loc + scale @ F(Z) * (2 / F_0(2))
F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
```
This distribution is similar to the location-scale transformation
`L(Z) := loc + scale @ Z` in the following ways:
* If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then
`Y = L(Z)` exactly.
* `loc` is used in both to shift the result by a constant factor.
* The multiplication of `scale` by `2 / F_0(2)` ensures that if `skewness = 0`
`P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.
Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond
`loc + 2 * scale` are the same.
This distribution is different than `loc + scale @ Z` due to the
reshaping done by `F`:
* Positive (negative) `skewness` leads to positive (negative) skew.
* positive skew means, the mode of `F(Z)` is "tilted" to the right.
* positive skew means positive values of `F(Z)` become more likely, and
negative values become less likely.
* Larger (smaller) `tailweight` leads to fatter (thinner) tails.
* Fatter tails mean larger values of `|F(Z)|` become more likely.
* `tailweight < 1` leads to a distribution that is "flat" around `Y = loc`,
and a very steep drop-off in the tails.
* `tailweight > 1` leads to a distribution more peaked at the mode with
heavier tails.
To see the argument about the tails, note that for `|Z| >> 1` and
`|Z| >> (|skewness| * tailweight)**tailweight`, we have
`Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.
To see the argument regarding multiplying `scale` by `2 / F_0(2)`,
```
P[(Y - loc) / scale <= 2] = P[F(Z) * (2 / F_0(2)) <= 2]
= P[F(Z) <= F_0(2)]
= P[Z <= 2] (if F = F_0).
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
skewness=None,
tailweight=None,
distribution=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalLinearOperator"):
"""Construct VectorSinhArcsinhDiag distribution on `R^k`.
The arguments `scale_diag` and `scale_identity_multiplier` combine to
define the diagonal `scale` referred to in this class docstring:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scale-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scale
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale`
is the `Identity`.
skewness: Skewness parameter. floating-point `Tensor` with shape
broadcastable with `event_shape`.
tailweight: Tailweight parameter. floating-point `Tensor` with shape
broadcastable with `event_shape`.
distribution: `tf.Distribution`-like instance. Distribution from which `k`
iid samples are used as input to transformation `F`. Default is
`tfp.distributions.Normal(loc=0., scale=1.)`.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
a VectorSinhArcsinhDiag sample and `distribution` is not
`FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
the gradient will be incorrect!
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified (so the
event size cannot be inferred from `loc` or `scale_diag`).
"""
parameters = dict(locals())
with ops.name_scope(
name,
values=[
loc, scale_diag, scale_identity_multiplier, skewness, tailweight
]) as name:
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
tailweight = 1. if tailweight is None else tailweight
has_default_skewness = skewness is None
skewness = 0. if skewness is None else skewness
# Recall, with Z a random variable,
# Y := loc + C * F(Z),
# F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
# F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
# C := 2 * scale / F_0(2)
# Construct shapes and 'scale' out of the scale_* and loc kwargs.
# scale_linop is only an intermediary to:
# 1. get shapes from looking at loc and the two scale args.
# 2. combine scale_diag with scale_identity_multiplier, which gives us
# 'scale', which in turn gives us 'C'.
scale_linop = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale_linop)
# scale_linop.diag_part() is efficient since it is a diag type linop.
scale_diag_part = scale_linop.diag_part()
dtype = scale_diag_part.dtype
if distribution is None:
distribution = normal.Normal(
loc=array_ops.zeros([], dtype=dtype),
scale=array_ops.ones([], dtype=dtype),
allow_nan_stats=allow_nan_stats)
else:
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
scale_diag_part = control_flow_ops.with_dependencies(
asserts, scale_diag_part)
# Make the SAS bijector, 'F'.
skewness = ops.convert_to_tensor(skewness, dtype=dtype, name="skewness")
tailweight = ops.convert_to_tensor(
tailweight, dtype=dtype, name="tailweight")
f = bijectors.SinhArcsinh(
skewness=skewness, tailweight=tailweight)
if has_default_skewness:
f_noskew = f
else:
f_noskew = bijectors.SinhArcsinh(
skewness=skewness.dtype.as_numpy_dtype(0.),
tailweight=tailweight)
# Make the Affine bijector, Z --> loc + C * Z.
c = 2 * scale_diag_part / f_noskew.forward(
ops.convert_to_tensor(2, dtype=dtype))
affine = bijectors.Affine(
shift=loc, scale_diag=c, validate_args=validate_args)
bijector = bijectors.Chain([affine, f])
super(VectorSinhArcsinhDiag, self).__init__(
distribution=distribution,
bijector=bijector,
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
self._loc = loc
self._scale = scale_linop
self._tailweight = tailweight
self._skewness = skewness
@property
def loc(self):
"""The `loc` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
return self._loc
@property
def scale(self):
"""The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
return self._scale
@property
def tailweight(self):
"""Controls the tail decay. `tailweight > 1` means faster than Normal."""
return self._tailweight
@property
def skewness(self):
"""Controls the skewness. `Skewness > 0` means right skew."""
return self._skewness
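# --- Illustrative sketch (not part of the original module) ------------------
# The scalar transformation used above, in plain NumPy:
#   F(z)   = sinh((arcsinh(z) + skewness) * tailweight)
#   F_0(2) = sinh(arcsinh(2) * tailweight)
#   Y      = loc + scale * F(z) * (2 / F_0(2))
# With skewness = 0 and tailweight = 1 this reduces to loc + scale * z.
def _np_sinh_arcsinh_sketch(z, loc=0., scale=1., skewness=0., tailweight=1.):
  import numpy as np  # numpy is not otherwise used in this module
  f = np.sinh((np.arcsinh(z) + skewness) * tailweight)
  f0_of_2 = np.sinh(np.arcsinh(2.) * tailweight)
  return loc + scale * f * (2. / f0_of_2)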
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal distribution: conjugate posterior closed form calculations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import normal
def normal_conjugates_known_scale_posterior(prior, scale, s, n):
"""Posterior Normal distribution with conjugate prior on the mean.
This model assumes that `n` observations (with sum `s`) come from a
Normal with unknown mean `loc` (described by the Normal `prior`)
and known variance `scale**2`. The "known scale posterior" is
the distribution of the unknown `loc`.
Accepts a prior Normal distribution object, having parameters
`loc0` and `scale0`, as well as known `scale` values of the predictive
distribution(s) (also assumed Normal),
and statistical estimates `s` (the sum(s) of the observations) and
`n` (the number(s) of observations).
Returns a posterior (also Normal) distribution object, with parameters
`(loc', scale'**2)`, where:
```
mu ~ N(mu', sigma'**2)
sigma'**2 = 1/(1/sigma0**2 + n/sigma**2),
mu' = (mu0/sigma0**2 + s/sigma**2) * sigma'**2.
```
Distribution parameters from `prior`, as well as `scale`, `s`, and `n`,
will broadcast in the case of multidimensional sets of parameters.
Args:
prior: `Normal` object of type `dtype`:
the prior distribution having parameters `(loc0, scale0)`.
scale: tensor of type `dtype`, taking values `scale > 0`.
The known stddev parameter(s).
s: Tensor of type `dtype`. The sum(s) of observations.
n: Tensor of type `int`. The number(s) of observations.
Returns:
A new Normal posterior distribution object for the unknown observation
mean `loc`.
Raises:
TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
Normal object.
"""
if not isinstance(prior, normal.Normal):
raise TypeError("Expected prior to be an instance of type Normal")
if s.dtype != prior.dtype:
raise TypeError(
"Observation sum s.dtype does not match prior dtype: %s vs. %s"
% (s.dtype, prior.dtype))
n = math_ops.cast(n, prior.dtype)
scale0_2 = math_ops.square(prior.scale)
scale_2 = math_ops.square(scale)
scalep_2 = 1.0/(1/scale0_2 + n/scale_2)
return normal.Normal(
loc=(prior.loc/scale0_2 + s/scale_2) * scalep_2,
scale=math_ops.sqrt(scalep_2))
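# --- Illustrative sketch (not part of the original module) ------------------
# The conjugate update above, in plain NumPy for scalar parameters: precisions
# add, and the posterior mean is the precision-weighted combination of the
# prior mean and the observation sum.
def _np_known_scale_posterior_sketch(loc0, scale0, scale, s, n):
  import numpy as np  # numpy is not otherwise used in this module
  post_var = 1. / (1. / scale0**2 + n / scale**2)
  post_loc = (loc0 / scale0**2 + s / scale**2) * post_var
  return post_loc, np.sqrt(post_var)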
def normal_conjugates_known_scale_predictive(prior, scale, s, n):
"""Posterior predictive Normal distribution w. conjugate prior on the mean.
This model assumes that `n` observations (with sum `s`) come from a
Normal with unknown mean `loc` (described by the Normal `prior`)
and known variance `scale**2`. The "known scale predictive"
is the distribution of new observations, conditioned on the existing
observations and our prior.
Accepts a prior Normal distribution object, having parameters
`loc0` and `scale0`, as well as known `scale` values of the predictive
distribution(s) (also assumed Normal),
and statistical estimates `s` (the sum(s) of the observations) and
`n` (the number(s) of observations).
Calculates the Normal distribution(s) `p(x | sigma**2)`:
```
p(x | sigma**2) = int N(x | mu, sigma**2)N(mu | prior.loc, prior.scale**2) dmu
= N(x | prior.loc, sigma**2 + prior.scale**2)
```
Returns the predictive posterior distribution object, with parameters
`(loc', scale'**2)`, where:
```
sigma_n**2 = 1/(1/sigma0**2 + n/sigma**2),
mu' = (mu0/sigma0**2 + s/sigma**2) * sigma_n**2.
sigma'**2 = sigma_n**2 + sigma**2,
```
Distribution parameters from `prior`, as well as `scale`, `s`, and `n`,
will broadcast in the case of multidimensional sets of parameters.
Args:
prior: `Normal` object of type `dtype`:
the prior distribution having parameters `(loc0, scale0)`.
scale: tensor of type `dtype`, taking values `scale > 0`.
The known stddev parameter(s).
s: Tensor of type `dtype`. The sum(s) of observations.
n: Tensor of type `int`. The number(s) of observations.
Returns:
A new Normal predictive distribution object.
Raises:
TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
Normal object.
"""
if not isinstance(prior, normal.Normal):
raise TypeError("Expected prior to be an instance of type Normal")
if s.dtype != prior.dtype:
raise TypeError(
"Observation sum s.dtype does not match prior dtype: %s vs. %s"
% (s.dtype, prior.dtype))
n = math_ops.cast(n, prior.dtype)
scale0_2 = math_ops.square(prior.scale)
scale_2 = math_ops.square(scale)
scalep_2 = 1.0/(1/scale0_2 + n/scale_2)
return normal.Normal(
loc=(prior.loc/scale0_2 + s/scale_2) * scalep_2,
scale=math_ops.sqrt(scalep_2 + scale_2))
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/normal_conjugate_posteriors.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusScale",
]
class MultivariateNormalDiag(
mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 2-variate Gaussian.
mvn = tfd.MultivariateNormalDiag(
loc=[1., -1],
scale_diag=[1, 2.])
mvn.mean().eval()
# ==> [1., -1]
mvn.stddev().eval()
# ==> [1., 2]
# Evaluate this on an observation in `R^2`, returning a scalar.
mvn.prob([-1., 0]).eval() # shape: []
# Initialize a 3-batch, 2-variate scaled-identity Gaussian.
mvn = tfd.MultivariateNormalDiag(
loc=[1., -1],
scale_identity_multiplier=[1, 2., 3])
mvn.mean().eval() # shape: [3, 2]
# ==> [[1., -1]
# [1, -1],
# [1, -1]]
mvn.stddev().eval() # shape: [3, 2]
# ==> [[1., 1],
# [2, 2],
# [3, 3]]
# Evaluate this on an observation in `R^2`, returning a length-3 vector.
mvn.prob([-1., 0]).eval() # shape: [3]
# Initialize a 2-batch of 3-variate Gaussians.
mvn = tfd.MultivariateNormalDiag(
loc=[[1., 2, 3],
[11, 22, 33]],  # shape: [2, 3]
scale_diag=[[1., 2, 3],
[0.5, 1, 1.5]]) # shape: [2, 3]
# Evaluate this on two observations, each in `R^3`, returning a length-2
# vector.
x = [[-1., 0, 1],
[-11, 0, 11.]] # shape: [2, 3].
mvn.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified (so the
event size cannot be inferred from `loc` or `scale_diag`).
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
# LinearOperatorDiag has an assert_non_singular method that is called by
# the Bijector.
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
super(MultivariateNormalDiag, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
class MultivariateNormalDiagWithSoftplusScale(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale_diag,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[scale_diag]) as name:
super(MultivariateNormalDiagWithSoftplusScale, self).__init__(
loc=loc,
scale_diag=nn.softplus(scale_diag),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/mvn_diag.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution class initialized with a full covariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import mvn_tril
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalFullCovariance",
]
class MultivariateNormalFullCovariance(mvn_tril.MultivariateNormalTriL):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`covariance_matrix` matrices that are the covariance.
This is different from the other multivariate normals, which are parameterized
by a matrix more akin to the standard deviation.
#### Mathematical Details
The probability density function (pdf) is, with `@` as matrix multiplication,
```none
pdf(x; loc, covariance_matrix) = exp(-0.5 y) / Z,
y = (x - loc)^T @ inv(covariance_matrix) @ (x - loc)
Z = (2 pi)**(0.5 k) |det(covariance_matrix)|**(0.5).
```
where:
* `loc` is a vector in `R^k`,
* `covariance_matrix` is an `R^{k x k}` symmetric positive definite matrix,
* `Z` denotes the normalization constant.
Additional leading dimensions (if any) in `loc` and `covariance_matrix` allow
for batch dimensions.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed e.g. as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
scale = Cholesky(covariance_matrix)
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
mvn = tfd.MultivariateNormalFullCovariance(
loc=mu,
covariance_matrix=cov)
mvn.mean().eval()
# ==> [1., 2, 3]
# Covariance agrees with covariance_matrix.
mvn.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
# Compute the pdf of an observation in `R^3`; return a scalar.
mvn.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
covariance_matrix = ... # shape: [2, 3, 3], symmetric, positive definite.
mvn = tfd.MultivariateNormalFullCovariance(
loc=mu,
covariance_matrix=covariance_matrix)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
```
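As a further sketch (assuming `import tensorflow as tf` in addition to the
imports above), the equivalence with a Cholesky-based parameterization noted
in the Mathematical Details can be checked directly:
```python
# A minimal sketch; `mu` and `cov` are the single 3-variate example above.
chol = tf.linalg.cholesky(cov)
mvn_tril = tfd.MultivariateNormalTriL(loc=mu, scale_tril=chol)
# Agrees with mvn.prob([-1., 0, 1]) up to numerical precision.
mvn_tril.prob([-1., 0, 1]).eval()
```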
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
covariance_matrix=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalFullCovariance"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and
`covariance_matrix` arguments.
The `event_shape` is given by the last dimension of the matrix implied by
`covariance_matrix`. The last dimension of `loc` (if provided) must
broadcast with this.
A non-batch `covariance_matrix` matrix is a `k x k` symmetric positive
definite matrix. In other words it is (real) symmetric with all eigenvalues
strictly positive.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
covariance_matrix: Floating-point, symmetric positive definite `Tensor` of
same `dtype` as `loc`. The strict upper triangle of `covariance_matrix`
is ignored, so if `covariance_matrix` is not symmetric no error will be
raised (unless `validate_args is True`). `covariance_matrix` has shape
`[B1, ..., Bb, k, k]` where `b >= 0` and `k` is the event size.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if neither `loc` nor `covariance_matrix` are specified.
"""
parameters = dict(locals())
# Convert the covariance_matrix up to a scale_tril and call MVNTriL.
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[loc, covariance_matrix]):
if covariance_matrix is None:
scale_tril = None
else:
covariance_matrix = ops.convert_to_tensor(
covariance_matrix, name="covariance_matrix")
if validate_args:
covariance_matrix = control_flow_ops.with_dependencies([
check_ops.assert_near(
covariance_matrix,
array_ops.matrix_transpose(covariance_matrix),
message="Matrix was not symmetric")], covariance_matrix)
# No need to validate that covariance_matrix is non-singular.
# LinearOperatorLowerTriangular has an assert_non_singular method that
# is called by the Bijector.
# However, cholesky() ignores the upper triangular part, so we do need
# to separately assert symmetric.
scale_tril = linalg_ops.cholesky(covariance_matrix)
super(MultivariateNormalFullCovariance, self).__init__(
loc=loc,
scale_tril=scale_tril,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/mvn_full_covariance.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Exponential distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import exponential
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = ["VectorExponentialLinearOperator"]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorExponentialLinearOperator(
transformed_distribution.TransformedDistribution):
"""The vectorization of the Exponential distribution on `R^k`.
The vector exponential distribution is defined over a subset of `R^k`, and
parameterized by a (batch of) length-`k` `loc` vector and a (batch of) `k x k`
`scale` matrix: `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is
```none
pdf(y; loc, scale) = exp(-||x||_1) / Z, for y in S(loc, scale),
x = inv(scale) @ (y - loc),
Z = |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `S = {loc + scale @ x : x in R^k, x_1 > 0, ..., x_k > 0}`, is the image of
the positive orthant,
* `||x||_1` denotes the `l1` norm of `x`, `sum_i |x_i|`,
* `Z` denotes the normalization constant.
The VectorExponential distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Exponential(rate=1)
Y = (Y_1, ..., Y_k) = scale @ X + loc
```
#### About `VectorExponential` and `Vector` distributions in TensorFlow.
The `VectorExponential` is a non-standard distribution that has useful
properties.
The marginals `Y_1, ..., Y_k` are *not* Exponential random variables, due to
the fact that the sum of Exponential random variables is not Exponential.
Instead, `Y` is a vector whose components are linear combinations of
Exponential random variables. Thus, `Y` lives in the vector space generated
by `vectors` of Exponential distributions. This allows the user to decide the
mean and covariance (by setting `loc` and `scale`), while preserving some
properties of the Exponential distribution. In particular, the tails of `Y_i`
will be (up to polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Exponential random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
mat = [[1.0, 0.1],
[0.1, 1.0]]
vex = tfd.VectorExponentialLinearOperator(
scale=tf.linalg.LinearOperatorFullMatrix(mat))
# Compute the pdf of an `R^2` observation; return a scalar.
vex.prob([1., 2.]).eval() # shape: []
# Initialize a 2-batch of 3-variate Vector Exponential's.
mu = [[1., 2, 3],
[1., 0, 0]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vex = tfd.VectorExponentialLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[1.9, 2.2, 3.1],
[10., 1.0, 9.0]] # shape: [2, 3]
vex.prob(x).eval() # shape: [2]
```
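As a further sketch (not in the original examples), the mean and standard
deviation follow from each `X_i ~ Exponential(rate=1)` having unit mean and
unit variance, so `E[Y] = loc + scale @ ones` and `Cov(Y) = scale @ scale.T`:
```python
# A minimal sketch; `vex`, `mu`, and `scale_diag` are from the second
# example above.
vex.mean().eval()
# ==> [[2., 4, 6],
#      [1.5, 1, 1.5]]
vex.stddev().eval()
# ==> [[1., 2, 3],
#      [0.5, 1, 1.5]]
```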
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="VectorExponentialLinearOperator"):
"""Construct Vector Exponential distribution supported on a subset of `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by the last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents) as name:
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(VectorExponentialLinearOperator, self).__init__(
distribution=exponential.Exponential(rate=array_ops.ones(
[], dtype=scale.dtype), allow_nan_stats=allow_nan_stats),
bijector=bijectors.AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorExponentialLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorExponentialLinearOperator, self)._prob(x)
def _mean(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then E[X] = loc + L1, where 1 is the vector of ones.
scale_x_ones = self.bijector.scale.matvec(
array_ops.ones(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_ones
return array_ops.identity(self.loc) + scale_x_ones
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Exponential(0, 1).
# Then this distribution is
# X = loc + LW,
# and then since Cov(wi, wj) = 1 if i=j, and 0 otherwise,
# Cov(X) = L Cov(W) L^T = L L^T.
if distribution_util.is_diagonal_scale(self.scale):
return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
else:
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(
array_ops.matrix_diag_part(self.scale.matmul(self.scale.to_dense())))
else:
return math_ops.sqrt(
array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
scale_x_zeros = self.bijector.scale.matvec(
array_ops.zeros(self._mode_mean_shape(), self.dtype))
if self.loc is None:
return scale_x_zeros
return array_ops.identity(self.loc) + scale_x_zeros
def _mode_mean_shape(self):
"""Shape for the mode/mean Tensors."""
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
return shape
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/vector_exponential_linear_operator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The same-family Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class MixtureSameFamily(distribution.Distribution):
"""Mixture (same-family) distribution.
The `MixtureSameFamily` distribution implements a (batch of) mixture
distribution where all components are from different parameterizations of the
same distribution type. It is parameterized by a `Categorical` "selecting
distribution" (over `k` components) and a components distribution, i.e., a
`Distribution` with a rightmost batch shape (equal to `[k]`) which indexes
each (batch of) component.
#### Examples
```python
import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions
### Create a mixture of two scalar Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.Normal(
loc=[-1., 1], # One for each component.
scale=[0.1, 0.5])) # And same here.
gm.mean()
# ==> 0.4
gm.variance()
# ==> 1.018
# Plot PDF.
x = np.linspace(-2., 3., int(1e4), dtype=np.float32)
import matplotlib.pyplot as plt
plt.plot(x, gm.prob(x).eval());
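# Sanity check (a sketch, not part of the original example): by the law of
# total variance,
#   Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
#          = (0.3 * 0.1**2 + 0.7 * 0.5**2)
#            + (0.3 * (-1)**2 + 0.7 * 1**2 - 0.4**2)
#          = 0.178 + 0.840 = 1.018,
# which matches gm.variance() above.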
### Create a mixture of two Bivariate Gaussians:
gm = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
probs=[0.3, 0.7]),
components_distribution=tfd.MultivariateNormalDiag(
loc=[[-1., 1], # component 1
[1, -1]], # component 2
scale_identity_multiplier=[.3, .6]))
gm.mean()
# ==> array([ 0.4, -0.4], dtype=float32)
gm.covariance()
# ==> array([[ 1.119, -0.84],
# [-0.84, 1.119]], dtype=float32)
# Plot PDF contours.
def meshgrid(x, y=None):
  if y is None: y = x
[gx, gy] = np.meshgrid(x, y, indexing='ij')
gx, gy = np.float32(gx), np.float32(gy)
grid = np.concatenate([gx.ravel()[None, :], gy.ravel()[None, :]], axis=0)
return grid.T.reshape(x.size, y.size, 2)
grid = meshgrid(np.linspace(-2, 2, 100, dtype=np.float32))
plt.contour(grid[..., 0], grid[..., 1], gm.prob(grid).eval());
```
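As a further sketch (not in the original examples), sampling and `log_prob`
follow the usual shape semantics:
```python
# A minimal sketch; `gm` is the bivariate mixture defined above.
# Samples have shape sample_shape + batch_shape + event_shape = [10, 2].
samples = gm.sample(10)
# log_prob of a single point in R^2 marginalizes over the mixture
# components and returns a scalar, since batch_shape is [].
gm.log_prob([0., 0.]).eval()  # shape: []
```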
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
mixture_distribution,
components_distribution,
validate_args=False,
allow_nan_stats=True,
name="MixtureSameFamily"):
"""Construct a `MixtureSameFamily` distribution.
Args:
mixture_distribution: `tfp.distributions.Categorical`-like instance.
Manages the probability of selecting components. The number of
categories must match the rightmost batch dimension of the
`components_distribution`. Must have either scalar `batch_shape` or
`batch_shape` matching `components_distribution.batch_shape[:-1]`.
components_distribution: `tfp.distributions.Distribution`-like instance.
Right-most batch dimension indexes components.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: `if not mixture_distribution.dtype.is_integer`.
ValueError: if mixture_distribution does not have scalar `event_shape`.
ValueError: if `mixture_distribution.batch_shape` and
`components_distribution.batch_shape[:-1]` are both fully defined and
the former is neither scalar nor equal to the latter.
ValueError: if the number of `mixture_distribution` categories does not
equal the `components_distribution` rightmost batch dimension.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._mixture_distribution = mixture_distribution
self._components_distribution = components_distribution
self._runtime_assertions = []
s = components_distribution.event_shape_tensor()
s_dim0 = tensor_shape.dimension_value(s.shape[0])
self._event_ndims = (s_dim0
if s_dim0 is not None
else array_ops.shape(s)[0])
if not mixture_distribution.dtype.is_integer:
raise ValueError(
"`mixture_distribution.dtype` ({}) is not over integers".format(
mixture_distribution.dtype.name))
if (mixture_distribution.event_shape.ndims is not None
and mixture_distribution.event_shape.ndims != 0):
raise ValueError("`mixture_distribution` must have scalar `event_dim`s")
elif validate_args:
self._runtime_assertions += [
control_flow_ops.assert_has_rank(
mixture_distribution.event_shape_tensor(), 0,
message="`mixture_distribution` must have scalar `event_dim`s"),
]
mdbs = mixture_distribution.batch_shape
cdbs = components_distribution.batch_shape.with_rank_at_least(1)[:-1]
if mdbs.is_fully_defined() and cdbs.is_fully_defined():
if mdbs.ndims != 0 and mdbs != cdbs:
raise ValueError(
"`mixture_distribution.batch_shape` (`{}`) is not "
"compatible with `components_distribution.batch_shape` "
"(`{}`)".format(mdbs.as_list(), cdbs.as_list()))
elif validate_args:
mdbs = mixture_distribution.batch_shape_tensor()
cdbs = components_distribution.batch_shape_tensor()[:-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
distribution_util.pick_vector(
mixture_distribution.is_scalar_batch(), cdbs, mdbs),
cdbs,
message=(
"`mixture_distribution.batch_shape` is not "
"compatible with `components_distribution.batch_shape`"))]
km = tensor_shape.dimension_value(
mixture_distribution.logits.shape.with_rank_at_least(1)[-1])
kc = tensor_shape.dimension_value(
components_distribution.batch_shape.with_rank_at_least(1)[-1])
if km is not None and kc is not None and km != kc:
raise ValueError("`mixture_distribution components` ({}) does not "
"equal `components_distribution.batch_shape[-1]` "
"({})".format(km, kc))
elif validate_args:
km = array_ops.shape(mixture_distribution.logits)[-1]
kc = components_distribution.batch_shape_tensor()[-1]
self._runtime_assertions += [
control_flow_ops.assert_equal(
km, kc,
message=("`mixture_distribution components` does not equal "
"`components_distribution.batch_shape[-1:]`")),
]
elif km is None:
km = array_ops.shape(mixture_distribution.logits)[-1]
self._num_components = km
super(MixtureSameFamily, self).__init__(
dtype=self._components_distribution.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
self._mixture_distribution._graph_parents # pylint: disable=protected-access
+ self._components_distribution._graph_parents), # pylint: disable=protected-access
name=name)
@property
def mixture_distribution(self):
return self._mixture_distribution
@property
def components_distribution(self):
return self._components_distribution
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.batch_shape_tensor()[:-1]
def _batch_shape(self):
return self.components_distribution.batch_shape.with_rank_at_least(1)[:-1]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return self.components_distribution.event_shape_tensor()
def _event_shape(self):
return self.components_distribution.event_shape
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
x = self.components_distribution.sample(n) # [n, B, k, E]
# TODO(jvdillon): Consider using tf.gather (by way of index unrolling).
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=self.mixture_distribution.sample(n), # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = distribution_utils.pad_mixture_dimensions(
mask, self, self.mixture_distribution,
self._event_shape().ndims) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask, axis=-1 - self._event_ndims) # [n, B, E]
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
x = self._pad_sample_dims(x)
log_prob_x = self.components_distribution.log_prob(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, axis=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_prob_x + log_mix_prob, axis=-1) # [S, B]
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
probs = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, [1]*e]
return math_ops.reduce_sum(
probs * self.components_distribution.mean(),
axis=-1 - self._event_ndims) # [B, E]
def _log_cdf(self, x):
x = self._pad_sample_dims(x)
log_cdf_x = self.components_distribution.log_cdf(x) # [S, B, k]
log_mix_prob = nn_ops.log_softmax(
self.mixture_distribution.logits, axis=-1) # [B, k]
return math_ops.reduce_logsumexp(
log_cdf_x + log_mix_prob, axis=-1) # [S, B]
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, [1]*e]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.variance(),
axis=-1 - self._event_ndims) # [B, E]
var_cond_mean = math_ops.reduce_sum(
probs * math_ops.squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-1 - self._event_ndims) # [B, E]
return mean_cond_var + var_cond_mean # [B, E]
def _covariance(self):
static_event_ndims = self.event_shape.ndims
if static_event_ndims != 1:
# Covariance is defined only for vector distributions.
raise NotImplementedError("covariance is not implemented")
with ops.control_dependencies(self._runtime_assertions):
# Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
probs = distribution_utils.pad_mixture_dimensions(
distribution_utils.pad_mixture_dimensions(
self.mixture_distribution.probs, self, self.mixture_distribution,
self._event_shape().ndims),
self, self.mixture_distribution,
self._event_shape().ndims) # [B, k, 1, 1]
mean_cond_var = math_ops.reduce_sum(
probs * self.components_distribution.covariance(),
axis=-3) # [B, e, e]
var_cond_mean = math_ops.reduce_sum(
probs * _outer_squared_difference(
self.components_distribution.mean(),
self._pad_sample_dims(self._mean())),
axis=-3) # [B, e, e]
return mean_cond_var + var_cond_mean # [B, e, e]
def _pad_sample_dims(self, x):
with ops.name_scope("pad_sample_dims", values=[x]):
ndims = x.shape.ndims if x.shape.ndims is not None else array_ops.rank(x)
shape = array_ops.shape(x)
d = ndims - self._event_ndims
x = array_ops.reshape(x, shape=array_ops.concat([
shape[:d], [1], shape[d:]], axis=0))
return x
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _outer_squared_difference(x, y):
"""Convenience function analogous to tf.squared_difference."""
z = x - y
return z[..., array_ops.newaxis, :] * z[..., array_ops.newaxis]
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/mixture_same_family.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
#### Terminology
Recall that a `Tensor` has:
- `shape`: size of `Tensor` dimensions,
- `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
`Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
- `sample_dims`: indexes independent draws from identical
parameterizations of the `Distribution`.
- `batch_dims`: indexes independent draws from non-identical
parameterizations of the `Distribution`.
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
`Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
#### Purpose
This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
```
sample_shape batch_shape event_shape
sample_dims batch_dims event_dims
sample_ndims batch_ndims event_ndims
```
for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
#### Examples
We show examples of distribution shape semantics.
- Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
```python
sample_dims = [0]
tf.reduce_mean(Normal(loc=1.3, scale=1.).sample_n(1000),
axis=sample_dims) # ~= 1.3
```
- Batch dimensions:
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws from a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a `Normal` with a random `Laplace` location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(loc=Laplace(0., 1.).sample_n(n=1000),
scale=tf.ones(1000)).prob(x),
axis=batch_dims)
```
The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `prob(x)` yields
1000 probabilities, one for every location. The average over this batch
yields the marginal.
- Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
E.g., Jacobian of the transform `Y = g(X) = exp(X)`:
```python
tf.compat.v1.div(1., tf.reduce_prod(x, event_dims))
```
We show examples using this class.
Write `S, B, E` for `sample_shape`, `batch_shape`, and `event_shape`.
```python
# 150 iid samples from one multivariate Normal with two degrees of freedom.
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
mvn = MultivariateNormal(mu, sigma)
rand_mvn = mvn.sample(sample_shape=[3, 50])
shaper = DistributionShape(batch_ndims=0, event_ndims=1)
S, B, E = shaper.get_shape(rand_mvn)
# S = [3, 50]
# B = []
# E = [2]
# 12 iid samples from one Wishart with 2x2 events.
sigma = [[1., 0],
[2, 1]]
wishart = Wishart(df=5, scale=sigma)
rand_wishart = wishart.sample(sample_shape=[3, 4])
shaper = DistributionShape(batch_ndims=0, event_ndims=2)
S, B, E = shaper.get_shape(rand_wishart)
# S = [3, 4]
# B = []
# E = [2, 2]
# 100 iid samples from two, non-identical trivariate Normal distributions.
mu = ... # shape(2, 3)
sigma = ... # shape(2, 3, 3)
X = MultivariateNormal(mu, sigma).sample(sample_shape=[4, 25])
# S = [4, 25]
# B = [2]
# E = [3]
```
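As a further sketch (assuming `import tensorflow as tf`), the same
partitioning can be read off a concrete `Tensor`:
```python
x = tf.zeros([3, 50, 2])  # e.g., 3 x 50 draws of a 2-dimensional event.
shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
sample_shape, batch_shape, event_shape = shaper.get_shape(x)
# sample_shape ==> [3, 50], batch_shape ==> [], event_shape ==> [2]
```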
#### Argument Validation
When `validate_args=True`, checks that cannot be done during
graph construction are performed at graph execution. This may result in a
performance degradation because data must be switched from GPU to CPU.
For example, when `validate_args=True` and `event_ndims` is a
non-constant `Tensor`, it is checked to be a non-negative integer at graph
execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
batch_ndims=None,
event_ndims=None,
validate_args=False,
name="DistributionShape"):
"""Construct `DistributionShape` with fixed `batch_ndims`, `event_ndims`.
`batch_ndims` and `event_ndims` are fixed throughout the lifetime of a
`Distribution`. They may only be known at graph execution.
If both `batch_ndims` and `event_ndims` are python scalars (rather than
either being a `Tensor`), functions in this class automatically perform
sanity checks during graph construction.
Args:
batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
indexes of a `Tensor`. A "batch" is a non-identical distribution, i.e,
Normal with different parameters.
event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
indexes of a `Tensor`. An "event" is what is sampled from a
distribution, i.e., a trivariate Normal has an event shape of [3] and a
4 dimensional Wishart has an event shape of [4, 4].
validate_args: Python `bool`, default `False`. When `True`,
non-`tf.constant` `Tensor` arguments are checked for correctness.
(`tf.constant` arguments are always checked.)
name: Python `str`. The name prepended to Ops created by this class.
Raises:
ValueError: if either `batch_ndims` or `event_ndims` are: `None`,
negative, not `int32`.
"""
if batch_ndims is None: raise ValueError("batch_ndims cannot be None")
if event_ndims is None: raise ValueError("event_ndims cannot be None")
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
self._validate_args = validate_args
with ops.name_scope(name):
self._name = name
with ops.name_scope("init"):
self._batch_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
batch_ndims, name="batch_ndims"))
self._batch_ndims_static, self._batch_ndims_is_0 = (
self._introspect_ndims(self._batch_ndims))
self._event_ndims = self._assert_non_negative_int32_scalar(
ops.convert_to_tensor(
event_ndims, name="event_ndims"))
self._event_ndims_static, self._event_ndims_is_0 = (
self._introspect_ndims(self._event_ndims))
@property
def name(self):
"""Name given to ops created by this class."""
return self._name
@property
def batch_ndims(self):
"""Returns number of dimensions corresponding to non-identical draws."""
return self._batch_ndims
@property
def event_ndims(self):
"""Returns number of dimensions needed to index a sample's coordinates."""
return self._event_ndims
@property
def validate_args(self):
"""Returns True if graph-runtime `Tensor` checks are enabled."""
return self._validate_args
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def get_sample_ndims(self, x, name="get_sample_ndims"):
"""Returns number of dimensions corresponding to iid draws ("sample").
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_ndims: `Tensor` (0D, `int32`).
Raises:
ValueError: if `sample_ndims` is calculated to be negative.
"""
with self._name_scope(name, values=[x]):
ndims = self.get_ndims(x, name=name)
if self._is_all_constant_helper(ndims, self.batch_ndims,
self.event_ndims):
ndims = tensor_util.constant_value(ndims)
sample_ndims = (ndims - self._batch_ndims_static -
self._event_ndims_static)
if sample_ndims < 0:
raise ValueError(
"expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
(self._batch_ndims_static, self._event_ndims_static, ndims))
return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
else:
with ops.name_scope(name="sample_ndims"):
sample_ndims = ndims - self.batch_ndims - self.event_ndims
if self.validate_args:
sample_ndims = control_flow_ops.with_dependencies(
[check_ops.assert_non_negative(sample_ndims)], sample_ndims)
return sample_ndims
def get_dims(self, x, name="get_dims"):
"""Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.
Example:
```python
x = ... # Tensor with shape [4, 3, 2, 1]
sample_dims, batch_dims, event_dims = _DistributionShape(
batch_ndims=2, event_ndims=1).get_dims(x)
# sample_dims == [0]
# batch_dims == [1, 2]
# event_dims == [3]
# Note that these are not the shape parts, but rather indexes into shape.
```
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_dims: `Tensor` (1D, `int32`).
batch_dims: `Tensor` (1D, `int32`).
event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
"""Closure to make dims range."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if self._is_all_constant_helper(size, *start_sum):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
return ops.convert_to_tensor(
list(range(start, stop)), dtype=dtypes.int32, name=name)
else:
start = sum(start_sum)
return math_ops.range(start, start + size)
sample_ndims = self.get_sample_ndims(x, name=name)
return (make_dims([], sample_ndims, name="sample_dims"),
make_dims([sample_ndims], self.batch_ndims, name="batch_dims"),
make_dims([sample_ndims, self.batch_ndims],
self.event_ndims, name="event_dims"))
def get_shape(self, x, name="get_shape"):
"""Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
Args:
x: `Tensor`.
name: Python `str`. The name to give this op.
Returns:
sample_shape: `Tensor` (1D, `int32`).
batch_shape: `Tensor` (1D, `int32`).
event_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
def slice_shape(start_sum, size, name):
"""Closure to slice out shape."""
start_sum = start_sum if start_sum else [
array_ops.zeros([], dtype=dtypes.int32, name="zero")]
if (x.get_shape().ndims is not None and
self._is_all_constant_helper(size, *start_sum)):
start = sum(tensor_util.constant_value(s) for s in start_sum)
stop = start + tensor_util.constant_value(size)
slice_ = x.get_shape()[start:stop].as_list()
if all(s is not None for s in slice_):
return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
return array_ops.slice(array_ops.shape(x), [sum(start_sum)], [size])
sample_ndims = self.get_sample_ndims(x, name=name)
return (slice_shape([], sample_ndims,
name="sample_shape"),
slice_shape([sample_ndims], self.batch_ndims,
name="batch_shape"),
slice_shape([sample_ndims, self.batch_ndims], self.event_ndims,
name="event_shape"))
# TODO(jvdillon): Make remove expand_batch_dim and make expand_batch_dim=False
# the default behavior.
def make_batch_of_event_sample_matrices(
self, x, expand_batch_dim=True,
name="make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from S+B+E to B_+E_+S_.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
Args:
x: `Tensor`.
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims >= 1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: S+B+E
sample_shape, batch_shape, event_shape = self.get_shape(x)
event_shape = distribution_util.pick_vector(
self._event_ndims_is_0, [1], event_shape)
if expand_batch_dim:
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, [1], batch_shape)
new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: [prod(S)]+B_+E_
x = distribution_util.rotate_transpose(x, shift=-1)
# x.shape: B_+E_+[prod(S)]
return x, sample_shape
# TODO(jvdillon): Make remove expand_batch_dim and make expand_batch_dim=False
# the default behavior.
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, expand_batch_dim=True,
name="undo_make_batch_of_event_sample_matrices"):
"""Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
Where:
- `B_ = B if B or not expand_batch_dim else [1]`,
- `E_ = E if E else [1]`,
- `S_ = [tf.reduce_prod(S)]`.
This function "reverses" `make_batch_of_event_sample_matrices`.
Args:
x: `Tensor` of shape `B_+E_+S_`.
sample_shape: `Tensor` (1D, `int32`).
expand_batch_dim: Python `bool`. If `True` the batch dims will be expanded
such that `batch_ndims>=1`.
name: Python `str`. The name to give this op.
Returns:
x: `Tensor`. Input transposed/reshaped to `S+B+E`.
"""
with self._name_scope(name, values=[x, sample_shape]):
x = ops.convert_to_tensor(x, name="x")
# x.shape: B_+E_+[prod(S)]
sample_shape = ops.convert_to_tensor(sample_shape, name="sample_shape")
x = distribution_util.rotate_transpose(x, shift=1)
# x.shape: [prod(S)]+B_+E_
if self._is_all_constant_helper(self.batch_ndims, self.event_ndims):
if self._batch_ndims_is_0 or self._event_ndims_is_0:
squeeze_dims = []
if self._event_ndims_is_0:
squeeze_dims += [-1]
if self._batch_ndims_is_0 and expand_batch_dim:
squeeze_dims += [1]
if squeeze_dims:
x = array_ops.squeeze(x, axis=squeeze_dims)
# x.shape: [prod(S)]+B+E
_, batch_shape, event_shape = self.get_shape(x)
else:
s = (x.get_shape().as_list() if x.get_shape().is_fully_defined()
else array_ops.shape(x))
batch_shape = s[1:1+self.batch_ndims]
# Since sample_dims=1 and is left-most, we add 1 to the number of
# batch_ndims to get the event start dim.
event_start = array_ops.where_v2(
math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0), 2,
1 + self.batch_ndims)
event_shape = s[event_start:event_start+self.event_ndims]
new_shape = array_ops.concat([sample_shape, batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
# x.shape: S+B+E
return x
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + [self.batch_ndims, self.event_ndims])) as scope:
yield scope
def _is_all_constant_helper(self, *args):
"""Helper which returns True if all inputs are constant_value."""
return all(tensor_util.constant_value(x) is not None for x in args)
def _assert_non_negative_int32_scalar(self, x):
"""Helper which ensures that input is a non-negative, int32, scalar."""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != dtypes.int32.base_dtype:
raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
x_value_static = tensor_util.constant_value(x)
if x.get_shape().ndims is not None and x_value_static is not None:
if x.get_shape().ndims != 0:
raise ValueError("%s.ndims=%d is not 0 (scalar)" %
(x.name, x.get_shape().ndims))
if x_value_static < 0:
raise ValueError("%s.value=%d cannot be negative" %
(x.name, x_value_static))
return x
if self.validate_args:
x = control_flow_ops.with_dependencies([
check_ops.assert_rank(x, 0),
check_ops.assert_non_negative(x)], x)
return x
def _introspect_ndims(self, ndims):
"""Helper to establish some properties of input ndims args."""
if self._is_all_constant_helper(ndims):
return (tensor_util.constant_value(ndims),
tensor_util.constant_value(ndims) == 0)
return None, math_ops.equal(ndims, 0)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/shape.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to bridge `Distribution`s and `tf.contrib.learn.estimator` APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators.head import _compute_weighted_loss
from tensorflow.contrib.learn.python.learn.estimators.head import _RegressionHead
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.util import deprecation
__all__ = [
"estimator_head_distribution_regression",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def estimator_head_distribution_regression(make_distribution_fn,
label_dimension=1,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""Creates a `Head` for regression under a generic distribution.
Args:
make_distribution_fn: Python `callable` which returns a `tf.Distribution`
instance created using only logits.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
logits_dimension: Number of logits per example. This is the size of the last
dimension of the logits `Tensor` (typically, this has shape
`[batch_size, logits_dimension]`).
Default value: `label_dimension`.
label_name: Python `str`, name of the key in label `dict`. Can be `None` if
label is a `Tensor` (single headed models).
weight_column_name: Python `str` defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: Python `bool`. If `True`, estimator will learn a
centered bias variable for each class. Rest of the model structure learns
the residual after centered bias.
head_name: Python `str`, name of the head. Predictions, summary and metrics
keys are suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
Returns:
An instance of `Head` for generic regression.
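For instance, a sketch (not from the original docs) of a unit-variance Normal
regression head, assuming `tfd = tf.contrib.distributions`:
```python
head = estimator_head_distribution_regression(
    make_distribution_fn=lambda logits: tfd.Normal(loc=logits, scale=1.),
    label_dimension=1)
```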
"""
return _DistributionRegressionHead(
make_distribution_fn=make_distribution_fn,
label_dimension=label_dimension,
logits_dimension=logits_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
class _DistributionRegressionHead(_RegressionHead):
"""Creates a _RegressionHead instance from an arbitrary `Distribution`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
make_distribution_fn,
label_dimension,
logits_dimension=None,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None):
"""`Head` for regression.
Args:
make_distribution_fn: Python `callable` which returns a `tf.Distribution`
instance created using only logits.
label_dimension: Number of regression labels per example. This is the
size of the last dimension of the labels `Tensor` (typically, this has
shape `[batch_size, label_dimension]`).
logits_dimension: Number of logits per example. This is the size of the
last dimension of the logits `Tensor` (typically, this has shape
`[batch_size, logits_dimension]`).
Default value: `label_dimension`.
label_name: Python `str`, name of the key in label `dict`. Can be `None`
if label is a tensor (single headed models).
weight_column_name: Python `str` defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: Python `bool`. If `True`, estimator will learn a
centered bias variable for each class. Rest of the model structure
learns the residual after centered bias.
head_name: Python `str`, name of the head. Predictions, summary and
metrics keys are suffixed by `"/" + head_name` and the default variable
scope is `head_name`.
Raises:
TypeError: if `make_distribution_fn` is not `callable`.
"""
if not callable(make_distribution_fn):
raise TypeError("`make_distribution_fn` must be a callable function.")
self._distributions = {}
self._make_distribution_fn = make_distribution_fn
def static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
def loss_fn(labels, logits, weights=None):
"""Returns the loss of using `logits` to predict `labels`."""
d = self.distribution(logits)
labels_batch_shape = labels.shape.with_rank_at_least(1)[:-1]
labels_batch_shape = (
labels_batch_shape.as_list() if labels_batch_shape.is_fully_defined()
else array_ops.shape(labels)[:-1])
labels = array_ops.reshape(
labels,
shape=concat_vectors(labels_batch_shape, d.event_shape_tensor()))
return _compute_weighted_loss(
loss_unweighted=-d.log_prob(labels),
weight=weights)
def link_fn(logits):
"""Returns the inverse link function at `logits`."""
# Note: What the API calls a "link function" is really the inverse-link
# function, i.e., the "mean".
d = self.distribution(logits)
return d.mean()
super(_DistributionRegressionHead, self).__init__(
label_dimension=label_dimension,
loss_fn=loss_fn,
link_fn=link_fn,
logits_dimension=logits_dimension,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name)
@property
def distributions(self):
"""Returns all distributions created by `DistributionRegressionHead`."""
return self._distributions
def distribution(self, logits, name=None):
"""Retrieves a distribution instance, parameterized by `logits`.
Args:
logits: `float`-like `Tensor` representing the parameters of the
underlying distribution.
name: The Python `str` name to given to this op.
Default value: "distribution".
Returns:
distribution: `tf.Distribution` instance parameterized by `logits`.
"""
with ops.name_scope(name, "distribution", [logits]):
d = self._distributions.get(logits, None)
if d is None:
d = self._make_distribution_fn(logits)
self._distributions[logits] = d
return d
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/estimator.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Statistical test assertions calibrated for their error rates.
Statistical tests have an inescapable probability of error: a correct
sampler can still fail a test by chance, and an incorrect sampler can
still pass a test by chance. This library is about bounding both of
those error rates. This requires admitting a task-specific notion of
"discrepancy": Correct code will fail rarely, code that misbehaves by
more than the discrepancy will pass rarely, and nothing reliable can
be said about code that misbehaves, but misbehaves by less than the
discrepancy.
# Example
Consider testing that the mean of a scalar probability distribution P
is some expected constant. Suppose the support of P is the interval
`[0, 1]`. Then you might do this:
```python
from tensorflow_probability.python.distributions.internal import statistical_testing
expected_mean = ...
num_samples = 5000
samples = ... draw 5000 samples from P
# Check that the mean looks right
check1 = statistical_testing.assert_true_mean_equal_by_dkwm(
samples, low=0., high=1., expected=expected_mean,
false_fail_rate=1e-6)
# Check that the difference in means detectable with 5000 samples is
# small enough
check2 = tf.compat.v1.assert_less(
statistical_testing.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples, low=0., high=1.0,
false_fail_rate=1e-6, false_pass_rate=1e-6),
0.01)
# Be sure to execute both assertion ops
sess.run([check1, check2])
```
The second assertion is an instance of experiment design. It's a
deterministic computation (independent of the code under test) that
checks that `5000` samples is enough to reliably resolve mean
differences of `0.01` or more. Here "reliably" means that if the code
under test is correct, the probability of drawing an unlucky sample
that causes this test to fail is at most 1e-6; and if the code under
test is incorrect enough that its true mean is 0.01 more or less than
expected, then the probability of drawing a "lucky" sample that causes
the test to false-pass is also at most 1e-6.
# Overview
Every function in this library can be characterized in terms of:
- The property being tested, such as the full density of the
distribution under test, or just its true mean, or a single
Bernoulli probability, etc.
- The relation being asserted, e.g., whether the mean is less, more,
or equal to the given expected value.
- The stochastic bound being relied upon, such as the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
or the CDF of the binomial distribution (for assertions about
Bernoulli probabilities).
- The number of sample sets in the statistical test. For example,
testing equality of means has a one-sample variant, where the
expected mean is given exactly, and a two-sample variant, where the
expected mean is itself given by a set of samples (e.g., from an
alternative algorithm).
- What operation(s) of the test are to be performed. Each test has
three of these:
1. `assert` executes the test. Specifically, it creates a TF op that
produces an error if it has enough evidence to prove that the
property under test is violated. These functions depend on the
desired false failure rate, because that determines the sizes of
appropriate confidence intervals, etc.
2. `min_discrepancy` computes the smallest difference reliably
detectable by that test, given the sample count and error rates.
What it's a difference of is test-specific. For example, a test
for equality of means would make detection guarantees about the
difference between the true means.
3. `min_num_samples` computes the minimum number of samples needed
to reliably detect a given discrepancy with given error rates.
The latter two are for experimental design, and are meant to be
usable either interactively or inline in the overall test method.
This library follows a naming convention, to make room for every
combination of the above. A name mentions the operation first, then
the property, then the relation, then the bound, then, if the test
takes more than one set of samples, a token indicating this. For
example, `assert_true_mean_equal_by_dkwm` (which is implicitly
one-sample). Each name is a grammatically sound noun phrase (or verb
phrase, for the asserts).
# Asymptotic properties
The number of samples needed tends to scale as `O(1/discrepancy**2)` and
as `O(log(1/error_rate))`.
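As a further illustrative sketch (the example values are arbitrary, and `sess`
is assumed as in the example above), the experiment-design helpers can also be
used to choose the sample count up front:
```python
# How many samples does it take to resolve a mean difference of 0.01
# on a distribution supported on [0, 1], with both error rates at 1e-6?
n = statistical_testing.min_num_samples_for_dkwm_mean_test(
    discrepancy=0.01, low=0., high=1.,
    false_fail_rate=1e-6, false_pass_rate=1e-6)
num_samples = int(sess.run(n))
```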
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
__all__ = [
"true_mean_confidence_interval_by_dkwm",
"assert_true_mean_equal_by_dkwm",
"min_discrepancy_of_true_means_detectable_by_dkwm",
"min_num_samples_for_dkwm_mean_test",
"assert_true_mean_in_interval_by_dkwm",
"assert_true_mean_equal_by_dkwm_two_sample",
"min_discrepancy_of_true_means_detectable_by_dkwm_two_sample",
"min_num_samples_for_dkwm_mean_two_sample_test",
]
def _batch_sort_vector(x, ascending=True, name=None):
with ops.name_scope(name, "_batch_sort_vector", [x]):
x = ops.convert_to_tensor(x, name="x")
n = array_ops.shape(x)[-1]
if ascending:
y, _ = nn_ops.top_k(-x, k=n, sorted=True)
y = -y
else:
y, _ = nn_ops.top_k(x, k=n, sorted=True)
y.set_shape(x.shape)
return y
def _do_maximum_mean(samples, envelope, high, name=None):
"""Common code between maximum_mean and minimum_mean."""
with ops.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
n = array_ops.rank(samples)
# Move the batch dimension of `samples` to the rightmost position,
# where the _batch_sort_vector function wants it.
perm = array_ops.concat([math_ops.range(1, n), [0]], axis=0)
samples = array_ops.transpose(samples, perm)
samples = _batch_sort_vector(samples)
# The maximum mean is given by taking `envelope`-worth of
# probability from the smallest samples and moving it to the
# maximum value. This amounts to:
# - ignoring the smallest k samples, where `k/n < envelope`
# - taking a `1/n - (envelope - k/n)` part of the index k sample
# - taking all the other samples
# - and adding `envelope * high` at the end.
# The following is a vectorized and batched way of computing this.
# `max_mean_contrib` is a mask implementing the previous.
batch_size = array_ops.shape(samples)[-1]
batch_size = math_ops.cast(batch_size, dtype=samples.dtype.base_dtype)
step = 1. / batch_size
cum_steps = step * math_ops.range(
1, batch_size + 1, dtype=samples.dtype.base_dtype)
max_mean_contrib = clip_ops.clip_by_value(
cum_steps - envelope[..., array_ops.newaxis],
clip_value_min=0.,
clip_value_max=step)
return math_ops.reduce_sum(
samples * max_mean_contrib, axis=-1) + envelope * high
def _maximum_mean(samples, envelope, high, name=None):
"""Returns a stochastic upper bound on the mean of a scalar distribution.
The idea is that if the true CDF is within an `eps`-envelope of the
empirical CDF of the samples, and the support is bounded above, then
the mean is bounded above as well. In symbols,
```none
sup_x(|F_n(x) - F(x)|) < eps
```
The 0th dimension of `samples` is interpreted as independent and
identically distributed samples. The remaining dimensions are
broadcast together with `envelope` and `high`, and operated on
separately.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `envelope` and `high`.
envelope: Floating-point `Tensor` of sizes of admissible CDF
envelopes (i.e., the `eps` above).
high: Floating-point `Tensor` of upper bounds on the distributions'
supports. `samples <= high`.
name: A name for this operation (optional).
Returns:
bound: Floating-point `Tensor` of upper bounds on the true means.
Raises:
InvalidArgumentError: If some `sample` is found to be larger than
the corresponding `high`.
"""
with ops.name_scope(name, "maximum_mean", [samples, envelope, high]):
samples = ops.convert_to_tensor(samples, name="samples")
envelope = ops.convert_to_tensor(envelope, name="envelope")
high = ops.convert_to_tensor(high, name="high")
xmax = math_ops.reduce_max(samples, axis=[0])
msg = "Given sample maximum value exceeds expectations"
check_op = check_ops.assert_less_equal(xmax, high, message=msg)
with ops.control_dependencies([check_op]):
return array_ops.identity(_do_maximum_mean(samples, envelope, high))
def _minimum_mean(samples, envelope, low, name=None):
"""Returns a stochastic lower bound on the mean of a scalar distribution.
The idea is that if the true CDF is within an `eps`-envelope of the
empirical CDF of the samples, and the support is bounded below, then
the mean is bounded below as well. In symbols,
```none
sup_x(|F_n(x) - F(x)|) < eps
```
The 0th dimension of `samples` is interpreted as independent and
identically distributed samples. The remaining dimensions are
broadcast together with `envelope` and `low`, and operated on
separately.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `envelope` and `low`.
envelope: Floating-point `Tensor` of sizes of admissible CDF
envelopes (i.e., the `eps` above).
low: Floating-point `Tensor` of lower bounds on the distributions'
supports. `samples >= low`.
name: A name for this operation (optional).
Returns:
bound: Floating-point `Tensor` of lower bounds on the true means.
Raises:
InvalidArgumentError: If some `sample` is found to be smaller than
the corresponding `low`.
"""
with ops.name_scope(name, "minimum_mean", [samples, envelope, low]):
samples = ops.convert_to_tensor(samples, name="samples")
envelope = ops.convert_to_tensor(envelope, name="envelope")
low = ops.convert_to_tensor(low, name="low")
xmin = math_ops.reduce_min(samples, axis=[0])
msg = "Given sample minimum value falls below expectations"
check_op = check_ops.assert_greater_equal(xmin, low, message=msg)
with ops.control_dependencies([check_op]):
return - _do_maximum_mean(-samples, envelope, -low)
def _dkwm_cdf_envelope(n, error_rate, name=None):
"""Computes the CDF envelope that the DKWM inequality licenses.
The [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
gives a stochastic bound on the distance between the true cumulative
distribution function (CDF) of any distribution and its empirical
CDF. To wit, for `n` iid samples from any distribution with CDF F,
```none
P(sup_x |F_n(x) - F(x)| > eps) < 2exp(-2n eps^2)
```
This function computes the envelope size `eps` as a function of the
number of samples `n` and the desired limit on the left-hand
probability above.
Args:
n: `Tensor` of numbers of samples drawn.
error_rate: Floating-point `Tensor` of admissible rates of mistakes.
name: A name for this operation (optional).
Returns:
eps: `Tensor` of maximum distances the true CDF can be from the
empirical CDF. This scales as `O(sqrt(-log(error_rate)))` and
as `O(1 / sqrt(n))`. The shape is the broadcast of `n` and
`error_rate`.
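For instance (an illustrative calculation, not part of the original
docstring), `n = 5000` and `error_rate = 1e-6` give
`eps = sqrt(-log(0.5e-6) / 10000) ≈ 0.038`.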
"""
with ops.name_scope(name, "dkwm_cdf_envelope", [n, error_rate]):
n = math_ops.cast(n, dtype=error_rate.dtype)
return math_ops.sqrt(-gen_math_ops.log(error_rate / 2.) / (2. * n))
def _check_shape_dominates(samples, parameters):
"""Check that broadcasting `samples` against `parameters` does not expand it.
Why? Because I want to be very sure that the samples tensor is not
accidentally enlarged by broadcasting against tensors that are
supposed to be describing the distribution(s) sampled from, lest the
sample counts end up inflated.
Args:
samples: A `Tensor` whose shape is to be protected against broadcasting.
parameters: A list of `Tensor`s that are parameters for the statistical test.
Returns:
samples: Return original `samples` with control dependencies attached
to ensure no broadcasting.
"""
def check(t):
samples_batch_shape = array_ops.shape(samples)[1:]
broadcasted_batch_shape = array_ops.broadcast_dynamic_shape(
samples_batch_shape, array_ops.shape(t))
# This rank check ensures that I don't get a wrong answer from the
# _shapes_ broadcasting against each other.
samples_batch_ndims = array_ops.size(samples_batch_shape)
ge = check_ops.assert_greater_equal(
samples_batch_ndims, array_ops.rank(t))
eq = check_ops.assert_equal(samples_batch_shape, broadcasted_batch_shape)
return ge, eq
checks = list(itertools.chain(*[check(t) for t in parameters]))
with ops.control_dependencies(checks):
return array_ops.identity(samples)
def true_mean_confidence_interval_by_dkwm(
samples, low, high, error_rate=1e-6, name=None):
"""Computes a confidence interval for the mean of a scalar distribution.
In batch mode, computes confidence intervals for all distributions
in the batch (which need not be identically distributed).
Relies on the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
The probability (over the randomness of drawing the given samples)
that any true mean is outside the corresponding returned interval is
no more than the given `error_rate`. The sizes of the intervals
scale as
`O(1 / sqrt(#samples))`, as `O(high - low)`, and as `O(-log(error_rate))`.
Note that `error_rate` is a total error rate for all the confidence
intervals in the batch. As such, if the batch is nontrivial, the
error rate is not broadcast but divided (evenly) among the batch
members.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
error_rate: *Scalar* floating-point `Tensor` admissible total rate
of mistakes.
name: A name for this operation (optional).
Returns:
low: A floating-point `Tensor` of stochastic lower bounds on the
true means.
high: A floating-point `Tensor` of stochastic upper bounds on the
true means.
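A minimal usage sketch (hypothetical values; any samples with bounded support
work):
```python
samples = ...  # e.g. shape [5000], all entries in [0., 1.]
ci_low, ci_high = true_mean_confidence_interval_by_dkwm(
    samples, low=0., high=1., error_rate=1e-6)
# With probability at least 1 - 1e-6 over the sampling, each true mean
# lies in [ci_low, ci_high].
```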
"""
with ops.name_scope(
name, "true_mean_confidence_interval_by_dkwm",
[samples, low, high, error_rate]):
samples = ops.convert_to_tensor(samples, name="samples")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
error_rate = ops.convert_to_tensor(error_rate, name="error_rate")
samples = _check_shape_dominates(samples, [low, high])
check_ops.assert_scalar(error_rate) # Static shape
error_rate = _itemwise_error_rate(error_rate, [low, high], samples)
n = array_ops.shape(samples)[0]
envelope = _dkwm_cdf_envelope(n, error_rate)
min_mean = _minimum_mean(samples, envelope, low)
max_mean = _maximum_mean(samples, envelope, high)
return min_mean, max_mean
def _itemwise_error_rate(
total_error_rate, param_tensors, sample_tensor=None, name=None):
with ops.name_scope(
name, "itemwise_error_rate",
[total_error_rate, param_tensors, sample_tensor]):
result_shape = [1]
for p_tensor in param_tensors:
result_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(p_tensor), result_shape)
if sample_tensor is not None:
result_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(sample_tensor)[1:], result_shape)
num_items = math_ops.reduce_prod(result_shape)
return total_error_rate / math_ops.cast(
num_items, dtype=total_error_rate.dtype)
def assert_true_mean_equal_by_dkwm(
samples, low, high, expected, false_fail_rate=1e-6, name=None):
"""Asserts the mean of the given distribution is as expected.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the true mean of some distribution from which the given samples are
drawn is _not_ the given expected mean with statistical significance
`false_fail_rate` or stronger, otherwise passes. If you also want to
check that you are gathering enough evidence that a pass is not
spurious, see `min_num_samples_for_dkwm_mean_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
expected: Floating-point `Tensor` of expected true means.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any expected mean is
outside the corresponding confidence interval.
"""
with ops.name_scope(
name, "assert_true_mean_equal_by_dkwm",
[samples, low, high, expected, false_fail_rate]):
return assert_true_mean_in_interval_by_dkwm(
samples, low, high, expected, expected, false_fail_rate)
def min_discrepancy_of_true_means_detectable_by_dkwm(
n, low, high, false_fail_rate, false_pass_rate, name=None):
"""Returns the minimum mean discrepancy that a DKWM-based test can detect.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Note that `false_fail_rate` is a total false failure rate for all
the tests in the batch. As such, if the batch is nontrivial, each
member will demand more samples. The `false_pass_rate` is also
interpreted as a total, but is treated asymmetrically: If each test
in the batch detects its corresponding discrepancy with probability
at least `1 - false_pass_rate`, then running all those tests and
failing if any one fails will jointly detect all those discrepancies
with the same `false_pass_rate`.
Args:
n: `Tensor` of numbers of samples to be drawn from the distributions
of interest.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
of false passes.
name: A name for this operation (optional).
Returns:
discr: `Tensor` of lower bounds on the distances between true
means detectable by a DKWM-based test.
For each batch member `i`, of `K` total, drawing `n[i]` samples from
some scalar distribution supported on `[low[i], high[i]]` is enough
to detect a difference in means of size `discr[i]` or more.
Specifically, we guarantee that (a) if the true mean is the expected
mean (resp. in the expected interval), then `assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will fail with
probability at most `false_fail_rate / K` (which amounts to
`false_fail_rate` if applied to the whole batch at once), and (b) if
the true mean differs from the expected mean (resp. falls outside
the expected interval) by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will pass with
probability at most `false_pass_rate`.
The detectable discrepancy scales as
- `O(high[i] - low[i])`,
- `O(1 / sqrt(n[i]))`,
- `O(-log(false_fail_rate/K))`, and
- `O(-log(false_pass_rate))`.
"""
with ops.name_scope(
name, "min_discrepancy_of_true_means_detectable_by_dkwm",
[n, low, high, false_fail_rate, false_pass_rate]):
n = ops.convert_to_tensor(n, name="n")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Algorithm: Assume a true CDF F. The DKWM inequality gives a
# stochastic bound on how far the observed empirical CDF F_n can be.
# Then, using the DKWM inequality again gives a stochastic bound on
# the farthest candidate true CDF F' that
# true_mean_confidence_interval_by_dkwm might consider. At worst, these
# errors may go in the same direction, so the distance between F and
# F' is bounded by the sum.
# On batching: false fail rates sum, so I need to reduce
# the input to account for the batching. False pass rates
# max, so I don't.
sampling_envelope = _dkwm_cdf_envelope(n, false_pass_rate)
false_fail_rate = _itemwise_error_rate(false_fail_rate, [n, low, high])
analysis_envelope = _dkwm_cdf_envelope(n, false_fail_rate)
return (high - low) * (sampling_envelope + analysis_envelope)
def min_num_samples_for_dkwm_mean_test(
discrepancy, low, high,
false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
"""Returns how many samples suffice for a one-sample DKWM mean test.
To wit, returns an upper bound on the number of samples necessary to
guarantee detecting a mean difference of at least the given
`discrepancy`, with the given `false_fail_rate` and `false_pass_rate`,
using the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval)
on a scalar distribution supported on `[low, high]`.
Args:
discrepancy: Floating-point `Tensor` of desired upper limits on mean
differences that may go undetected with probability higher than
`1 - false_pass_rate`.
low: `Tensor` of lower bounds on the distributions' support.
high: `Tensor` of upper bounds on the distributions' support.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
of false passes.
name: A name for this operation (optional).
Returns:
n: `Tensor` of numbers of samples to be drawn from the distributions
of interest.
The `discrepancy`, `low`, and `high` tensors must have
broadcast-compatible shapes.
For each batch member `i`, of `K` total, drawing `n[i]` samples from
some scalar distribution supported on `[low[i], high[i]]` is enough
to detect a difference in means of size `discrepancy[i]` or more.
Specifically, we guarantee that (a) if the true mean is the expected
mean (resp. in the expected interval), then `assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will fail with
probability at most `false_fail_rate / K` (which amounts to
`false_fail_rate` if applied to the whole batch at once), and (b) if
the true mean differs from the expected mean (resp. falls outside
the expected interval) by at least `discrepancy[i]`,
`assert_true_mean_equal_by_dkwm`
(resp. `assert_true_mean_in_interval_by_dkwm`) will pass with
probability at most `false_pass_rate`.
The required number of samples scales
as `O((high[i] - low[i])**2)`, `O(-log(false_fail_rate/K))`,
`O(-log(false_pass_rate))`, and `O(1 / discrepancy[i]**2)`.
"""
with ops.name_scope(
name, "min_num_samples_for_dkwm_mean_test",
[low, high, false_fail_rate, false_pass_rate, discrepancy]):
discrepancy = ops.convert_to_tensor(
discrepancy, name="discrepancy")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Could choose to cleverly allocate envelopes, but this is sound.
envelope1 = discrepancy / (2. * (high - low))
envelope2 = envelope1
false_fail_rate = _itemwise_error_rate(
false_fail_rate, [low, high, discrepancy])
n1 = -math_ops.log(false_fail_rate / 2.) / (2. * envelope1**2)
n2 = -math_ops.log(false_pass_rate / 2.) / (2. * envelope2**2)
return math_ops.maximum(n1, n2)
def assert_true_mean_in_interval_by_dkwm(
samples, low, high, expected_low, expected_high,
false_fail_rate=1e-6, name=None):
"""Asserts the mean of the given distribution is in the given interval.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the mean of the distribution from which the given samples are
drawn is _outside_ the given interval with statistical significance
`false_fail_rate` or stronger, otherwise passes. If you also want
to check that you are gathering enough evidence that a pass is not
spurious, see `min_num_samples_for_dkwm_mean_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples: Floating-point `Tensor` of samples from the distribution(s)
of interest. Entries are assumed IID across the 0th dimension.
The other dimensions must broadcast with `low` and `high`.
The support is bounded: `low <= samples <= high`.
low: Floating-point `Tensor` of lower bounds on the distributions'
supports.
high: Floating-point `Tensor` of upper bounds on the distributions'
supports.
expected_low: Floating-point `Tensor` of lower bounds on the
expected true means.
expected_high: Floating-point `Tensor` of upper bounds on the
expected true means.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if any expected mean
interval does not overlap with the corresponding confidence
interval.
"""
with ops.name_scope(
name, "assert_true_mean_in_interval_by_dkwm",
[samples, low, high, expected_low, expected_high, false_fail_rate]):
samples = ops.convert_to_tensor(samples, name="samples")
low = ops.convert_to_tensor(low, name="low")
high = ops.convert_to_tensor(high, name="high")
expected_low = ops.convert_to_tensor(expected_low, name="expected_low")
expected_high = ops.convert_to_tensor(expected_high, name="expected_high")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
samples = _check_shape_dominates(
samples, [low, high, expected_low, expected_high])
min_mean, max_mean = true_mean_confidence_interval_by_dkwm(
samples, low, high, false_fail_rate)
# Assert that the interval [min_mean, max_mean] intersects the
# interval [expected_low, expected_high]. This is true if
# max_mean >= expected_low and min_mean <= expected_high.
# By DeMorgan's law, that's also equivalent to
# not (max_mean < expected_low or min_mean > expected_high),
# which is a way of saying the two intervals are not disjoint.
check_confidence_interval_can_intersect = check_ops.assert_greater_equal(
max_mean, expected_low, message="Confidence interval does not "
"intersect: true mean smaller than expected")
with ops.control_dependencies([check_confidence_interval_can_intersect]):
return check_ops.assert_less_equal(
min_mean, expected_high, message="Confidence interval does not "
"intersect: true mean greater than expected")
def assert_true_mean_equal_by_dkwm_two_sample(
samples1, low1, high1, samples2, low2, high2,
false_fail_rate=1e-6, name=None):
"""Asserts the means of the given distributions are equal.
More precisely, fails if there is enough evidence (using the
[Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval))
that the means of the distributions from which the given samples are
drawn are _not_ equal with statistical significance `false_fail_rate`
or stronger, otherwise passes. If you also want to check that you
are gathering enough evidence that a pass is not spurious, see
`min_num_samples_for_dkwm_mean_two_sample_test` and
`min_discrepancy_of_true_means_detectable_by_dkwm_two_sample`.
Note that `false_fail_rate` is a total false failure rate for all
the assertions in the batch. As such, if the batch is nontrivial,
the assertion will insist on stronger evidence to fail any one member.
Args:
samples1: Floating-point `Tensor` of samples from the
distribution(s) A. Entries are assumed IID across the 0th
dimension. The other dimensions must broadcast with `low1`,
`high1`, `low2`, and `high2`.
The support is bounded: `low1 <= samples1 <= high1`.
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
samples2: Floating-point `Tensor` of samples from the
distribution(s) B. Entries are assumed IID across the 0th
dimension. The other dimensions must broadcast with `low1`,
`high1`, `low2`, and `high2`.
The support is bounded: `low2 <= samples2 <= high2`.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of mistakes.
name: A name for this operation (optional).
Returns:
check: Op that raises `InvalidArgumentError` if, for any batch member, the
confidence intervals for the two corresponding true means do not overlap.
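A minimal sketch (hypothetical values; both samplers are assumed supported on
[0, 1], and `sess` is a TF session):
```python
samples1 = ...  # draws from implementation A, shape [n1]
samples2 = ...  # draws from implementation B, shape [n2]
check = assert_true_mean_equal_by_dkwm_two_sample(
    samples1, low1=0., high1=1.,
    samples2=samples2, low2=0., high2=1.,
    false_fail_rate=1e-6)
sess.run(check)  # Errors only on strong evidence that the means differ.
```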
"""
with ops.name_scope(
name, "assert_true_mean_equal_by_dkwm_two_sample",
[samples1, low1, high1, samples2, low2, high2, false_fail_rate]):
samples1 = ops.convert_to_tensor(samples1, name="samples1")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
samples2 = ops.convert_to_tensor(samples2, name="samples2")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
samples1 = _check_shape_dominates(samples1, [low1, high1])
samples2 = _check_shape_dominates(samples2, [low2, high2])
compatible_samples = check_ops.assert_equal(
array_ops.shape(samples1)[1:], array_ops.shape(samples2)[1:])
with ops.control_dependencies([compatible_samples]):
# Could in principle play games with cleverly allocating
# significance instead of the even split below. It may be possible
# to get tighter intervals, in order to obtain a higher power test.
# Any allocation strategy that depends only on the support bounds
# and sample counts should be valid; however, because the intervals
# scale as O(-log(false_fail_rate)), there doesn't seem to be much
# room to win.
min_mean_2, max_mean_2 = true_mean_confidence_interval_by_dkwm(
samples2, low2, high2, false_fail_rate / 2.)
return assert_true_mean_in_interval_by_dkwm(
samples1, low1, high1, min_mean_2, max_mean_2, false_fail_rate / 2.)
def min_discrepancy_of_true_means_detectable_by_dkwm_two_sample(
n1, low1, high1, n2, low2, high2,
false_fail_rate, false_pass_rate, name=None):
"""Returns the minimum mean discrepancy for a two-sample DKWM-based test.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Note that `false_fail_rate` is a total false failure rate for all
the tests in the batch. As such, if the batch is nontrivial, each
member will demand more samples. The `false_pass_rate` is also
interpreted as a total, but is treated asymmetrically: If each test
in the batch detects its corresponding discrepancy with probability
at least `1 - false_pass_rate`, then running all those tests and
failing if any one fails will jointly detect all those discrepancies
with the same `false_pass_rate`.
Args:
n1: `Tensor` of numbers of samples to be drawn from the distributions A.
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
n2: `Tensor` of numbers of samples to be drawn from the distributions B.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
of false passes.
name: A name for this operation (optional).
Returns:
discr: `Tensor` of lower bounds on the distances between true means
detectable by a two-sample DKWM-based test.
For each batch member `i`, of `K` total, drawing `n1[i]` samples
from scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
samples from scalar distribution B supported on `[low2[i], high2[i]]`
is enough to detect a difference in their true means of size
`discr[i]` or more. Specifically, we guarantee that (a) if their
true means are equal, `assert_true_mean_equal_by_dkwm_two_sample`
will fail with probability at most `false_fail_rate/K` (which
amounts to `false_fail_rate` if applied to the whole batch at once),
and (b) if their true means differ by at least `discr[i]`,
`assert_true_mean_equal_by_dkwm_two_sample` will pass with
probability at most `false_pass_rate`.
The detectable discrepancy scales as
- `O(high1[i] - low1[i])`, `O(high2[i] - low2[i])`,
- `O(1 / sqrt(n1[i]))`, `O(1 / sqrt(n2[i]))`,
- `O(-log(false_fail_rate/K))`, and
- `O(-log(false_pass_rate))`.
"""
with ops.name_scope(
name, "min_discrepancy_of_true_means_detectable_by_dkwm_two_sample",
[n1, low1, high1, n2, low2, high2, false_fail_rate, false_pass_rate]):
n1 = ops.convert_to_tensor(n1, name="n1")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
n2 = ops.convert_to_tensor(n2, name="n2")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
det_disc1 = min_discrepancy_of_true_means_detectable_by_dkwm(
n1, low1, high1, false_fail_rate / 2., false_pass_rate / 2.)
det_disc2 = min_discrepancy_of_true_means_detectable_by_dkwm(
n2, low2, high2, false_fail_rate / 2., false_pass_rate / 2.)
return det_disc1 + det_disc2
def min_num_samples_for_dkwm_mean_two_sample_test(
discrepancy, low1, high1, low2, high2,
false_fail_rate=1e-6, false_pass_rate=1e-6, name=None):
"""Returns how many samples suffice for a two-sample DKWM mean test.
DKWM is the [Dvoretzky-Kiefer-Wolfowitz-Massart inequality]
(https://en.wikipedia.org/wiki/CDF-based_nonparametric_confidence_interval).
Args:
discrepancy: Floating-point `Tensor` of desired upper limits on mean
differences that may go undetected with probability higher than
`1 - false_pass_rate`.
low1: Floating-point `Tensor` of lower bounds on the supports of the
distributions A.
high1: Floating-point `Tensor` of upper bounds on the supports of
the distributions A.
low2: Floating-point `Tensor` of lower bounds on the supports of the
distributions B.
high2: Floating-point `Tensor` of upper bounds on the supports of
the distributions B.
false_fail_rate: *Scalar* floating-point `Tensor` admissible total
rate of false failures.
false_pass_rate: *Scalar* floating-point `Tensor` admissible rate
of false passes.
name: A name for this operation (optional).
Returns:
n1: `Tensor` of numbers of samples to be drawn from the distributions A.
n2: `Tensor` of numbers of samples to be drawn from the distributions B.
For each batch member `i`, of `K` total, drawing `n1[i]` samples
from scalar distribution A supported on `[low1[i], high1[i]]` and `n2[i]`
samples from scalar distribution B supported on `[low2[i], high2[i]]`
is enough to detect a difference in their true means of size
`discrepancy[i]` or more. Specifically, we guarantee that (a) if their
true means are equal, `assert_true_mean_equal_by_dkwm_two_sample`
will fail with probability at most `false_fail_rate/K` (which
amounts to `false_fail_rate` if applied to the whole batch at once),
and (b) if their true means differ by at least `discrepancy[i]`,
`assert_true_mean_equal_by_dkwm_two_sample` will pass with
probability at most `false_pass_rate`.
The required number of samples scales as
- `O((high1[i] - low1[i])**2)`, `O((high2[i] - low2[i])**2)`,
- `O(-log(false_fail_rate/K))`,
- `O(-log(false_pass_rate))`, and
- `O(1 / discrepancy[i]**2)`.
"""
with ops.name_scope(
name, "min_num_samples_for_dkwm_mean_two_sample_test",
[low1, high1, low2, high2,
false_fail_rate, false_pass_rate, discrepancy]):
discrepancy = ops.convert_to_tensor(discrepancy, name="discrepancy")
low1 = ops.convert_to_tensor(low1, name="low1")
high1 = ops.convert_to_tensor(high1, name="high1")
low2 = ops.convert_to_tensor(low2, name="low2")
high2 = ops.convert_to_tensor(high2, name="high2")
false_fail_rate = ops.convert_to_tensor(
false_fail_rate, name="false_fail_rate")
false_pass_rate = ops.convert_to_tensor(
false_pass_rate, name="false_pass_rate")
# Could choose to cleverly allocate discrepancy tolerances and
# failure probabilities, but this is sound.
n1 = min_num_samples_for_dkwm_mean_test(
discrepancy / 2., low1, high1,
false_fail_rate / 2., false_pass_rate / 2.)
n2 = min_num_samples_for_dkwm_mean_test(
discrepancy / 2., low2, high2,
false_fail_rate / 2., false_pass_rate / 2.)
return n1, n2
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/statistical_testing.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distributions
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = ["QuantizedDistribution"]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _logsum_expbig_minus_expsmall(big, small):
"""Stable evaluation of `Log[exp{big} - exp{small}]`.
To work correctly, we should have the pointwise relation: `small <= big`.
Args:
big: Floating-point `Tensor`
small: Floating-point `Tensor` with same `dtype` as `big` and broadcastable
shape.
Returns:
`Tensor` of same `dtype` as `big` and broadcast shape.
"""
with ops.name_scope("logsum_expbig_minus_expsmall", values=[small, big]):
return math_ops.log(1. - math_ops.exp(small - big)) + big
_prob_base_note = """
For whole numbers `y`,
```
P[Y = y] := P[X <= low], if y == low,
:= P[X > high - 1], y == high,
:= 0, if y < low or y > high,
:= P[y - 1 < X <= y], all other y.
```
"""
_prob_note = _prob_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`. If the
base distribution has a `survival_function` method, results will be more
accurate for large values of `y`, and in this case the `survival_function` must
also be defined on `y - 1`.
"""
_log_prob_note = _prob_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`. If the
base distribution has a `log_survival_function` method results will be more
accurate for large values of `y`, and in this case the `log_survival_function`
must also be defined on `y - 1`.
"""
_cdf_base_note = """
For whole numbers `y`,
```
cdf(y) := P[Y <= y]
= 1, if y >= high,
= 0, if y < low,
= P[X <= y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then the above definition applies.
"""
_cdf_note = _cdf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_cdf_note = _cdf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
_sf_base_note = """
For whole numbers `y`,
```
survival_function(y) := P[Y > y]
= 0, if y >= high,
= 1, if y < low,
= P[X > y], otherwise.
```
Since `Y` only has mass at whole numbers, `P[Y <= y] = P[Y <= floor(y)]`.
This dictates that fractional `y` are first floored to a whole number, and
then the above definition applies.
"""
_sf_note = _sf_base_note + """
The base distribution's `cdf` method must be defined on `y - 1`.
"""
_log_sf_note = _sf_base_note + """
The base distribution's `log_cdf` method must be defined on `y - 1`.
"""
class QuantizedDistribution(distributions.Distribution):
"""Distribution representing the quantization `Y = ceiling(X)`.
#### Definition in Terms of Sampling
```
1. Draw X
2. Set Y <-- ceiling(X)
3. If Y < low, reset Y <-- low
4. If Y > high, reset Y <-- high
5. Return Y
```
#### Definition in Terms of the Probability Mass Function
Given scalar random variable `X`, we define a discrete random variable `Y`
supported on the integers as follows:
```
P[Y = j] := P[X <= low], if j == low,
:= P[X > high - 1], j == high,
:= 0, if j < low or j > high,
:= P[j - 1 < X <= j], all other j.
```
Conceptually, without cutoffs, the quantization process partitions the real
line `R` into half open intervals, and identifies an integer `j` with the
right endpoints:
```
R = ... (-2, -1](-1, 0](0, 1](1, 2](2, 3](3, 4] ...
j = ... -1 0 1 2 3 4 ...
```
`P[Y = j]` is the mass of `X` within the `jth` interval.
If `low = 0`, and `high = 2`, then the intervals are redrawn
and `j` is re-assigned:
```
R = (-infty, 0](0, 1](1, infty)
j = 0 1 2
```
`P[Y = j]` is still the mass of `X` within the `jth` interval.
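As a small added sketch (assuming a standard Normal base distribution; the
values are illustrative only):
```python
import tensorflow_probability as tfp
tfd = tfp.distributions

# Quantize a standard Normal onto the integers {-3, ..., 3}.
qdist = tfd.QuantizedDistribution(
    distribution=tfd.Normal(loc=0., scale=1.),
    low=-3., high=3.)
qdist.prob(0.)   # P(-1 < X <= 0) under the base Normal.
qdist.prob(-3.)  # P(X <= -3): the mass collapsed onto the `low` cutoff.
```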
#### Examples
We illustrate a mixture of discretized logistic distributions
[(Salimans et al., 2017)][1]. This is used, for example, for capturing 16-bit
audio in WaveNet [(van den Oord et al., 2017)][2]. The values range in
a 1-D integer domain of `[0, 2**16-1]`, and the discretization captures
`P(x - 0.5 < X <= x + 0.5)` for all `x` in the domain excluding the endpoints.
The lowest value has probability `P(X <= 0.5)` and the highest value has
probability `P(2**16 - 1.5 < X)`.
Below we assume a `wavenet` function. It takes as `input` right-shifted audio
samples of shape `[..., sequence_length]`. It returns a real-valued tensor of
shape `[..., num_mixtures * 3]`, i.e., each mixture component has a `loc` and
`scale` parameter belonging to the logistic distribution, and a `logits`
parameter determining the unnormalized probability of that component.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
net = wavenet(inputs)
loc, unconstrained_scale, logits = tf.split(net,
num_or_size_splits=3,
axis=-1)
scale = tf.nn.softplus(unconstrained_scale)
# Form mixture of discretized logistic distributions. Note we shift the
# logistic distribution by -0.5. This lets the quantization capture "rounding"
# intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`.
discretized_logistic_dist = tfd.QuantizedDistribution(
distribution=tfd.TransformedDistribution(
distribution=tfd.Logistic(loc=loc, scale=scale),
bijector=tfb.AffineScalar(shift=-0.5)),
low=0.,
high=2**16 - 1.)
mixture_dist = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(logits=logits),
components_distribution=discretized_logistic_dist)
neg_log_likelihood = -tf.reduce_sum(mixture_dist.log_prob(targets))
train_op = tf.train.AdamOptimizer().minimize(neg_log_likelihood)
```
After instantiating `mixture_dist`, we illustrate maximum likelihood by
calculating its log-probability of audio samples as `targets` and optimizing.
#### References
[1]: Tim Salimans, Andrej Karpathy, Xi Chen, and Diederik P. Kingma.
PixelCNN++: Improving the PixelCNN with discretized logistic mixture
likelihood and other modifications.
_International Conference on Learning Representations_, 2017.
https://arxiv.org/abs/1701.05517
[2]: Aaron van den Oord et al. Parallel WaveNet: Fast High-Fidelity Speech
Synthesis. _arXiv preprint arXiv:1711.10433_, 2017.
https://arxiv.org/abs/1711.10433
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
distribution,
low=None,
high=None,
validate_args=False,
name="QuantizedDistribution"):
"""Construct a Quantized Distribution representing `Y = ceiling(X)`.
Some properties are inherited from the distribution defining `X`. Example:
`allow_nan_stats` is determined for this `QuantizedDistribution` by reading
the `distribution`.
Args:
distribution: The base distribution class to transform. Typically an
instance of `Distribution`.
low: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's `prob` should be defined at
`low`.
high: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's `prob` should be defined at
`high - 1`.
`high` must be strictly greater than `low`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: If `distribution` is not an instance of
`Distribution` or is not continuous.
NotImplementedError: If the base distribution does not implement `cdf`.
"""
parameters = dict(locals())
values = (
list(distribution.parameters.values()) +
[low, high])
with ops.name_scope(name, values=values) as name:
self._dist = distribution
if low is not None:
low = ops.convert_to_tensor(low, name="low")
if high is not None:
high = ops.convert_to_tensor(high, name="high")
check_ops.assert_same_float_dtype(
tensors=[self.distribution, low, high])
# We let QuantizedDistribution access _graph_parents since this class is
# more like a baseclass.
graph_parents = self._dist._graph_parents # pylint: disable=protected-access
checks = []
if validate_args and low is not None and high is not None:
message = "low must be strictly less than high."
checks.append(
check_ops.assert_less(
low, high, message=message))
self._validate_args = validate_args # self._check_integer uses this.
with ops.control_dependencies(checks if validate_args else []):
if low is not None:
self._low = self._check_integer(low)
graph_parents += [self._low]
else:
self._low = None
if high is not None:
self._high = self._check_integer(high)
graph_parents += [self._high]
else:
self._high = None
super(QuantizedDistribution, self).__init__(
dtype=self._dist.dtype,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=self._dist.allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._dist
@property
def low(self):
"""Lowest value that quantization returns."""
return self._low
@property
def high(self):
"""Highest value that quantization returns."""
return self._high
def _batch_shape_tensor(self):
return self.distribution.batch_shape_tensor()
def _batch_shape(self):
return self.distribution.batch_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _event_shape(self):
return self.distribution.event_shape
def _sample_n(self, n, seed=None):
low = self._low
high = self._high
with ops.name_scope("transform"):
n = ops.convert_to_tensor(n, name="n")
x_samps = self.distribution.sample(n, seed=seed)
ones = array_ops.ones_like(x_samps)
# Snap values to the intervals (j - 1, j].
result_so_far = math_ops.ceil(x_samps)
if low is not None:
result_so_far = array_ops.where(result_so_far < low,
low * ones, result_so_far)
if high is not None:
result_so_far = array_ops.where(result_so_far > high,
high * ones, result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_log_prob_note)
def _log_prob(self, y):
if not hasattr(self.distribution, "_log_cdf"):
raise NotImplementedError(
"'log_prob' not implemented unless the base distribution implements "
"'log_cdf'")
y = self._check_integer(y)
try:
return self._log_prob_with_logsf_and_logcdf(y)
except NotImplementedError:
return self._log_prob_with_logcdf(y)
def _log_prob_with_logcdf(self, y):
return _logsum_expbig_minus_expsmall(self.log_cdf(y), self.log_cdf(y - 1))
def _log_prob_with_logsf_and_logcdf(self, y):
"""Compute log_prob(y) using log survival_function and cdf together."""
# There are two options that would be equal if we had infinite precision:
# Log[ sf(y - 1) - sf(y) ]
# = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ]
# Log[ cdf(y) - cdf(y - 1) ]
# = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ]
logsf_y = self.log_survival_function(y)
logsf_y_minus_1 = self.log_survival_function(y - 1)
logcdf_y = self.log_cdf(y)
logcdf_y_minus_1 = self.log_cdf(y - 1)
# Important: Here we use select in a way such that no input is inf, this
# prevents the troublesome case where the output of select can be finite,
# but the output of grad(select) will be NaN.
# In either case, we are doing Log[ exp{big} - exp{small} ]
# We want to use the sf items precisely when we are on the right side of the
# median, which occurs when logsf_y < logcdf_y.
big = array_ops.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
small = array_ops.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
return _logsum_expbig_minus_expsmall(big, small)
@distribution_util.AppendDocstring(_prob_note)
def _prob(self, y):
if not hasattr(self.distribution, "_cdf"):
raise NotImplementedError(
"'prob' not implemented unless the base distribution implements "
"'cdf'")
y = self._check_integer(y)
try:
return self._prob_with_sf_and_cdf(y)
except NotImplementedError:
return self._prob_with_cdf(y)
def _prob_with_cdf(self, y):
return self.cdf(y) - self.cdf(y - 1)
def _prob_with_sf_and_cdf(self, y):
# There are two options that would be equal if we had infinite precision:
# sf(y - 1) - sf(y)
# cdf(y) - cdf(y - 1)
sf_y = self.survival_function(y)
sf_y_minus_1 = self.survival_function(y - 1)
cdf_y = self.cdf(y)
cdf_y_minus_1 = self.cdf(y - 1)
# sf_prob has greater precision iff we're on the right side of the median.
return array_ops.where(
sf_y < cdf_y, # True iff we're on the right side of the median.
sf_y_minus_1 - sf_y,
cdf_y - cdf_y_minus_1)
@distribution_util.AppendDocstring(_log_cdf_note)
def _log_cdf(self, y):
low = self._low
high = self._high
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= high,
# = 0, if y < low,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
result_so_far = self.distribution.log_cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = array_ops.where(j < low, neg_inf, result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_cdf_note)
def _cdf(self, y):
low = self._low
high = self._high
# Recall the promise:
# cdf(y) := P[Y <= y]
# = 1, if y >= high,
# = 0, if y < low,
# = P[X <= y], otherwise.
# P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
# between.
j = math_ops.floor(y)
# P[X <= j], used when low < X < high.
result_so_far = self.distribution.cdf(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.zeros_like(result_so_far),
result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.ones_like(result_so_far),
result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_log_sf_note)
def _log_survival_function(self, y):
low = self._low
high = self._high
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= high,
# = 1, if y < low,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when low < X < high.
result_so_far = self.distribution.log_survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.zeros_like(result_so_far),
result_so_far)
if high is not None:
neg_inf = -np.inf * array_ops.ones_like(result_so_far)
result_so_far = array_ops.where(j >= high, neg_inf, result_so_far)
return result_so_far
@distribution_util.AppendDocstring(_sf_note)
def _survival_function(self, y):
low = self._low
high = self._high
# Recall the promise:
# survival_function(y) := P[Y > y]
# = 0, if y >= high,
# = 1, if y < low,
# = P[X > y], otherwise.
# P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
# between.
j = math_ops.ceil(y)
# P[X > j], used when low < X < high.
result_so_far = self.distribution.survival_function(j)
# Broadcast, because it's possible that this is a single distribution being
# evaluated on a number of samples, or something like that.
j += array_ops.zeros_like(result_so_far)
# Re-define values at the cutoffs.
if low is not None:
result_so_far = array_ops.where(j < low,
array_ops.ones_like(result_so_far),
result_so_far)
if high is not None:
result_so_far = array_ops.where(j >= high,
array_ops.zeros_like(result_so_far),
result_so_far)
return result_so_far
def _check_integer(self, value):
with ops.name_scope("check_integer", values=[value]):
value = ops.convert_to_tensor(value, name="value")
if not self.validate_args:
return value
dependencies = [distribution_util.assert_integer_form(
value, message="value has non-integer components.")]
return control_flow_ops.with_dependencies(dependencies, value)
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/quantized_distribution.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Wishart distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"WishartCholesky",
"WishartFull",
]
class _WishartLinearOperator(distribution.Distribution):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar number of degrees of freedom `df` and
an instance of `LinearOperator` that provides matrix-free access to the
symmetric positive definite operator defining the scale matrix.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
See `WishartFull`, `WishartCholesky` for examples of initializing and using
this class.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale_operator,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Construct Wishart distributions.
Args:
df: `float` or `double` tensor, the degrees of freedom of the
distribution(s). `df` must be greater than or equal to `k`.
scale_operator: `float` or `double` instance of `LinearOperator`.
cholesky_input_output_matrices: Python `bool`. When `True`, any function whose
input or output is a matrix assumes the input is a Cholesky factor and
returns a Cholesky-factored matrix; for example, `log_prob` then takes a
Cholesky factor as input and `sample_n` returns a Cholesky factor.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if scale is not floating-type
TypeError: if scale.dtype != df.dtype
ValueError: if df < k, where scale operator event shape is
`(k, k)`
"""
parameters = dict(locals())
self._cholesky_input_output_matrices = cholesky_input_output_matrices
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[df, scale_operator]):
if not scale_operator.dtype.is_floating:
raise TypeError(
"scale_operator.dtype=%s is not a floating-point type" %
scale_operator.dtype)
if not scale_operator.is_square:
raise ValueError("scale_operator must be square.")
self._scale_operator = scale_operator
self._df = ops.convert_to_tensor(
df,
dtype=scale_operator.dtype,
name="df")
contrib_tensor_util.assert_same_float_dtype(
(self._df, self._scale_operator))
if (self._scale_operator.shape.ndims is None or
self._scale_operator.shape.dims[-1].value is None):
self._dimension = math_ops.cast(
self._scale_operator.domain_dimension_tensor(),
dtype=self._scale_operator.dtype, name="dimension")
else:
self._dimension = ops.convert_to_tensor(
self._scale_operator.shape.dims[-1].value,
dtype=self._scale_operator.dtype, name="dimension")
df_val = tensor_util.constant_value(self._df)
dim_val = tensor_util.constant_value(self._dimension)
if df_val is not None and dim_val is not None:
df_val = np.asarray(df_val)
if not df_val.shape:
df_val = [df_val]
if any(df_val < dim_val):
raise ValueError(
"Degrees of freedom (df = %s) cannot be less than "
"dimension of scale matrix (scale.dimension = %s)"
% (df_val, dim_val))
elif validate_args:
assertions = check_ops.assert_less_equal(
self._dimension, self._df,
message=("Degrees of freedom (df = %s) cannot be "
"less than dimension of scale matrix "
"(scale.dimension = %s)" %
(self._dimension, self._df)))
self._df = control_flow_ops.with_dependencies(
[assertions], self._df)
super(_WishartLinearOperator, self).__init__(
dtype=self._scale_operator.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
parameters=parameters,
graph_parents=([self._df, self._dimension] +
self._scale_operator.graph_parents),
name=name)
@property
def df(self):
"""Wishart distribution degree(s) of freedom."""
return self._df
def _square_scale_operator(self):
return self.scale_operator.matmul(
self.scale_operator.to_dense(), adjoint_arg=True)
def scale(self):
"""Wishart distribution scale matrix."""
if self._cholesky_input_output_matrices:
return self.scale_operator.to_dense()
else:
return self._square_scale_operator()
@property
def scale_operator(self):
"""Wishart distribution scale matrix as an Linear Operator."""
return self._scale_operator
@property
def cholesky_input_output_matrices(self):
"""Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._cholesky_input_output_matrices
@property
def dimension(self):
"""Dimension of underlying vector space. The `p` in `R^(p*p)`."""
return self._dimension
def _event_shape_tensor(self):
dimension = self.scale_operator.domain_dimension_tensor()
return array_ops.stack([dimension, dimension])
def _event_shape(self):
dimension = self.scale_operator.domain_dimension
return tensor_shape.TensorShape([dimension, dimension])
def _batch_shape_tensor(self):
return self.scale_operator.batch_shape_tensor()
def _batch_shape(self):
return self.scale_operator.batch_shape
def _sample_n(self, n, seed):
batch_shape = self.batch_shape_tensor()
event_shape = self.event_shape_tensor()
batch_ndims = array_ops.shape(batch_shape)[0]
ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
shape = array_ops.concat([[n], batch_shape, event_shape], 0)
# Complexity: O(nbk**2)
x = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
dtype=self.dtype,
seed=seed)
# Complexity: O(nbk)
# This parametrization is equivalent to Chi2, i.e.,
# ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
expanded_df = self.df * array_ops.ones(
self.scale_operator.batch_shape_tensor(),
dtype=self.df.dtype.base_dtype)
g = random_ops.random_gamma(shape=[n],
alpha=self._multi_gamma_sequence(
0.5 * expanded_df, self.dimension),
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(
seed, "wishart"))
# Complexity: O(nbk**2)
x = array_ops.matrix_band_part(x, -1, 0) # Tri-lower.
# Complexity: O(nbk)
x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))
# Make batch-op ready.
# Complexity: O(nbk**2)
perm = array_ops.concat([math_ops.range(1, ndims), [0]], 0)
x = array_ops.transpose(x, perm)
shape = array_ops.concat([batch_shape, [event_shape[0]], [-1]], 0)
x = array_ops.reshape(x, shape)
# Complexity: O(nbM) where M is the complexity of the operator solving a
# vector system. E.g., for LinearOperatorDiag, each matmul is O(k**2), so
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each matmul is O(k^3) so this step has complexity O(nbk^3).
x = self.scale_operator.matmul(x)
# Undo make batch-op ready.
# Complexity: O(nbk**2)
shape = array_ops.concat([batch_shape, event_shape, [n]], 0)
x = array_ops.reshape(x, shape)
perm = array_ops.concat([[ndims - 1], math_ops.range(0, ndims - 1)], 0)
x = array_ops.transpose(x, perm)
if not self.cholesky_input_output_matrices:
# Complexity: O(nbk^3)
x = math_ops.matmul(x, x, adjoint_b=True)
return x
def _log_prob(self, x):
if self.cholesky_input_output_matrices:
x_sqrt = x
else:
# Complexity: O(nbk^3)
x_sqrt = linalg_ops.cholesky(x)
batch_shape = self.batch_shape_tensor()
event_shape = self.event_shape_tensor()
ndims = array_ops.rank(x_sqrt)
# sample_ndims = ndims - batch_ndims - event_ndims
sample_ndims = ndims - array_ops.shape(batch_shape)[0] - 2
sample_shape = array_ops.strided_slice(
array_ops.shape(x_sqrt), [0], [sample_ndims])
# We need to be able to pre-multiply each matrix by its corresponding
# batch scale matrix. Since a Distribution Tensor supports multiple
# samples per batch, this means we need to reshape the input matrix `x`
# so that the first b dimensions are batch dimensions and the last two
# are of shape [dimension, dimension * number_of_samples]. Doing these
# gymnastics allows us to do a batch_solve.
#
# After we're done with sqrt_solve (the batch operation) we need to undo
# this reshaping so what we're left with is a Tensor partitionable by
# sample, batch, event dimensions.
# Complexity: O(nbk**2) since transpose must access every element.
scale_sqrt_inv_x_sqrt = x_sqrt
perm = array_ops.concat([math_ops.range(sample_ndims, ndims),
math_ops.range(0, sample_ndims)], 0)
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
shape = array_ops.concat(
(batch_shape, (math_ops.cast(
self.dimension, dtype=dtypes.int32), -1)),
0)
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
# Complexity: O(nbM*k) where M is the complexity of the operator solving
# a vector system. E.g., for LinearOperatorDiag, each solve is O(k), so
# this complexity is O(nbk**2). For LinearOperatorLowerTriangular,
# each solve is O(k**2) so this step has complexity O(nbk^3).
scale_sqrt_inv_x_sqrt = self.scale_operator.solve(
scale_sqrt_inv_x_sqrt)
# Undo make batch-op ready.
# Complexity: O(nbk**2)
shape = array_ops.concat([batch_shape, event_shape, sample_shape], 0)
scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
perm = array_ops.concat([math_ops.range(ndims - sample_ndims, ndims),
math_ops.range(0, ndims - sample_ndims)], 0)
scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
# Write V = SS', X = LL'. Then:
# tr[inv(V) X] = tr[inv(S)' inv(S) L L']
# = tr[inv(S) L L' inv(S)']
# = tr[(inv(S) L) (inv(S) L)']
# = sum_{ik} (inv(S) L)_{ik}**2
# The second equality follows from the cyclic permutation property.
# Complexity: O(nbk**2)
trace_scale_inv_x = math_ops.reduce_sum(
math_ops.square(scale_sqrt_inv_x_sqrt),
axis=[-2, -1])
# Complexity: O(nbk)
half_log_det_x = math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(x_sqrt)),
axis=[-1])
# Complexity: O(nbk**2)
log_prob = ((self.df - self.dimension - 1.) * half_log_det_x -
0.5 * trace_scale_inv_x -
self.log_normalization())
# Set shape hints.
# Try to merge what we know from the input then what we know from the
# parameters of this distribution.
if x.get_shape().ndims is not None:
log_prob.set_shape(x.get_shape()[:-2])
if (log_prob.get_shape().ndims is not None and
self.batch_shape.ndims is not None and
self.batch_shape.ndims > 0):
log_prob.get_shape()[-self.batch_shape.ndims:].merge_with(
self.batch_shape)
return log_prob
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _entropy(self):
half_dp1 = 0.5 * self.dimension + 0.5
half_df = 0.5 * self.df
return (self.dimension * (half_df + half_dp1 * math.log(2.)) +
2 * half_dp1 * self.scale_operator.log_abs_determinant() +
self._multi_lgamma(half_df, self.dimension) +
(half_dp1 - half_df) * self._multi_digamma(half_df, self.dimension))
def _mean(self):
if self.cholesky_input_output_matrices:
return (math_ops.sqrt(self.df)
* self.scale_operator.to_dense())
return self.df * self._square_scale_operator()
def _variance(self):
x = math_ops.sqrt(self.df) * self._square_scale_operator()
d = array_ops.expand_dims(array_ops.matrix_diag_part(x), -1)
v = math_ops.square(x) + math_ops.matmul(d, d, adjoint_b=True)
if self.cholesky_input_output_matrices:
return linalg_ops.cholesky(v)
return v
def _stddev(self):
if self.cholesky_input_output_matrices:
raise ValueError(
"Computing std. dev. when is cholesky_input_output_matrices=True "
"does not make sense.")
return linalg_ops.cholesky(self.variance())
def _mode(self):
s = self.df - self.dimension - 1.
s = array_ops.where_v2(
math_ops.less(s, 0.),
constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"), s)
if self.cholesky_input_output_matrices:
return math_ops.sqrt(s) * self.scale_operator.to_dense()
return s * self._square_scale_operator()
def mean_log_det(self, name="mean_log_det"):
"""Computes E[log(det(X))] under this Wishart distribution."""
with self._name_scope(name):
return (self._multi_digamma(0.5 * self.df, self.dimension) +
self.dimension * math.log(2.) +
2 * self.scale_operator.log_abs_determinant())
def log_normalization(self, name="log_normalization"):
"""Computes the log normalizing constant, log(Z)."""
with self._name_scope(name):
return (self.df * self.scale_operator.log_abs_determinant() +
0.5 * self.df * self.dimension * math.log(2.) +
self._multi_lgamma(0.5 * self.df, self.dimension))
def _multi_gamma_sequence(self, a, p, name="multi_gamma_sequence"):
"""Creates sequence used in multivariate (di)gamma; shape = shape(a)+[p]."""
with self._name_scope(name, values=[a, p]):
# Linspace only takes scalars, so we'll add in the offset afterwards.
seq = math_ops.linspace(
constant_op.constant(0., dtype=self.dtype),
0.5 - 0.5 * p,
math_ops.cast(p, dtypes.int32))
return seq + array_ops.expand_dims(a, [-1])
def _multi_lgamma(self, a, p, name="multi_lgamma"):
"""Computes the log multivariate gamma function; log(Gamma_p(a))."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return (0.25 * p * (p - 1.) * math.log(math.pi) +
math_ops.reduce_sum(math_ops.lgamma(seq),
axis=[-1]))
def _multi_digamma(self, a, p, name="multi_digamma"):
"""Computes the multivariate digamma function; Psi_p(a)."""
with self._name_scope(name, values=[a, p]):
seq = self._multi_gamma_sequence(a, p)
return math_ops.reduce_sum(math_ops.digamma(seq),
axis=[-1])
class WishartCholesky(_WishartLinearOperator):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees-of-freedom parameter `df` and a
lower-triangular Cholesky factor which characterizes the scale matrix.
Using WishartCholesky instead of WishartFull saves a constant factor of work:
it avoids an O(nbk^3) operation, i.e., a matrix product when sampling and a
Cholesky factorization in log_prob. For most use cases it often saves another
O(nbk^3) operation, since most uses of the Wishart will also use the Cholesky
factorization.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3x3 Wishart with Cholesky factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
chol_scale = tf.linalg.cholesky(...) # Shape is [3, 3].
dist = tfd.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on an observation in R^{3x3}, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.prob(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.prob(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Cholesky factored scale matrices.
df = [5, 4]
chol_scale = tf.linalg.cholesky(...) # Shape is [2, 3, 3].
dist = tfd.WishartCholesky(df=df, scale=chol_scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3].
dist.prob(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tfp.distributions.matrix_diag_transform.
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartCholesky"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The Cholesky factorization of
the symmetric positive definite scale matrix of the distribution.
cholesky_input_output_matrices: Python `bool`. When `True`, any function whose
input or output is a matrix assumes the input is a Cholesky factor and
returns a Cholesky-factored matrix; for example, `log_prob` then takes a
Cholesky factor as input and `sample_n` returns a Cholesky factor.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
with ops.name_scope("init", values=[scale]):
scale = ops.convert_to_tensor(scale)
if validate_args:
scale = control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(scale),
message="scale must be positive definite"),
check_ops.assert_equal(
array_ops.shape(scale)[-1],
array_ops.shape(scale)[-2],
message="scale must be square")
] if validate_args else [], scale)
super(WishartCholesky, self).__init__(
df=df,
scale_operator=linalg.LinearOperatorLowerTriangular(
tril=scale,
is_non_singular=True,
is_positive_definite=True,
is_square=True),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
class WishartFull(_WishartLinearOperator):
"""The matrix Wishart distribution on positive definite matrices.
This distribution is defined by a scalar degrees of freedom `df` and a
symmetric, positive definite scale matrix.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations
where `(k, k)` is the event space shape.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(X; df, scale) = det(X)**(0.5 (df-k-1)) exp(-0.5 tr[inv(scale) X]) / Z
Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df)
```
where:
* `df >= k` denotes the degrees of freedom,
* `scale` is a symmetric, positive definite, `k x k` matrix,
* `Z` is the normalizing constant, and,
* `Gamma_k` is the [multivariate Gamma function](
https://en.wikipedia.org/wiki/Multivariate_gamma_function).
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3x3 Wishart with Full factored scale matrix and 5
# degrees-of-freedom.(*)
df = 5
scale = ... # Shape is [3, 3]; positive definite.
dist = tfd.WishartFull(df=df, scale=scale)
# Evaluate this on an observation in R^{3x3}, returning a scalar.
x = ... # A 3x3 positive definite matrix.
dist.prob(x) # Shape is [], a scalar.
# Evaluate this on two observations, each in R^{3x3}, returning a length-two
# Tensor.
x = [x0, x1] # Shape is [2, 3, 3].
dist.prob(x) # Shape is [2].
# Initialize two 3x3 Wisharts with Full factored scale matrices.
df = [5, 4]
scale = ... # Shape is [2, 3, 3].
dist = tfd.WishartFull(df=df, scale=scale)
# Evaluate this on four observations.
x = [[x0, x1], [x2, x3]] # Shape is [2, 2, 3, 3]; xi is positive definite.
dist.prob(x) # Shape is [2, 2].
# (*) - To efficiently create a trainable covariance matrix, see the example
# in tfd.matrix_diag_transform.
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
scale,
cholesky_input_output_matrices=False,
validate_args=False,
allow_nan_stats=True,
name="WishartFull"):
"""Construct Wishart distributions.
Args:
df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
scale: `float` or `double` `Tensor`. The symmetric positive definite
scale matrix of the distribution.
cholesky_input_output_matrices: Python `bool`. When `True`, any function whose
input or output is a matrix assumes the input is a Cholesky factor and
returns a Cholesky-factored matrix; for example, `log_prob` then takes a
Cholesky factor as input and `sample_n` returns a Cholesky factor.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[scale]):
scale = ops.convert_to_tensor(scale)
if validate_args:
scale = distribution_util.assert_symmetric(scale)
chol = linalg_ops.cholesky(scale)
chol = control_flow_ops.with_dependencies([
check_ops.assert_positive(array_ops.matrix_diag_part(chol))
] if validate_args else [], chol)
super(WishartFull, self).__init__(
df=df,
scale_operator=linalg.LinearOperatorLowerTriangular(
tril=chol,
is_non_singular=True,
is_positive_definite=True,
is_square=True),
cholesky_input_output_matrices=cholesky_input_output_matrices,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
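# Illustrative sketch, not from the original TensorFlow source: a NumPy/SciPy check of
# the normalizing constant documented above,
#   Z = 2**(0.5 df k) |det(scale)|**(0.5 df) Gamma_k(0.5 df),
# written in log space for a dense `scale` matrix. `scipy.special.multigammaln` is the
# log multivariate gamma function log(Gamma_k(.)). The helper name is hypothetical.
import numpy as np
from scipy import special


def _example_wishart_log_normalization(df, scale):
  k = scale.shape[-1]
  _, logdet_scale = np.linalg.slogdet(scale)
  return (0.5 * df * k * np.log(2.)
          + 0.5 * df * logdet_scale
          + special.multigammaln(0.5 * df, k))

# For reference, the documented mean of the distribution is E[X] = df * scale
# (see `_mean` above, where `scale = scale_operator @ scale_operator.T`).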
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/wishart.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalDiagPlusLowRank",
]
class MultivariateNormalDiagPlusLowRank(
mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier ones(k)) +
scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor.T
```
where:
* `scale_diag.shape = [k]`,
* `scale_identity_multiplier.shape = []`,
* `scale_perturb_factor.shape = [k, r]`, typically `k >> r`, and,
* `scale_perturb_diag.shape = [r]`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian with covariance `cov = S @ S.T`,
# `S = diag(d) + U @ diag(m) @ U.T`. The perturbation, `U @ diag(m) @ U.T`, is
# a rank-2 update.
mu = [-0.5, 0, 0.5] # shape: [3]
d = [1.5, 0.5, 2] # shape: [3]
U = [[1., 2],
[-1, 1],
[2, -0.5]] # shape: [3, 2]
m = [4., 5] # shape: [2]
mvn = tfd.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=d,
scale_perturb_factor=U,
scale_perturb_diag=m)
# Evaluate this on an observation in `R^3`, returning a scalar.
mvn.prob([-1, 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians; `S = diag(d) + U @ U.T`.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [b, k] = [2, 3]
U = [[[1., 2],
[3, 4],
[5, 6]],
[[0.5, 0.75],
[1.0, 0.25],
[1.5, 1.25]]] # shape: [b, k, r] = [2, 3, 2]
m = [[0.1, 0.2],
[0.4, 0.5]] # shape: [b, r] = [2, 2]
mvn = tfd.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_perturb_factor=U,
scale_perturb_diag=m)
mvn.covariance().eval() # shape: [2, 3, 3]
# ==> [[[ 15.63 31.57 48.51]
# [ 31.57 69.31 105.05]
# [ 48.51 105.05 162.59]]
#
# [[ 2.59 1.41 3.35]
# [ 1.41 2.71 3.34]
# [ 3.35 3.34 8.35]]]
# Compute the pdf of two `R^3` observations (one from each batch);
# return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusLowRank"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier ones(k)) +
scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor.T
```
where:
* `scale_diag.shape = [k]`,
* `scale_identity_multiplier.shape = []`,
* `scale_perturb_factor.shape = [k, r]`, typically `k >> r`, and,
* `scale_perturb_diag.shape = [r]`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
scale_perturb_factor: Floating-point `Tensor` representing a rank-`r`
perturbation added to `scale`. May have shape `[B1, ..., Bb, k, r]`,
`b >= 0`, and characterizes `b`-batches of rank-`r` updates to `scale`.
When `None`, no rank-`r` update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing a diagonal matrix
inside the rank-`r` perturbation added to `scale`. May have shape
`[B1, ..., Bb, r]`, `b >= 0`, and characterizes `b`-batches of `r x r`
diagonal matrices inside the perturbation added to `scale`. When
`None`, an identity matrix is used inside the perturbation. Can only be
specified if `scale_perturb_factor` is also specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified, i.e., neither
`loc` nor `scale_diag` is given, so the event shape cannot be inferred.
"""
parameters = dict(locals())
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier, scale_perturb_factor,
scale_perturb_diag]):
has_low_rank = (scale_perturb_factor is not None or
scale_perturb_diag is not None)
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=validate_args,
assert_positive=has_low_rank)
scale_perturb_factor = _convert_to_tensor(
scale_perturb_factor,
name="scale_perturb_factor")
scale_perturb_diag = _convert_to_tensor(
scale_perturb_diag,
name="scale_perturb_diag")
if has_low_rank:
scale = linalg.LinearOperatorLowRankUpdate(
scale,
u=scale_perturb_factor,
diag_update=scale_perturb_diag,
is_diag_update_positive=scale_perturb_diag is None,
is_non_singular=True, # Implied by is_positive_definite=True.
is_self_adjoint=True,
is_positive_definite=True,
is_square=True)
super(MultivariateNormalDiagPlusLowRank, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
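# Illustrative sketch, not from the original TensorFlow source: a NumPy rendition of the
# (non-batch) `scale` documented above, assuming `scale_identity_multiplier` is `None`,
#   scale = diag(scale_diag)
#           + scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor.T,
# with `covariance = scale @ scale.T`. The helper name is hypothetical; the example
# values below reuse the first docstring example.
import numpy as np


def _example_diag_plus_low_rank_scale(scale_diag, scale_perturb_factor,
                                      scale_perturb_diag):
  d = np.diag(np.asarray(scale_diag, dtype=float))
  u = np.asarray(scale_perturb_factor, dtype=float)
  m = np.diag(np.asarray(scale_perturb_diag, dtype=float))
  return d + np.matmul(u, np.matmul(m, u.T))

# scale = _example_diag_plus_low_rank_scale(
#     scale_diag=[1.5, 0.5, 2.],
#     scale_perturb_factor=[[1., 2], [-1, 1], [2, -0.5]],
#     scale_perturb_diag=[4., 5])
# covariance = np.matmul(scale, scale.T)  # `covariance = scale @ scale.T` per the docstring.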
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.ops.distributions import distribution as distribution_lib
# The following two lines are redundant, in a sense. The first enables
# good coding practice *within* this file (`util.prefer_static_value`
# rather than `prefer_static_value`). The second ensures that users
# also get the core utils when they import this file.
from tensorflow.python.ops.distributions import util
from tensorflow.python.ops.distributions.util import * # pylint: disable=wildcard-import
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
"""Computes the standard deviation of a mixture distribution.
This function works regardless of the component distribution, so long as
each component's mean and standard deviation can be provided.
Args:
mixture_weight_vector: A 2D tensor with shape [batch_size, num_components]
mean_vector: A 2D tensor of mixture component means. Has shape
`[batch_size, num_components]`.
stddev_vector: A 2D tensor of mixture component standard deviations. Has
shape `[batch_size, num_components]`.
Returns:
A 1D tensor of shape `[batch_size]` representing the standard deviation of
the mixture distribution with given weights and component means and standard
deviations.
Raises:
ValueError: If the shapes of the input tensors are not as expected.
"""
mixture_weight_vector.shape.assert_has_rank(2)
if not mean_vector.shape.is_compatible_with(mixture_weight_vector.shape):
raise ValueError("Expecting means to have same shape as mixture weights.")
if not stddev_vector.shape.is_compatible_with(mixture_weight_vector.shape):
raise ValueError("Expecting stddevs to have same shape as mixture weights.")
# Reshape the distribution parameters for batched vectorized dot products.
pi_for_dot_prod = array_ops.expand_dims(mixture_weight_vector, axis=1)
mu_for_dot_prod = array_ops.expand_dims(mean_vector, axis=2)
sigma_for_dot_prod = array_ops.expand_dims(stddev_vector, axis=2)
# weighted average of component means under mixture distribution.
mean_wa = math_ops.matmul(pi_for_dot_prod, mu_for_dot_prod)
mean_wa = array_ops.reshape(mean_wa, (-1,))
# weighted average of component variances under mixture distribution.
var_wa = math_ops.matmul(pi_for_dot_prod,
math_ops.square(sigma_for_dot_prod))
var_wa = array_ops.reshape(var_wa, (-1,))
# weighted average of component squared means under mixture distribution.
sq_mean_wa = math_ops.matmul(pi_for_dot_prod,
math_ops.square(mu_for_dot_prod))
sq_mean_wa = array_ops.reshape(sq_mean_wa, (-1,))
mixture_variance = var_wa + sq_mean_wa - math_ops.square(mean_wa)
return math_ops.sqrt(mixture_variance)
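# Illustrative sketch, not from the original TensorFlow source: `mixture_stddev` above
# implements the law of total variance,
#   Var_mix = sum_i w_i * (sigma_i**2 + mu_i**2) - (sum_i w_i * mu_i)**2,
# via batched matmuls. A plain NumPy version of the same identity (hypothetical name):
import numpy as np


def _example_mixture_stddev(mixture_weights, means, stddevs):
  w = np.asarray(mixture_weights, dtype=float)   # [batch_size, num_components]
  mu = np.asarray(means, dtype=float)            # [batch_size, num_components]
  sigma = np.asarray(stddevs, dtype=float)       # [batch_size, num_components]
  mean_mix = np.sum(w * mu, axis=-1)                           # Mixture mean.
  second_moment = np.sum(w * (sigma ** 2 + mu ** 2), axis=-1)  # Mixture second moment.
  return np.sqrt(second_moment - mean_mix ** 2)                # Shape [batch_size].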
def make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a lower triangular matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_tril: Floating-point `Tensor` representing the lower triangular matrix.
`scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
lower triangular matrix.
When `None` no `scale_tril` term is added to the LinOp.
The upper triangular elements above the diagonal are ignored.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
array_ops.matrix_diag_part(x),
message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
array_ops.matrix_diag_part(x),
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero"),
], x)
with ops.name_scope(name, "make_tril_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_tril is not None:
scale_tril = array_ops.matrix_band_part(scale_tril, -1, 0) # Zero out TriU.
tril_diag = array_ops.matrix_diag_part(scale_tril)
if scale_diag is not None:
tril_diag += scale_diag
if scale_identity_multiplier is not None:
tril_diag += scale_identity_multiplier[..., array_ops.newaxis]
scale_tril = array_ops.matrix_set_diag(scale_tril, tril_diag)
return linalg.LinearOperatorLowerTriangular(
tril=_maybe_attach_assertion(scale_tril),
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=assert_positive)
return make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
shape_hint=shape_hint,
validate_args=validate_args,
assert_positive=assert_positive,
name=name)
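# Illustrative sketch, not from the original TensorFlow source: a NumPy rendition of the
# `scale_tril` branch above for a single (non-batch) matrix. It zeros out the upper
# triangle, then adds `scale_diag` and `scale_identity_multiplier` to the diagonal.
# The helper name and defaults are hypothetical.
import numpy as np


def _example_dense_tril_scale(scale_tril, scale_diag=None,
                              scale_identity_multiplier=None):
  tril = np.tril(np.asarray(scale_tril, dtype=float))  # Drop elements above the diagonal.
  diag = np.diag(tril).copy()
  if scale_diag is not None:
    diag = diag + np.asarray(scale_diag, dtype=float)
  if scale_identity_multiplier is not None:
    diag = diag + float(scale_identity_multiplier)
  out = tril.copy()
  np.fill_diagonal(out, diag)
  return out

# _example_dense_tril_scale([[1., 9.], [3., 4.]], scale_diag=[0.5, 0.5],
#                           scale_identity_multiplier=2.)
# ==> [[3.5, 0.], [3., 6.5]]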
def make_diag_scale(
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None):
"""Creates a LinOp representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to the LinOp.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
assert_positive: Python `bool` indicating whether LinOp should be checked
for being positive definite.
name: Python `str` name given to ops managed by this object.
Returns:
`LinearOperator` representing a diagonal matrix (or a scaled identity matrix).
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return control_flow_ops.with_dependencies([
check_ops.assert_positive(
x, message="diagonal part must be positive"),
], x)
return control_flow_ops.with_dependencies([
check_ops.assert_none_equal(
x,
array_ops.zeros([], x.dtype),
message="diagonal part must be non-zero")], x)
with ops.name_scope(name, "make_diag_scale",
values=[loc, scale_diag, scale_identity_multiplier]):
loc = _convert_to_tensor(loc, name="loc")
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag")
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier")
if scale_diag is not None:
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier[..., array_ops.newaxis]
return linalg.LinearOperatorDiag(
diag=_maybe_attach_assertion(scale_diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive)
if loc is None and shape_hint is None:
raise ValueError(
"Cannot infer `event_shape` unless `loc` or "
"`shape_hint` is specified.")
if shape_hint is None:
shape_hint = loc.shape[-1]
if scale_identity_multiplier is None:
return linalg.LinearOperatorIdentity(
num_rows=shape_hint,
dtype=loc.dtype.base_dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
return linalg.LinearOperatorScaledIdentity(
num_rows=shape_hint,
multiplier=_maybe_attach_assertion(scale_identity_multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive,
assert_proper_shapes=validate_args)
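# Illustrative sketch, not from the original TensorFlow source: the dispatch logic of
# `make_diag_scale` above, in plain NumPy for the non-batch case. It builds a diagonal
# matrix when `scale_diag` is given, otherwise a (scaled) identity whose size comes from
# `shape_hint` or the trailing dimension of `loc`. The helper name is hypothetical.
import numpy as np


def _example_dense_diag_scale(scale_diag=None, scale_identity_multiplier=None,
                              loc=None, shape_hint=None):
  if scale_diag is not None:
    diag = np.asarray(scale_diag, dtype=float)
    if scale_identity_multiplier is not None:
      diag = diag + float(scale_identity_multiplier)
    return np.diag(diag)
  if loc is None and shape_hint is None:
    raise ValueError("Cannot infer `event_shape` unless `loc` or "
                     "`shape_hint` is specified.")
  k = int(shape_hint) if shape_hint is not None else np.asarray(loc).shape[-1]
  if scale_identity_multiplier is None:
    return np.eye(k)
  return float(scale_identity_multiplier) * np.eye(k)

# _example_dense_diag_scale(scale_diag=[1., 2.], scale_identity_multiplier=0.5)
# ==> [[1.5, 0.], [0., 2.5]]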
def shapes_from_loc_and_scale(loc, scale, name="shapes_from_loc_and_scale"):
"""Infer distribution batch and event shapes from a location and scale.
Location and scale family distributions determine their batch/event shape by
broadcasting the `loc` and `scale` args. This helper does that broadcast,
statically if possible.
Batch shape broadcasts as per the normal rules.
We allow the `loc` event shape to broadcast up to that of `scale`. We do not
allow `scale`'s event shape to change. Therefore, the last dimension of `loc`
must either be size `1`, or the same as `scale.range_dimension`.
See `MultivariateNormalLinearOperator` for a usage example.
Args:
loc: `N-D` `Tensor` with `N >= 1` (already converted to tensor) or `None`.
If `None`, both batch and event shape are determined by `scale`.
scale: A `LinearOperator` instance.
name: A string name to prepend to created ops.
Returns:
batch_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
event_shape: `TensorShape` (if broadcast is done statically), or `Tensor`.
Raises:
ValueError: If the last dimension of `loc` is determined statically to be
different than the range of `scale`.
"""
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Get event shape.
event_size = scale.range_dimension_tensor()
event_size_const = tensor_util.constant_value(event_size)
if event_size_const is not None:
event_shape = event_size_const.reshape([1])
else:
event_shape = event_size[array_ops.newaxis]
# Static check that event shapes match.
if loc is not None:
loc_event_size = tensor_shape.dimension_value(loc.get_shape()[-1])
if loc_event_size is not None and event_size_const is not None:
if loc_event_size != 1 and loc_event_size != event_size_const:
raise ValueError(
"Event size of 'scale' (%d) could not be broadcast up to that of "
"'loc' (%d)." % (loc_event_size, event_size_const))
# Get batch shape.
batch_shape = scale.batch_shape_tensor()
if loc is None:
batch_shape_const = tensor_util.constant_value(batch_shape)
batch_shape = (
batch_shape_const if batch_shape_const is not None else batch_shape)
else:
loc_batch_shape = loc.get_shape().with_rank_at_least(1)[:-1]
if (loc.get_shape().ndims is None or
not loc_batch_shape.is_fully_defined()):
loc_batch_shape = array_ops.shape(loc)[:-1]
else:
loc_batch_shape = ops.convert_to_tensor(loc_batch_shape,
name="loc_batch_shape")
# This is defined in the core util module.
# pylint: disable=undefined-variable
batch_shape = prefer_static_broadcast_shape(batch_shape, loc_batch_shape)
# pylint: enable=undefined-variable
return batch_shape, event_shape
def get_broadcast_shape(*tensors):
"""Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`.
"""
# Try static.
s_shape = tensors[0].shape
for t in tensors[1:]:
s_shape = array_ops.broadcast_static_shape(s_shape, t.shape)
if s_shape.is_fully_defined():
return s_shape.as_list()
# Fallback on dynamic.
d_shape = array_ops.shape(tensors[0])
for t in tensors[1:]:
d_shape = array_ops.broadcast_dynamic_shape(d_shape, array_ops.shape(t))
return d_shape
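# Illustrative sketch, not from the original TensorFlow source: `get_broadcast_shape`
# above follows ordinary NumPy broadcasting rules, trying static shapes first and then
# falling back to a dynamic computation. A NumPy-only analogue operating directly on
# shape tuples (hypothetical name):
import numpy as np


def _example_broadcast_shape(*shapes):
  result = ()
  for s in shapes:
    result = np.broadcast(np.empty(result), np.empty(s)).shape
  return list(result)

# _example_broadcast_shape((2, 1, 3), (5, 3)) ==> [2, 5, 3]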
def is_diagonal_scale(scale):
"""Returns `True` if `scale` is a `LinearOperator` that is known to be diag.
Args:
scale: `LinearOperator` instance.
Returns:
Python `bool`.
Raises:
TypeError: If `scale` is not a `LinearOperator`.
"""
if not isinstance(scale, linalg.LinearOperator):
raise TypeError("Expected argument 'scale' to be instance of LinearOperator"
". Found: %s" % scale)
return (isinstance(scale, linalg.LinearOperatorIdentity) or
isinstance(scale, linalg.LinearOperatorScaledIdentity) or
isinstance(scale, linalg.LinearOperatorDiag))
def maybe_check_scalar_distribution(
distribution, expected_base_dtype, validate_args):
"""Helper which checks validity of a scalar `distribution` init arg.
Valid here means:
* `distribution` has scalar batch and event shapes.
* `distribution` is `FULLY_REPARAMETERIZED`
* `distribution` has expected dtype.
Args:
distribution: `Distribution`-like object.
expected_base_dtype: `TensorFlow` `dtype`.
validate_args: Python `bool`. Whether to do additional checks:
(i) check that reparameterization_type is `FULLY_REPARAMETERIZED`.
(ii) add `tf.Assert` ops to the graph to enforce that distribution
is scalar in the event that this cannot be determined statically.
Returns:
List of `tf.Assert` ops to run to enforce validity checks that could not
be statically determined. Empty if `not validate_args`.
Raises:
ValueError: If validate_args and distribution is not FULLY_REPARAMETERIZED
ValueError: If distribution is statically determined to not have both
scalar batch and scalar event shapes.
"""
if distribution.dtype != expected_base_dtype:
raise TypeError("dtype mismatch; "
"distribution.dtype=\"{}\" is not \"{}\"".format(
distribution.dtype.name, expected_base_dtype.name))
# Although `reparameterization_type` is a static property, we guard it by
# `validate_args`. This allows users to use a `distribution` which is not
# reparameterized itself. However, we tacitly assume that although the
# distribution is not reparameterized, it only depends on non-trainable
# variables.
if validate_args and (distribution.reparameterization_type
!= distribution_lib.FULLY_REPARAMETERIZED):
raise ValueError("Base distribution should be reparameterized or be "
"a function of non-trainable variables; "
"distribution.reparameterization_type = \"{}\" "
"!= \"FULLY_REPARAMETERIZED\".".format(
distribution.reparameterization_type))
with ops.name_scope(name="check_distribution"):
assertions = []
def check_is_scalar(is_scalar, name):
is_scalar_ = static_value(is_scalar)
if is_scalar_ is not None:
if not is_scalar_:
raise ValueError("distribution must be scalar; "
"distribution.{}=False is not True".format(name))
elif validate_args:
assertions.append(check_ops.assert_equal(
is_scalar, True,
message=("distribution must be scalar; "
"distribution.{}=False is not True".format(name))))
check_is_scalar(distribution.is_scalar_event(), "is_scalar_event")
check_is_scalar(distribution.is_scalar_batch(), "is_scalar_batch")
return assertions
def pad_mixture_dimensions(x, mixture_distribution, categorical_distribution,
event_ndims):
"""Pad dimensions of event tensors for mixture distributions.
See `Mixture._sample_n` and `MixtureSameFamily._sample_n` for usage examples.
Args:
x: event tensor to pad.
mixture_distribution: Base distribution of the mixture.
categorical_distribution: `Categorical` distribution that mixes the base
distribution.
event_ndims: Integer specifying the number of event dimensions in the event
tensor.
Returns:
A padded version of `x` that can broadcast with `categorical_distribution`.
"""
with ops.name_scope("pad_mix_dims", values=[x]):
def _get_ndims(d):
if d.batch_shape.ndims is not None:
return d.batch_shape.ndims
return array_ops.shape(d.batch_shape_tensor())[0]
dist_batch_ndims = _get_ndims(mixture_distribution)
cat_batch_ndims = _get_ndims(categorical_distribution)
pad_ndims = array_ops.where_v2(categorical_distribution.is_scalar_batch(),
dist_batch_ndims,
dist_batch_ndims - cat_batch_ndims)
s = array_ops.shape(x)
x = array_ops.reshape(x, shape=array_ops.concat([
s[:-1],
array_ops.ones([pad_ndims], dtype=dtypes.int32),
s[-1:],
array_ops.ones([event_ndims], dtype=dtypes.int32),
], axis=0))
return x
def static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def move_dimension(x, source_idx, dest_idx):
"""Move a single tensor dimension within its shape.
This is a special case of `tf.transpose()`, which applies
arbitrary permutations to tensor dimensions.
Args:
x: Tensor of rank `ndims`.
source_idx: Integer index into `x.shape` (negative indexing is
supported).
dest_idx: Integer index into `x.shape` (negative indexing is
supported).
Returns:
x_perm: Tensor of rank `ndims`, in which the dimension at original
index `source_idx` has been moved to new index `dest_idx`, with
all other dimensions retained in their original order.
Example:
```python
x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[200, 30, 4, 1, 6])
x_perm = move_dimension(x, 1, 1) # no-op
x_perm = move_dimension(x, 0, 3) # result shape [30, 4, 1, 200, 6]
x_perm = move_dimension(x, 0, -2) # equivalent to previous
x_perm = move_dimension(x, 4, 2) # result shape [200, 30, 6, 4, 1]
```
"""
ndims = util.prefer_static_rank(x)
if isinstance(source_idx, int):
dtype = dtypes.int32
else:
dtype = dtypes.as_dtype(source_idx.dtype)
# Handle negative indexing. Since ndims might be dynamic, this makes
# source_idx and dest_idx also possibly dynamic.
if source_idx < 0:
source_idx = ndims + source_idx
if dest_idx < 0:
dest_idx = ndims + dest_idx
# Construct the appropriate permutation of dimensions, depending
# whether the source is before or after the destination.
def move_left_permutation():
return util.prefer_static_value(
array_ops.concat([
math_ops.range(0, dest_idx, dtype=dtype),
[source_idx],
math_ops.range(dest_idx, source_idx, dtype=dtype),
math_ops.range(source_idx+1, ndims, dtype=dtype)], axis=0))
def move_right_permutation():
return util.prefer_static_value(
array_ops.concat([
math_ops.range(0, source_idx, dtype=dtype),
math_ops.range(source_idx+1, dest_idx+1, dtype=dtype),
[source_idx],
math_ops.range(dest_idx+1, ndims, dtype=dtype)], axis=0))
def x_permuted():
return array_ops.transpose(
x, perm=smart_cond.smart_cond(source_idx < dest_idx,
move_right_permutation,
move_left_permutation))
# One final conditional to handle the special case where source
# and destination indices are equal.
return smart_cond.smart_cond(math_ops.equal(source_idx, dest_idx),
lambda: x,
x_permuted)
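# Illustrative sketch, not from the original TensorFlow source: `move_dimension` above is
# the graph-friendly analogue of NumPy's `np.moveaxis`, so the docstring example can be
# checked eagerly as follows (hypothetical helper name):
import numpy as np


def _example_move_dimension(x, source_idx, dest_idx):
  return np.moveaxis(x, source_idx, dest_idx)

# _example_move_dimension(np.zeros([200, 30, 4, 1, 6]), 0, 3).shape == (30, 4, 1, 200, 6),
# matching `move_dimension(x, 0, 3)` in the docstring above.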
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/distribution_util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The BatchReshape distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.util import deprecation
__all__ = [
"BatchReshape",
]
class BatchReshape(distribution_lib.Distribution):
"""The Batch-Reshaping distribution.
This "meta-distribution" reshapes the batch dimensions of another
distribution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
dtype = np.float32
dims = 2
new_batch_shape = [1, 2, -1]
old_batch_shape = [6]
scale = np.ones(old_batch_shape + [dims], dtype)
mvn = tfd.MultivariateNormalDiag(scale_diag=scale)
reshape_mvn = tfd.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape,
validate_args=True)
reshape_mvn.batch_shape
# ==> [1, 2, 3]
x = reshape_mvn.sample(sample_shape=[4, 5])
x.shape
# ==> [4, 5, 1, 2, 3, 2] == sample_shape + new_batch_shape + [dims]
reshape_mvn.log_prob(x).shape
# ==> [4, 5, 1, 2, 3] == sample_shape + new_batch_shape
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
distribution,
batch_shape,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Construct BatchReshape distribution.
Args:
distribution: The base distribution instance to reshape. Typically an
instance of `Distribution`.
batch_shape: Positive `int`-like vector-shaped `Tensor` representing
the new shape of the batch dimensions. Up to one dimension may contain
`-1`, meaning the remainder of the batch size.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: The name to give Ops created by the initializer.
Default value: `"BatchReshape" + distribution.name`.
Raises:
ValueError: if `batch_shape` is not a vector.
ValueError: if `batch_shape` has non-positive elements.
ValueError: if `batch_shape` size is not the same as a
`distribution.batch_shape` size.
"""
parameters = dict(locals())
name = name or "BatchReshape" + distribution.name
with ops.name_scope(name, values=[batch_shape]) as name:
# The unexpanded batch shape may contain up to one dimension of -1.
self._batch_shape_unexpanded = ops.convert_to_tensor(
batch_shape, dtype=dtypes.int32, name="batch_shape")
validate_init_args_statically(distribution, self._batch_shape_unexpanded)
batch_shape, batch_shape_static, runtime_assertions = calculate_reshape(
distribution.batch_shape_tensor(), self._batch_shape_unexpanded,
validate_args)
self._distribution = distribution
self._batch_shape_ = batch_shape
self._batch_shape_static = batch_shape_static
self._runtime_assertions = runtime_assertions
super(BatchReshape, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
[self._batch_shape_unexpanded] + distribution._graph_parents), # pylint: disable=protected-access
name=name)
@property
def distribution(self):
return self._distribution
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return array_ops.identity(self._batch_shape_)
def _batch_shape(self):
return self._batch_shape_static
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
return array_ops.identity(self.distribution.event_shape_tensor())
def _event_shape(self):
return self.distribution.event_shape
def _sample_n(self, n, seed=None):
with ops.control_dependencies(self._runtime_assertions):
x = self.distribution.sample(sample_shape=n, seed=seed)
new_shape = array_ops.concat(
[
[n],
self._batch_shape_unexpanded,
self.event_shape_tensor(),
],
axis=0)
return array_ops.reshape(x, new_shape)
def _log_prob(self, x):
return self._call_reshape_input_output(
self.distribution.log_prob, x)
def _prob(self, x):
return self._call_reshape_input_output(
self.distribution.prob, x)
def _log_cdf(self, x):
return self._call_reshape_input_output(
self.distribution.log_cdf, x)
def _cdf(self, x):
return self._call_reshape_input_output(
self.distribution.cdf, x)
def _log_survival_function(self, x):
return self._call_reshape_input_output(
self.distribution.log_survival_function, x)
def _survival_function(self, x):
return self._call_reshape_input_output(
self.distribution.survival_function, x)
def _entropy(self):
return self._call_and_reshape_output(self.distribution.entropy, [],
[tensor_shape.TensorShape([])])
def _mean(self):
return self._call_and_reshape_output(self.distribution.mean)
def _mode(self):
return self._call_and_reshape_output(self.distribution.mode)
def _stddev(self):
return self._call_and_reshape_output(self.distribution.stddev)
def _variance(self):
return self._call_and_reshape_output(self.distribution.variance)
def _covariance(self):
return self._call_and_reshape_output(
self.distribution.covariance,
[self.event_shape_tensor()]*2,
[self.event_shape]*2)
def _sample_shape(self, x):
"""Computes graph and static `sample_shape`."""
x_ndims = (array_ops.rank(x) if x.shape.ndims is None else x.shape.ndims)
event_ndims = (array_ops.size(self.event_shape_tensor())
if self.event_shape.ndims is None
else self.event_shape.ndims)
batch_ndims = (
array_ops.size(self._batch_shape_unexpanded)
if self.batch_shape.ndims is None else self.batch_shape.ndims)
sample_ndims = x_ndims - batch_ndims - event_ndims
if isinstance(sample_ndims, int):
static_sample_shape = x.shape[:sample_ndims]
else:
static_sample_shape = tensor_shape.TensorShape(None)
if static_sample_shape.is_fully_defined():
sample_shape = np.int32(static_sample_shape.as_list())
else:
sample_shape = array_ops.shape(x)[:sample_ndims]
return sample_shape, static_sample_shape
def _call_reshape_input_output(self, fn, x):
"""Calls `fn`, appropriately reshaping its input `x` and output."""
with ops.control_dependencies(
self._runtime_assertions + self._validate_sample_arg(x)):
sample_shape, static_sample_shape = self._sample_shape(x)
old_shape = array_ops.concat([
sample_shape,
self.distribution.batch_shape_tensor(),
self.event_shape_tensor(),
], axis=0)
result = fn(array_ops.reshape(x, old_shape))
new_shape = array_ops.concat(
[
sample_shape,
self._batch_shape_unexpanded,
], axis=0)
result = array_ops.reshape(result, new_shape)
if (static_sample_shape.ndims is not None and
self.batch_shape.ndims is not None):
new_shape = static_sample_shape.concatenate(self.batch_shape)
result.set_shape(result.shape.merge_with(new_shape))
return result
def _call_and_reshape_output(
self,
fn,
event_shape_list=None,
static_event_shape_list=None):
"""Calls `fn` and appropriately reshapes its output."""
with ops.control_dependencies(self._runtime_assertions):
if event_shape_list is None:
event_shape_list = [self._event_shape_tensor()]
if static_event_shape_list is None:
static_event_shape_list = [self.event_shape]
new_shape = array_ops.concat(
[self._batch_shape_unexpanded] + event_shape_list, axis=0)
result = array_ops.reshape(fn(), new_shape)
if (self.batch_shape.ndims is not None and
self.event_shape.ndims is not None):
event_shape = tensor_shape.TensorShape([])
for rss in static_event_shape_list:
event_shape = event_shape.concatenate(rss)
static_shape = result.shape.merge_with(
self.batch_shape.concatenate(event_shape))
result.set_shape(static_shape)
return result
def _validate_sample_arg(self, x):
"""Helper which validates sample arg, e.g., input to `log_prob`."""
with ops.name_scope(name="validate_sample_arg", values=[x]):
x_ndims = (array_ops.rank(x) if x.shape.ndims is None else x.shape.ndims)
event_ndims = (array_ops.size(self.event_shape_tensor())
if self.event_shape.ndims is None
else self.event_shape.ndims)
batch_ndims = (
array_ops.size(self._batch_shape_unexpanded)
if self.batch_shape.ndims is None else self.batch_shape.ndims)
expected_batch_event_ndims = batch_ndims + event_ndims
if (isinstance(x_ndims, int) and
isinstance(expected_batch_event_ndims, int)):
if x_ndims < expected_batch_event_ndims:
raise NotImplementedError(
"Broadcasting is not supported; too few batch and event dims "
"(expected at least {}, saw {}).".format(
expected_batch_event_ndims, x_ndims))
ndims_assertion = []
elif self.validate_args:
ndims_assertion = [
check_ops.assert_greater_equal(
x_ndims,
expected_batch_event_ndims,
message=("Broadcasting is not supported; too few "
"batch and event dims."),
name="assert_batch_and_event_ndims_large_enough"),
]
if (self.batch_shape.is_fully_defined() and
self.event_shape.is_fully_defined()):
expected_batch_event_shape = np.int32(self.batch_shape.concatenate(
self.event_shape).as_list())
else:
expected_batch_event_shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], axis=0)
sample_ndims = x_ndims - expected_batch_event_ndims
if isinstance(sample_ndims, int):
sample_ndims = max(sample_ndims, 0)
if (isinstance(sample_ndims, int) and
x.shape[sample_ndims:].is_fully_defined()):
actual_batch_event_shape = np.int32(x.shape[sample_ndims:].as_list())
else:
sample_ndims = math_ops.maximum(sample_ndims, 0)
actual_batch_event_shape = array_ops.shape(x)[sample_ndims:]
if (isinstance(expected_batch_event_shape, np.ndarray) and
isinstance(actual_batch_event_shape, np.ndarray)):
if any(expected_batch_event_shape != actual_batch_event_shape):
raise NotImplementedError("Broadcasting is not supported; "
"unexpected batch and event shape "
"(expected {}, saw {}).".format(
expected_batch_event_shape,
actual_batch_event_shape))
# We need to set the final runtime-assertions to `ndims_assertion` since
# it's possible this assertion was created. We could add a condition to
# only do so if `self.validate_args == True`; however, this is redundant
# as `ndims_assertion` already encodes this information.
runtime_assertions = ndims_assertion
elif self.validate_args:
# We need to make the `ndims_assertion` a control dep because otherwise
# TF itself might raise an exception owing to this assertion being
# ill-defined, i.e., one cannot even compare Tensors of different ranks.
with ops.control_dependencies(ndims_assertion):
shape_assertion = check_ops.assert_equal(
expected_batch_event_shape,
actual_batch_event_shape,
message=("Broadcasting is not supported; "
"unexpected batch and event shape."),
name="assert_batch_and_event_shape_same")
runtime_assertions = [shape_assertion]
else:
runtime_assertions = []
return runtime_assertions
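# A minimal usage sketch, assuming the TF 1.x `tf.contrib.distributions`
# aliases and graph-mode execution: a single `-1` in `batch_shape` is
# inferred from the base distribution's batch size, as described in
# `__init__` above. The helper name is illustrative, not part of the API.
def _batch_reshape_usage_sketch():
  import tensorflow as tf  # Local import keeps the sketch self-contained.
  tfd = tf.contrib.distributions
  base = tfd.Normal(loc=tf.zeros([6]), scale=tf.ones([6]))  # batch_shape: [6]
  reshaped = tfd.BatchReshape(distribution=base, batch_shape=[2, -1])
  with tf.Session() as sess:
    # The single -1 is inferred from the base batch size 6, giving [2, 3].
    return sess.run(reshaped.batch_shape_tensor())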
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
"""Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
if batch_shape_static.is_fully_defined():
return np.int32(batch_shape_static.as_list()), batch_shape_static, []
with ops.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
original_size = math_ops.reduce_prod(original_shape)
implicit_dim = math_ops.equal(new_shape, -1)
size_implicit_dim = (
original_size // math_ops.maximum(1, -math_ops.reduce_prod(new_shape)))
new_ndims = array_ops.shape(new_shape)
expanded_new_shape = array_ops.where_v2( # Assumes exactly one `-1`.
implicit_dim, array_ops.fill(new_ndims, size_implicit_dim), new_shape)
validations = [] if not validate else [
check_ops.assert_rank(
original_shape, 1, message="Original shape must be a vector."),
check_ops.assert_rank(
new_shape, 1, message="New shape must be a vector."),
check_ops.assert_less_equal(
math_ops.count_nonzero(implicit_dim, dtype=dtypes.int32),
1,
message="At most one dimension can be unknown."),
check_ops.assert_positive(
expanded_new_shape, message="Shape elements must be >=-1."),
check_ops.assert_equal(
math_ops.reduce_prod(expanded_new_shape),
original_size,
message="Shape sizes do not match."),
]
return expanded_new_shape, batch_shape_static, validations
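# A minimal plain-Python sketch of the `-1` arithmetic used above, assuming
# exactly one `-1` and a total size that divides evenly (as the runtime
# assertions enforce). The helper name is illustrative, not part of the API.
def _calculate_reshape_sketch(original_shape, new_shape):
  """E.g. _calculate_reshape_sketch([2, 3], [3, -1]) == [3, 2]."""
  original_size = 1
  for dim in original_shape:
    original_size *= dim
  new_prod = 1
  for dim in new_shape:
    new_prod *= dim
  # With exactly one -1 present, -new_prod is the product of the known dims.
  size_implicit_dim = original_size // max(1, -new_prod)
  return [size_implicit_dim if dim == -1 else dim for dim in new_shape]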
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def validate_init_args_statically(distribution, batch_shape):
"""Helper to __init__ which makes or raises assertions."""
if batch_shape.shape.ndims is not None:
if batch_shape.shape.ndims != 1:
raise ValueError("`batch_shape` must be a vector "
"(saw rank: {}).".format(batch_shape.shape.ndims))
batch_shape_static = tensor_util.constant_value_as_shape(batch_shape)
batch_size_static = batch_shape_static.num_elements()
dist_batch_size_static = distribution.batch_shape.num_elements()
if batch_size_static is not None and dist_batch_size_static is not None:
if batch_size_static != dist_batch_size_static:
raise ValueError("`batch_shape` size ({}) must match "
"`distribution.batch_shape` size ({}).".format(
batch_size_static, dist_batch_size_static))
if batch_shape_static.dims is not None:
if any(
dim.value is not None and
dim.value < 1 for dim in batch_shape_static.dims):
raise ValueError("`batch_shape` elements must be >=-1.")
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/batch_reshape.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Chi2 distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import gamma
from tensorflow.python.util import deprecation
__all__ = [
"Chi2",
"Chi2WithAbsDf",
]
class Chi2(gamma.Gamma):
"""Chi2 distribution.
The Chi2 distribution is defined over positive real numbers using a degrees of
freedom ("df") parameter.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; df, x > 0) = x**(0.5 df - 1) exp(-0.5 x) / Z
Z = 2**(0.5 df) Gamma(0.5 df)
```
where:
* `df` denotes the degrees of freedom,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The Chi2 distribution is a special case of the Gamma distribution, i.e.,
```python
Chi2(df) = Gamma(concentration=0.5 * df, rate=0.5)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name="Chi2"):
"""Construct Chi2 distributions with parameter `df`.
Args:
df: Floating point tensor, the degrees of freedom of the
distribution(s). `df` must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
# Even though all stats of chi2 are defined for valid parameters, this is
# not true in the parent class "gamma"; therefore, passing
# allow_nan_stats=True through to the parent class results in unnecessary
# asserts.
with ops.name_scope(name, values=[df]) as name:
with ops.control_dependencies([
check_ops.assert_positive(df),
] if validate_args else []):
self._df = array_ops.identity(df, name="df")
super(Chi2, self).__init__(
concentration=0.5 * self._df,
rate=constant_op.constant(0.5, dtype=self._df.dtype),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
@staticmethod
def _param_shapes(sample_shape):
return {"df": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def df(self):
return self._df
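# A minimal sketch of the Gamma special case documented above, assuming TF 1.x
# graph mode and the `tf.contrib.distributions` aliases: Chi2(df) and
# Gamma(concentration=0.5 * df, rate=0.5) produce the same log density.
# The helper name is illustrative, not part of the library API.
def _chi2_as_gamma_sketch():
  import tensorflow as tf
  tfd = tf.contrib.distributions
  chi2 = tfd.Chi2(df=[2., 4., 6.])
  gamma_equiv = tfd.Gamma(concentration=[1., 2., 3.], rate=0.5)
  x = [0.5, 1.0, 1.5]
  with tf.Session() as sess:
    # The two log densities agree elementwise.
    return sess.run([chi2.log_prob(x), gamma_equiv.log_prob(x)])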
class Chi2WithAbsDf(Chi2):
"""Chi2 with parameter transform `df = floor(abs(df))`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
validate_args=False,
allow_nan_stats=True,
name="Chi2WithAbsDf"):
parameters = dict(locals())
with ops.name_scope(name, values=[df]) as name:
super(Chi2WithAbsDf, self).__init__(
df=math_ops.floor(
math_ops.abs(df, name="abs_df"),
name="floor_abs_df"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
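# A minimal sketch, assuming TF 1.x graph mode: the df -> floor(abs(df))
# transform above maps negative or fractional df values to valid degrees of
# freedom. The helper name is illustrative, not part of the library API.
def _chi2_with_abs_df_sketch():
  import tensorflow as tf
  chi2 = tf.contrib.distributions.Chi2WithAbsDf(df=[-2.7, 4.5])
  with tf.Session() as sess:
    return sess.run(chi2.df)  # ==> [2., 4.]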
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/chi2.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Binomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
_binomial_sample_note = """
For each batch member of counts `value`, `P[value]` is the probability that
after sampling `self.total_count` draws from this Binomial distribution, the
number of successes is `value`. Since different sequences of draws can result in
the same counts, the probability includes a combinatorial coefficient.
Note: `value` must be a non-negative tensor with dtype `dtype` and whose shape
can be broadcast with `self.probs` and `self.total_count`. `value` is only legal
if it is less than or equal to `self.total_count` and its components are equal
to integer values.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _bdtr(k, n, p):
"""The binomial cumulative distribution function.
Args:
k: floating point `Tensor`.
n: floating point `Tensor`.
p: floating point `Tensor`.
Returns:
`sum_{j=0}^k C(n, j) p^j (1 - p)^(n - j)`, i.e., the binomial CDF at `k`.
"""
# Trick for getting safe backprop/gradients into n, k when
# betainc(a = 0, ..) = nan
# Write:
# where(unsafe, safe_output, betainc(where(unsafe, safe_input, input)))
ones = array_ops.ones_like(n - k)
k_eq_n = math_ops.equal(k, n)
safe_dn = array_ops.where_v2(k_eq_n, ones, n - k)
dk = math_ops.betainc(a=safe_dn, b=k + 1, x=1 - p)
return array_ops.where_v2(k_eq_n, ones, dk)
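# A minimal plain-Python reference for the sum `_bdtr` evaluates via the
# incomplete beta function, assuming k and n are integer-valued with
# 0 <= k <= n and 0 <= p <= 1. Illustrative helper, not part of the API.
def _bdtr_reference(k, n, p):
  """Computes sum_{j=0}^k C(n, j) p**j (1 - p)**(n - j) directly."""
  total, coeff = 0., 1.  # `coeff` tracks the binomial coefficient C(n, j).
  for j in range(int(k) + 1):
    total += coeff * (p ** j) * ((1. - p) ** (int(n) - j))
    coeff = coeff * (int(n) - j) / (j + 1)  # Update C(n, j) -> C(n, j + 1).
  return total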
class Binomial(distribution.Distribution):
"""Binomial distribution.
This distribution is parameterized by `probs`, a (batch of) probabilities for
drawing a `1` and `total_count`, the number of trials per draw from the
Binomial.
#### Mathematical Details
The Binomial is a distribution over the number of `1`'s in `total_count`
independent trials, with each trial having the same probability of `1`, i.e.,
`probs`.
The probability mass function (pmf) is,
```none
pmf(k; n, p) = p**k (1 - p)**(n - k) / Z
Z = k! (n - k)! / n!
```
where:
* `total_count = n`,
* `probs = p`,
* `Z` is the normalizing constant, and,
* `n!` is the factorial of `n`.
#### Examples
Create a single distribution, corresponding to 5 coin flips.
```python
dist = Binomial(total_count=5., probs=.5)
```
Create a single distribution (using logits), corresponding to 5 coin flips.
```python
dist = Binomial(total_count=5., logits=0.)
```
Create 3 distributions, with the third distribution most likely to have the
largest number of successes.
```python
p = [.2, .3, .8]
# n will be broadcast to [4., 4., 4.], to match p.
dist = Binomial(total_count=4., probs=p)
```
The distribution functions can be evaluated on counts.
```python
# counts same shape as p.
counts = [1., 2, 3]
dist.prob(counts) # Shape [3]
# p will be broadcast to [[.2, .3, .8], [.2, .3, .8]] to match counts.
counts = [[1., 2, 1], [2, 2, 4]]
dist.prob(counts) # Shape [2, 3]
# p will be broadcast to shape [5, 7, 3] to match counts.
counts = [[...]] # Shape [5, 7, 3]
dist.prob(counts) # Shape [5, 7, 3]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="Binomial"):
"""Initialize a batch of Binomial distributions.
Args:
total_count: Non-negative floating point tensor with shape broadcastable
to `[N1,..., Nm]` with `m >= 0` and the same dtype as `probs` or
`logits`. Defines this as a batch of `N1 x ... x Nm` different Binomial
distributions. Its components should be equal to integer values.
logits: Floating point tensor representing the log-odds of a
positive event with shape broadcastable to `[N1,..., Nm]` `m >= 0`, and
the same dtype as `total_count`. Each entry represents logits for the
probability of success for independent Binomial distributions. Only one
of `logits` or `probs` should be passed in.
probs: Positive floating point tensor with shape broadcastable to
`[N1,..., Nm]` `m >= 0`, `probs in [0, 1]`. Each entry represents the
probability of success for independent Binomial distributions. Only one
of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._total_count = self._maybe_assert_valid_total_count(
ops.convert_to_tensor(total_count, name="total_count"),
validate_args)
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits,
probs=probs,
validate_args=validate_args,
name=name)
super(Binomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count,
self._logits,
self._probs],
name=name)
@property
def total_count(self):
"""Number of trials."""
return self._total_count
@property
def logits(self):
"""Log-odds of drawing a `1`."""
return self._logits
@property
def probs(self):
"""Probability of drawing a `1`."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.total_count),
array_ops.shape(self.probs))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.total_count.get_shape(),
self.probs.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
@distribution_util.AppendDocstring(_binomial_sample_note)
def _log_prob(self, counts):
return self._log_unnormalized_prob(counts) - self._log_normalization(counts)
@distribution_util.AppendDocstring(_binomial_sample_note)
def _prob(self, counts):
return math_ops.exp(self._log_prob(counts))
def _cdf(self, counts):
counts = self._maybe_assert_valid_sample(counts)
probs = self.probs
if not (counts.shape.is_fully_defined()
and self.probs.shape.is_fully_defined()
and counts.shape.is_compatible_with(self.probs.shape)):
# Broadcast only when needed; if both shapes are fully defined and equal,
# the broadcast is skipped.
probs += array_ops.zeros_like(counts)
counts += array_ops.zeros_like(self.probs)
return _bdtr(k=counts, n=self.total_count, p=probs)
def _log_unnormalized_prob(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return (counts * math_ops.log(self.probs) +
(self.total_count - counts) * math_ops.log1p(-self.probs))
def _log_normalization(self, counts):
counts = self._maybe_assert_valid_sample(counts)
return (math_ops.lgamma(1. + self.total_count - counts)
+ math_ops.lgamma(1. + counts)
- math_ops.lgamma(1. + self.total_count))
def _mean(self):
return self.total_count * self.probs
def _variance(self):
return self._mean() * (1. - self.probs)
@distribution_util.AppendDocstring(
"""Note that when `(1 + total_count) * probs` is an integer, there are
actually two modes. Namely, `(1 + total_count) * probs` and
`(1 + total_count) * probs - 1` are both modes. Here we return only the
larger of the two modes.""")
def _mode(self):
return math_ops.floor((1. + self.total_count) * self.probs)
def _maybe_assert_valid_total_count(self, total_count, validate_args):
if not validate_args:
return total_count
return control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
total_count,
message="total_count must be non-negative."),
distribution_util.assert_integer_form(
total_count,
message="total_count cannot contain fractional components."),
], total_count)
def _maybe_assert_valid_sample(self, counts):
"""Check counts for proper shape, values, then return tensor version."""
if not self.validate_args:
return counts
counts = distribution_util.embed_check_nonnegative_integer_form(counts)
return control_flow_ops.with_dependencies([
check_ops.assert_less_equal(
counts, self.total_count,
message="counts are not less than or equal to n."),
], counts)
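# A minimal sketch, assuming TF 1.x graph mode and the contrib alias: the
# moments implemented above reduce to mean = n * p, variance = n * p * (1 - p),
# and mode = floor((n + 1) * p). Illustrative helper, not part of the API.
def _binomial_moments_sketch():
  import tensorflow as tf
  dist = tf.contrib.distributions.Binomial(total_count=4., probs=[.2, .3, .8])
  with tf.Session() as sess:
    # ==> mean [0.8, 1.2, 3.2], variance [0.64, 0.84, 0.64], mode [1., 1., 4.]
    return sess.run([dist.mean(), dist.variance(), dist.mode()])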
|
tensorflow-r1.15.5-nv23.03
|
tensorflow/contrib/distributions/python/ops/binomial.py
|