python_code | repo_name | file_path |
---|---|---|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class PoissonTest(test.TestCase):
def _make_poisson(self, rate, validate_args=False):
return poisson_lib.Poisson(rate=rate, validate_args=validate_args)
def testPoissonShape(self):
with self.cached_session():
lam = constant_op.constant([3.0] * 5)
poisson = self._make_poisson(rate=lam)
self.assertEqual(poisson.batch_shape_tensor().eval(), (5,))
self.assertEqual(poisson.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(poisson.event_shape_tensor().eval(), [])
self.assertEqual(poisson.event_shape, tensor_shape.TensorShape([]))
def testInvalidLam(self):
invalid_lams = [-.01, 0., -2.]
for lam in invalid_lams:
with self.cached_session():
with self.assertRaisesOpError("Condition x > 0"):
poisson = self._make_poisson(rate=lam, validate_args=True)
poisson.rate.eval()
def testPoissonLogPmf(self):
with self.cached_session():
batch_size = 6
lam = constant_op.constant([3.0] * batch_size)
lam_v = 3.0
x = [2., 3., 4., 5., 6., 7.]
poisson = self._make_poisson(rate=lam)
log_pmf = poisson.log_prob(x)
self.assertEqual(log_pmf.get_shape(), (6,))
self.assertAllClose(log_pmf.eval(), stats.poisson.logpmf(x, lam_v))
pmf = poisson.prob(x)
self.assertEqual(pmf.get_shape(), (6,))
self.assertAllClose(pmf.eval(), stats.poisson.pmf(x, lam_v))
def testPoissonLogPmfValidateArgs(self):
with self.cached_session():
batch_size = 6
lam = constant_op.constant([3.0] * batch_size)
x = array_ops.placeholder(dtypes.float32, shape=[6])
feed_dict = {x: [2.5, 3.2, 4.3, 5.1, 6., 7.]}
poisson = self._make_poisson(rate=lam, validate_args=True)
# Non-integer
with self.assertRaisesOpError("cannot contain fractional components"):
log_pmf = poisson.log_prob(x)
log_pmf.eval(feed_dict=feed_dict)
with self.assertRaisesOpError("Condition x >= 0"):
log_pmf = poisson.log_prob([-1.])
log_pmf.eval(feed_dict=feed_dict)
poisson = self._make_poisson(rate=lam, validate_args=False)
log_pmf = poisson.log_prob(x)
self.assertEqual(log_pmf.get_shape(), (6,))
pmf = poisson.prob(x)
self.assertEqual(pmf.get_shape(), (6,))
def testPoissonLogPmfMultidimensional(self):
with self.cached_session():
batch_size = 6
lam = constant_op.constant([[2.0, 4.0, 5.0]] * batch_size)
lam_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
poisson = self._make_poisson(rate=lam)
log_pmf = poisson.log_prob(x)
self.assertEqual(log_pmf.get_shape(), (6, 3))
self.assertAllClose(log_pmf.eval(), stats.poisson.logpmf(x, lam_v))
pmf = poisson.prob(x)
self.assertEqual(pmf.get_shape(), (6, 3))
self.assertAllClose(pmf.eval(), stats.poisson.pmf(x, lam_v))
def testPoissonCDF(self):
with self.cached_session():
batch_size = 6
lam = constant_op.constant([3.0] * batch_size)
lam_v = 3.0
x = [2., 3., 4., 5., 6., 7.]
poisson = self._make_poisson(rate=lam)
log_cdf = poisson.log_cdf(x)
self.assertEqual(log_cdf.get_shape(), (6,))
self.assertAllClose(log_cdf.eval(), stats.poisson.logcdf(x, lam_v))
cdf = poisson.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), stats.poisson.cdf(x, lam_v))
def testPoissonCDFNonIntegerValues(self):
with self.cached_session():
batch_size = 6
lam = constant_op.constant([3.0] * batch_size)
lam_v = 3.0
x = np.array([2.2, 3.1, 4., 5.5, 6., 7.], dtype=np.float32)
poisson = self._make_poisson(rate=lam)
cdf = poisson.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
# The Poisson CDF should be valid on these non-integer values, and
# equal to igammac(1 + x, rate).
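# (At integer x this agrees with the usual Poisson CDF, since
# P[X <= x] = Q(x + 1, rate) = igammac(x + 1, rate).)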
self.assertAllClose(cdf.eval(), special.gammaincc(1. + x, lam_v))
with self.assertRaisesOpError("cannot contain fractional components"):
poisson_validate = self._make_poisson(rate=lam, validate_args=True)
poisson_validate.cdf(x).eval()
def testPoissonCdfMultidimensional(self):
with self.cached_session():
batch_size = 6
lam = constant_op.constant([[2.0, 4.0, 5.0]] * batch_size)
lam_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
poisson = self._make_poisson(rate=lam)
log_cdf = poisson.log_cdf(x)
self.assertEqual(log_cdf.get_shape(), (6, 3))
self.assertAllClose(log_cdf.eval(), stats.poisson.logcdf(x, lam_v))
cdf = poisson.cdf(x)
self.assertEqual(cdf.get_shape(), (6, 3))
self.assertAllClose(cdf.eval(), stats.poisson.cdf(x, lam_v))
def testPoissonMean(self):
with self.cached_session():
lam_v = [1.0, 3.0, 2.5]
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.mean().get_shape(), (3,))
self.assertAllClose(poisson.mean().eval(), stats.poisson.mean(lam_v))
self.assertAllClose(poisson.mean().eval(), lam_v)
def testPoissonVariance(self):
with self.cached_session():
lam_v = [1.0, 3.0, 2.5]
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.variance().get_shape(), (3,))
self.assertAllClose(poisson.variance().eval(), stats.poisson.var(lam_v))
self.assertAllClose(poisson.variance().eval(), lam_v)
def testPoissonStd(self):
with self.cached_session():
lam_v = [1.0, 3.0, 2.5]
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.stddev().get_shape(), (3,))
self.assertAllClose(poisson.stddev().eval(), stats.poisson.std(lam_v))
self.assertAllClose(poisson.stddev().eval(), np.sqrt(lam_v))
def testPoissonMode(self):
with self.cached_session():
lam_v = [1.0, 3.0, 2.5, 3.2, 1.1, 0.05]
poisson = self._make_poisson(rate=lam_v)
self.assertEqual(poisson.mode().get_shape(), (6,))
self.assertAllClose(poisson.mode().eval(), np.floor(lam_v))
def testPoissonMultipleMode(self):
with self.cached_session():
lam_v = [1.0, 3.0, 2.0, 4.0, 5.0, 10.0]
poisson = self._make_poisson(rate=lam_v)
# For the case where lam is an integer, the modes are: lam and lam - 1.
# In this case, we get back the larger of the two modes.
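# (When lam is an integer, pmf(lam) / pmf(lam - 1) = lam / lam = 1, so lam
# and lam - 1 are equally probable and both are modes.)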
self.assertEqual((6,), poisson.mode().get_shape())
self.assertAllClose(lam_v, poisson.mode().eval())
def testPoissonSample(self):
with self.cached_session():
lam_v = 4.0
lam = constant_op.constant(lam_v)
# Choosing `n >= (k/rtol)**2` roughly ensures our sample mean is
# within `k` std. deviations of the actual mean up to rtol precision.
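# (Informal sketch: the sample mean has std. deviation sqrt(lam / n), so
# with n = (k/rtol)**2 its `k`-std.-deviation band has half-width
# k * sqrt(lam) * rtol / k = rtol * sqrt(lam) <= rtol * lam for lam >= 1.)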
n = int(100e3)
poisson = self._make_poisson(rate=lam)
samples = poisson.sample(n, seed=123456)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(), stats.poisson.mean(lam_v), rtol=.01)
self.assertAllClose(
sample_values.var(), stats.poisson.var(lam_v), rtol=.01)
def testPoissonSampleMultidimensionalMean(self):
with self.cached_session():
lam_v = np.array([np.arange(1, 51, dtype=np.float32)]) # 1 x 50
poisson = self._make_poisson(rate=lam_v)
# Choosing `n >= (k/rtol)**2` roughly ensures our sample mean is
# within `k` std. deviations of the actual mean up to rtol precision.
n = int(100e3)
samples = poisson.sample(n, seed=123456)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n, 1, 50))
self.assertEqual(sample_values.shape, (n, 1, 50))
self.assertAllClose(
sample_values.mean(axis=0),
stats.poisson.mean(lam_v),
rtol=.01,
atol=0)
def testPoissonSampleMultidimensionalVariance(self):
with self.cached_session():
lam_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10
poisson = self._make_poisson(rate=lam_v)
# Choosing `n >= 2 * lam * (k/rtol)**2` roughly ensures our sample
# variance is within `k` std. deviations of the actual variance up to
# rtol precision.
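# (Informal sketch: for large lam the sample variance has std. deviation
# roughly lam * sqrt(2 / n), so with n = 2 * lam * (k/rtol)**2 its
# `k`-std.-deviation band has half-width about rtol * sqrt(lam), which is
# <= rtol * lam, the true variance, for lam >= 1.)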
n = int(300e3)
samples = poisson.sample(n, seed=123456)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n, 1, 10))
self.assertEqual(sample_values.shape, (n, 1, 10))
self.assertAllClose(
sample_values.var(axis=0), stats.poisson.var(lam_v), rtol=.03, atol=0)
class PoissonLogRateTest(PoissonTest):
def _make_poisson(self, rate, validate_args=False):
return poisson_lib.Poisson(
log_rate=math_ops.log(rate),
validate_args=validate_args)
def testInvalidLam(self):
# No need to worry about the non-negativity of `rate` when using the
# `log_rate` parameterization.
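# (exp(log_rate) is strictly positive for any real-valued `log_rate`, so
# there is no constraint to validate.)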
pass
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/contrib/distributions/python/kernel_tests/poisson_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Independent distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import independent as independent_lib
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_diag_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bernoulli as bernoulli_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class ProductDistributionTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testSampleAndLogProbUnivariate(self):
loc = np.float32([-1., 1])
scale = np.float32([0.1, 0.5])
with self.cached_session() as sess:
ind = independent_lib.Independent(
distribution=normal_lib.Normal(loc=loc, scale=scale),
reinterpreted_batch_ndims=1)
x = ind.sample([4, 5], seed=42)
log_prob_x = ind.log_prob(x)
x_, actual_log_prob_x = sess.run([x, log_prob_x])
self.assertEqual([], ind.batch_shape)
self.assertEqual([2], ind.event_shape)
self.assertEqual([4, 5, 2], x.shape)
self.assertEqual([4, 5], log_prob_x.shape)
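# With reinterpreted_batch_ndims=1, Independent folds the length-2 batch
# dimension of the Normal into the event, so its log_prob is the sum of
# the per-component Normal log probs over that dimension (hence .sum(-1)).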
expected_log_prob_x = stats.norm(loc, scale).logpdf(x_).sum(-1)
self.assertAllClose(expected_log_prob_x, actual_log_prob_x,
rtol=1e-5, atol=0.)
def testSampleAndLogProbMultivariate(self):
loc = np.float32([[-1., 1], [1, -1]])
scale = np.float32([1., 0.5])
with self.cached_session() as sess:
ind = independent_lib.Independent(
distribution=mvn_diag_lib.MultivariateNormalDiag(
loc=loc,
scale_identity_multiplier=scale),
reinterpreted_batch_ndims=1)
x = ind.sample([4, 5], seed=42)
log_prob_x = ind.log_prob(x)
x_, actual_log_prob_x = sess.run([x, log_prob_x])
self.assertEqual([], ind.batch_shape)
self.assertEqual([2, 2], ind.event_shape)
self.assertEqual([4, 5, 2, 2], x.shape)
self.assertEqual([4, 5], log_prob_x.shape)
expected_log_prob_x = stats.norm(loc, scale[:, None]).logpdf(
x_).sum(-1).sum(-1)
self.assertAllClose(expected_log_prob_x, actual_log_prob_x,
rtol=1e-6, atol=0.)
def testSampleConsistentStats(self):
loc = np.float32([[-1., 1], [1, -1]])
scale = np.float32([1., 0.5])
n_samp = 1e4
with self.cached_session() as sess:
ind = independent_lib.Independent(
distribution=mvn_diag_lib.MultivariateNormalDiag(
loc=loc,
scale_identity_multiplier=scale),
reinterpreted_batch_ndims=1)
x = ind.sample(int(n_samp), seed=42)
sample_mean = math_ops.reduce_mean(x, axis=0)
sample_var = math_ops.reduce_mean(
math_ops.squared_difference(x, sample_mean), axis=0)
sample_std = math_ops.sqrt(sample_var)
sample_entropy = -math_ops.reduce_mean(ind.log_prob(x), axis=0)
[
sample_mean_, sample_var_, sample_std_, sample_entropy_,
actual_mean_, actual_var_, actual_std_, actual_entropy_,
actual_mode_,
] = sess.run([
sample_mean, sample_var, sample_std, sample_entropy,
ind.mean(), ind.variance(), ind.stddev(), ind.entropy(), ind.mode(),
])
self.assertAllClose(sample_mean_, actual_mean_, rtol=0.02, atol=0.)
self.assertAllClose(sample_var_, actual_var_, rtol=0.04, atol=0.)
self.assertAllClose(sample_std_, actual_std_, rtol=0.02, atol=0.)
self.assertAllClose(sample_entropy_, actual_entropy_, rtol=0.01, atol=0.)
self.assertAllClose(loc, actual_mode_, rtol=1e-6, atol=0.)
def testKLRaises(self):
ind1 = independent_lib.Independent(
distribution=normal_lib.Normal(
loc=np.float32([-1., 1]),
scale=np.float32([0.1, 0.5])),
reinterpreted_batch_ndims=1)
ind2 = independent_lib.Independent(
distribution=normal_lib.Normal(
loc=np.float32(-1),
scale=np.float32(0.5)),
reinterpreted_batch_ndims=0)
with self.assertRaisesRegexp(
ValueError, "Event shapes do not match"):
kullback_leibler.kl_divergence(ind1, ind2)
ind1 = independent_lib.Independent(
distribution=normal_lib.Normal(
loc=np.float32([-1., 1]),
scale=np.float32([0.1, 0.5])),
reinterpreted_batch_ndims=1)
ind2 = independent_lib.Independent(
distribution=mvn_diag_lib.MultivariateNormalDiag(
loc=np.float32([-1., 1]),
scale_diag=np.float32([0.1, 0.5])),
reinterpreted_batch_ndims=0)
with self.assertRaisesRegexp(
NotImplementedError, "different event shapes"):
kullback_leibler.kl_divergence(ind1, ind2)
def testKLScalarToMultivariate(self):
normal1 = normal_lib.Normal(
loc=np.float32([-1., 1]),
scale=np.float32([0.1, 0.5]))
ind1 = independent_lib.Independent(
distribution=normal1, reinterpreted_batch_ndims=1)
normal2 = normal_lib.Normal(
loc=np.float32([-3., 3]),
scale=np.float32([0.3, 0.3]))
ind2 = independent_lib.Independent(
distribution=normal2, reinterpreted_batch_ndims=1)
normal_kl = kullback_leibler.kl_divergence(normal1, normal2)
ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
self.assertAllClose(
self.evaluate(math_ops.reduce_sum(normal_kl, axis=-1)),
self.evaluate(ind_kl))
def testKLIdentity(self):
normal1 = normal_lib.Normal(
loc=np.float32([-1., 1]),
scale=np.float32([0.1, 0.5]))
# This is functionally just a wrapper around normal1,
# and doesn't change any outputs.
ind1 = independent_lib.Independent(
distribution=normal1, reinterpreted_batch_ndims=0)
normal2 = normal_lib.Normal(
loc=np.float32([-3., 3]),
scale=np.float32([0.3, 0.3]))
# This is functionally just a wrapper around normal2,
# and doesn't change any outputs.
ind2 = independent_lib.Independent(
distribution=normal2, reinterpreted_batch_ndims=0)
normal_kl = kullback_leibler.kl_divergence(normal1, normal2)
ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
self.assertAllClose(
self.evaluate(normal_kl), self.evaluate(ind_kl))
def testKLMultivariateToMultivariate(self):
# (1, 1, 2) batch of MVNDiag
mvn1 = mvn_diag_lib.MultivariateNormalDiag(
loc=np.float32([[[[-1., 1, 3.], [2., 4., 3.]]]]),
scale_diag=np.float32([[[0.2, 0.1, 5.], [2., 3., 4.]]]))
ind1 = independent_lib.Independent(
distribution=mvn1, reinterpreted_batch_ndims=2)
# (1, 1, 2) batch of MVNDiag
mvn2 = mvn_diag_lib.MultivariateNormalDiag(
loc=np.float32([[[[-2., 3, 2.], [1., 3., 2.]]]]),
scale_diag=np.float32([[[0.1, 0.5, 3.], [1., 2., 1.]]]))
ind2 = independent_lib.Independent(
distribution=mvn2, reinterpreted_batch_ndims=2)
mvn_kl = kullback_leibler.kl_divergence(mvn1, mvn2)
ind_kl = kullback_leibler.kl_divergence(ind1, ind2)
self.assertAllClose(
self.evaluate(math_ops.reduce_sum(mvn_kl, axis=[-1, -2])),
self.evaluate(ind_kl))
def _testMnistLike(self, static_shape):
sample_shape = [4, 5]
batch_shape = [10]
image_shape = [28, 28, 1]
logits = 3 * self._rng.random_sample(
batch_shape + image_shape).astype(np.float32) - 1
def expected_log_prob(x, logits):
return (x * logits - np.log1p(np.exp(logits))).sum(-1).sum(-1).sum(-1)
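# (The identity used above: for a Bernoulli with logits l,
# log p(x) = x * log(sigmoid(l)) + (1 - x) * log(1 - sigmoid(l))
#          = x * l - log(1 + exp(l)),
# summed here over the reinterpreted image dimensions.)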
with self.cached_session() as sess:
logits_ph = array_ops.placeholder(
dtypes.float32, shape=logits.shape if static_shape else None)
ind = independent_lib.Independent(
distribution=bernoulli_lib.Bernoulli(logits=logits_ph))
x = ind.sample(sample_shape, seed=42)
log_prob_x = ind.log_prob(x)
[
x_,
actual_log_prob_x,
ind_batch_shape,
ind_event_shape,
x_shape,
log_prob_x_shape,
] = sess.run([
x,
log_prob_x,
ind.batch_shape_tensor(),
ind.event_shape_tensor(),
array_ops.shape(x),
array_ops.shape(log_prob_x),
], feed_dict={logits_ph: logits})
if static_shape:
ind_batch_shape = ind.batch_shape
ind_event_shape = ind.event_shape
x_shape = x.shape
log_prob_x_shape = log_prob_x.shape
self.assertAllEqual(batch_shape, ind_batch_shape)
self.assertAllEqual(image_shape, ind_event_shape)
self.assertAllEqual(sample_shape + batch_shape + image_shape, x_shape)
self.assertAllEqual(sample_shape + batch_shape, log_prob_x_shape)
self.assertAllClose(expected_log_prob(x_, logits),
actual_log_prob_x,
rtol=1e-6, atol=0.)
def testMnistLikeStaticShape(self):
self._testMnistLike(static_shape=True)
def testMnistLikeDynamicShape(self):
self._testMnistLike(static_shape=False)
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/contrib/distributions/python/kernel_tests/independent_test.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ConditionalTransformedDistribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.kernel_tests import transformed_distribution_test
from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import ConditionalBijector
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
ds = distributions
class _ChooseLocation(ConditionalBijector):
"""A Bijector which chooses between one of two location parameters."""
def __init__(self, loc, name="ChooseLocation"):
self._graph_parents = []
self._name = name
with self._name_scope("init", values=[loc]):
self._loc = ops.convert_to_tensor(loc, name="loc")
super(_ChooseLocation, self).__init__(
graph_parents=[self._loc],
is_constant_jacobian=True,
validate_args=False,
forward_min_event_ndims=0,
name=name)
def _forward(self, x, z):
return x + self._gather_loc(z)
def _inverse(self, x, z):
return x - self._gather_loc(z)
def _inverse_log_det_jacobian(self, x, event_ndims, z=None):
return 0.
def _gather_loc(self, z):
z = ops.convert_to_tensor(z)
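# z is expected to take values in {-1, +1}; (1 + z) / 2 maps -1 -> 0 and
# +1 -> 1, which indexes into `loc`.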
z = math_ops.cast((1 + z) / 2, dtypes.int32)
return array_ops.gather(self._loc, z)
class ConditionalTransformedDistributionTest(
transformed_distribution_test.TransformedDistributionTest):
def _cls(self):
return ds.ConditionalTransformedDistribution
def testConditioning(self):
with self.cached_session():
conditional_normal = ds.ConditionalTransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=_ChooseLocation(loc=[-100., 100.]))
z = [-1, +1, -1, -1, +1]
self.assertAllClose(
np.sign(conditional_normal.sample(
5, bijector_kwargs={"z": z}).eval()), z)
class ConditionalScalarToMultiTest(
transformed_distribution_test.ScalarToMultiTest):
def _cls(self):
return ds.ConditionalTransformedDistribution
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/contrib/distributions/python/kernel_tests/conditional_transformed_distribution_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import half_normal as hn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class HalfNormalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.cached_session():
param_shapes = hn_lib.HalfNormal.param_shapes(sample_shape)
scale_shape = param_shapes["scale"]
self.assertAllEqual(expected, scale_shape.eval())
scale = array_ops.ones(scale_shape)
self.assertAllEqual(
expected,
array_ops.shape(hn_lib.HalfNormal(scale).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = hn_lib.HalfNormal.param_static_shapes(sample_shape)
scale_shape = param_shapes["scale"]
self.assertEqual(expected, scale_shape)
def _testBatchShapes(self, dist, tensor):
self.assertAllEqual(dist.batch_shape_tensor().eval(), tensor.shape)
self.assertAllEqual(dist.batch_shape_tensor().eval(), tensor.eval().shape)
self.assertAllEqual(dist.batch_shape, tensor.shape)
self.assertAllEqual(dist.batch_shape, tensor.eval().shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testHalfNormalLogPDF(self):
with self.cached_session():
batch_size = 6
scale = constant_op.constant([3.0] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
halfnorm = hn_lib.HalfNormal(scale=scale)
log_pdf = halfnorm.log_prob(x)
self._testBatchShapes(halfnorm, log_pdf)
pdf = halfnorm.prob(x)
self._testBatchShapes(halfnorm, pdf)
if not stats:
return
expected_log_pdf = stats.halfnorm(scale=scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testHalfNormalLogPDFMultidimensional(self):
with self.cached_session():
batch_size = 6
scale = constant_op.constant([[3.0, 1.0]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
halfnorm = hn_lib.HalfNormal(scale=scale)
log_pdf = halfnorm.log_prob(x)
self._testBatchShapes(halfnorm, log_pdf)
pdf = halfnorm.prob(x)
self._testBatchShapes(halfnorm, pdf)
if not stats:
return
expected_log_pdf = stats.halfnorm(scale=scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testHalfNormalCDF(self):
with self.cached_session():
batch_size = 50
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
halfnorm = hn_lib.HalfNormal(scale=scale)
cdf = halfnorm.cdf(x)
self._testBatchShapes(halfnorm, cdf)
log_cdf = halfnorm.log_cdf(x)
self._testBatchShapes(halfnorm, log_cdf)
if not stats:
return
expected_logcdf = stats.halfnorm(scale=scale).logcdf(x)
self.assertAllClose(expected_logcdf, log_cdf.eval(), atol=0)
self.assertAllClose(np.exp(expected_logcdf), cdf.eval(), atol=0)
def testHalfNormalSurvivalFunction(self):
with self.cached_session():
batch_size = 50
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
halfnorm = hn_lib.HalfNormal(scale=scale)
sf = halfnorm.survival_function(x)
self._testBatchShapes(halfnorm, sf)
log_sf = halfnorm.log_survival_function(x)
self._testBatchShapes(halfnorm, log_sf)
if not stats:
return
expected_logsf = stats.halfnorm(scale=scale).logsf(x)
self.assertAllClose(expected_logsf, log_sf.eval(), atol=0)
self.assertAllClose(np.exp(expected_logsf), sf.eval(), atol=0)
def testHalfNormalQuantile(self):
with self.cached_session():
batch_size = 50
scale = self._rng.rand(batch_size) + 1.0
p = np.linspace(0., 1.0, batch_size).astype(np.float64)
halfnorm = hn_lib.HalfNormal(scale=scale)
x = halfnorm.quantile(p)
self._testBatchShapes(halfnorm, x)
if not stats:
return
expected_x = stats.halfnorm(scale=scale).ppf(p)
self.assertAllClose(expected_x, x.eval(), atol=0)
def testFiniteGradients(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
scale = variables.Variable(dtype(3.0))
dist = hn_lib.HalfNormal(scale=scale)
x = np.array([0.01, 0.1, 1., 5., 10.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_prob, dist.prob, dist.log_survival_function,
]:
print(func.__name__)
value = func(x)
grads = gradients_impl.gradients(value, [scale])
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
def testHalfNormalEntropy(self):
with self.cached_session():
scale = np.array([[1.0, 2.0, 3.0]])
halfnorm = hn_lib.HalfNormal(scale=scale)
# See https://en.wikipedia.org/wiki/Half-normal_distribution for the
# entropy formula used here.
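# (Equivalently, this is the full Normal entropy minus log(2): the
# half-normal density is twice the Normal density on half the support, and
# 0.5 * log(2 * pi * e * scale**2) - log(2) = 0.5 * log(pi * scale**2 / 2) + 0.5.)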
expected_entropy = 0.5 * np.log(np.pi * scale ** 2.0 / 2.0) + 0.5
entropy = halfnorm.entropy()
self._testBatchShapes(halfnorm, entropy)
self.assertAllClose(expected_entropy, entropy.eval())
def testHalfNormalMeanAndMode(self):
with self.cached_session():
scale = np.array([11., 12., 13.])
halfnorm = hn_lib.HalfNormal(scale=scale)
expected_mean = scale * np.sqrt(2.0) / np.sqrt(np.pi)
self.assertAllEqual((3,), halfnorm.mean().eval().shape)
self.assertAllEqual(expected_mean, halfnorm.mean().eval())
self.assertAllEqual((3,), halfnorm.mode().eval().shape)
self.assertAllEqual([0., 0., 0.], halfnorm.mode().eval())
def testHalfNormalVariance(self):
with self.cached_session():
scale = np.array([7., 7., 7.])
halfnorm = hn_lib.HalfNormal(scale=scale)
expected_variance = scale ** 2.0 * (1.0 - 2.0 / np.pi)
self.assertAllEqual((3,), halfnorm.variance().eval().shape)
self.assertAllEqual(expected_variance, halfnorm.variance().eval())
def testHalfNormalStandardDeviation(self):
with self.cached_session():
scale = np.array([7., 7., 7.])
halfnorm = hn_lib.HalfNormal(scale=scale)
expected_variance = scale ** 2.0 * (1.0 - 2.0 / np.pi)
self.assertAllEqual((3,), halfnorm.stddev().shape)
self.assertAllEqual(np.sqrt(expected_variance), halfnorm.stddev().eval())
def testHalfNormalSample(self):
with self.cached_session():
scale = constant_op.constant(3.0)
n = constant_op.constant(100000)
halfnorm = hn_lib.HalfNormal(scale=scale)
sample = halfnorm.sample(n)
self.assertEqual(sample.eval().shape, (100000,))
self.assertAllClose(sample.eval().mean(),
3.0 * np.sqrt(2.0) / np.sqrt(np.pi), atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(halfnorm.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, sample.shape)
self.assertAllEqual(expected_shape, sample.eval().shape)
expected_shape_static = (tensor_shape.TensorShape(
[n.eval()]).concatenate(halfnorm.batch_shape))
self.assertAllEqual(expected_shape_static, sample.shape)
self.assertAllEqual(expected_shape_static, sample.eval().shape)
def testHalfNormalSampleMultiDimensional(self):
with self.cached_session():
batch_size = 2
scale = constant_op.constant([[2.0, 3.0]] * batch_size)
n = constant_op.constant(100000)
halfnorm = hn_lib.HalfNormal(scale=scale)
sample = halfnorm.sample(n)
self.assertEqual(sample.shape, (100000, batch_size, 2))
self.assertAllClose(sample.eval()[:, 0, 0].mean(),
2.0 * np.sqrt(2.0) / np.sqrt(np.pi), atol=1e-1)
self.assertAllClose(sample.eval()[:, 0, 1].mean(),
3.0 * np.sqrt(2.0) / np.sqrt(np.pi), atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(halfnorm.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, sample.shape)
self.assertAllEqual(expected_shape, sample.eval().shape)
expected_shape_static = (tensor_shape.TensorShape(
[n.eval()]).concatenate(halfnorm.batch_shape))
self.assertAllEqual(expected_shape_static, sample.shape)
self.assertAllEqual(expected_shape_static, sample.eval().shape)
def testNegativeSigmaFails(self):
with self.cached_session():
halfnorm = hn_lib.HalfNormal(scale=[-5.], validate_args=True, name="G")
with self.assertRaisesOpError("Condition x > 0 did not hold"):
halfnorm.mean().eval()
def testHalfNormalShape(self):
with self.cached_session():
scale = constant_op.constant([6.0] * 5)
halfnorm = hn_lib.HalfNormal(scale=scale)
self.assertEqual(halfnorm.batch_shape_tensor().eval(), [5])
self.assertEqual(halfnorm.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(halfnorm.event_shape_tensor().eval(), [])
self.assertEqual(halfnorm.event_shape, tensor_shape.TensorShape([]))
def testHalfNormalShapeWithPlaceholders(self):
scale = array_ops.placeholder(dtype=dtypes.float32)
halfnorm = hn_lib.HalfNormal(scale=scale)
with self.cached_session() as sess:
# batch_shape should be an unknown TensorShape.
self.assertEqual(halfnorm.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(halfnorm.event_shape, ())
self.assertAllEqual(halfnorm.event_shape_tensor().eval(), [])
self.assertAllEqual(
sess.run(halfnorm.batch_shape_tensor(),
feed_dict={scale: [1.0, 2.0]}), [2])
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/contrib/distributions/python/kernel_tests/half_normal_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import inverse_gamma
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class InverseGammaTest(test.TestCase):
def testInverseGammaShape(self):
with self.cached_session():
alpha = constant_op.constant([3.0] * 5)
beta = constant_op.constant(11.0)
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha, rate=beta)
self.assertEqual(inv_gamma.batch_shape_tensor().eval(), (5,))
self.assertEqual(inv_gamma.batch_shape,
tensor_shape.TensorShape([5]))
self.assertAllEqual(inv_gamma.event_shape_tensor().eval(), [])
self.assertEqual(inv_gamma.event_shape, tensor_shape.TensorShape(
[]))
def testInverseGammaLogPDF(self):
with self.cached_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.invgamma.logpdf(x, alpha_v, scale=beta_v)
log_pdf = inv_gamma.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
self.assertAllClose(log_pdf.eval(), expected_log_pdf)
pdf = inv_gamma.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
def testInverseGammaLogPDFMultidimensional(self):
with self.cached_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant([[3.0, 4.0]] * batch_size)
alpha_v = np.array([2.0, 4.0])
beta_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.invgamma.logpdf(x, alpha_v, scale=beta_v)
log_pdf = inv_gamma.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = inv_gamma.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testInverseGammaLogPDFMultidimensionalBroadcasting(self):
with self.cached_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant(3.0)
alpha_v = np.array([2.0, 4.0])
beta_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.invgamma.logpdf(x, alpha_v, scale=beta_v)
log_pdf = inv_gamma.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = inv_gamma.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testInverseGammaCDF(self):
with self.cached_session():
batch_size = 6
alpha_v = 2.0
beta_v = 3.0
alpha = constant_op.constant([alpha_v] * batch_size)
beta = constant_op.constant([beta_v] * batch_size)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha, rate=beta)
expected_cdf = stats.invgamma.cdf(x, alpha_v, scale=beta_v)
cdf = inv_gamma.cdf(x)
self.assertEqual(cdf.get_shape(), (batch_size,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testInverseGammaMode(self):
with self.cached_session():
alpha_v = np.array([5.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha_v, rate=beta_v)
expected_modes = beta_v / (alpha_v + 1)
self.assertEqual(inv_gamma.mode().get_shape(), (3,))
self.assertAllClose(inv_gamma.mode().eval(), expected_modes)
def testInverseGammaMeanAllDefined(self):
with self.cached_session():
alpha_v = np.array([5.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha_v, rate=beta_v)
expected_means = stats.invgamma.mean(alpha_v, scale=beta_v)
self.assertEqual(inv_gamma.mean().get_shape(), (3,))
self.assertAllClose(inv_gamma.mean().eval(), expected_means)
def testInverseGammaMeanAllowNanStats(self):
with self.cached_session():
# Mean will not be defined for the first entry.
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
inv_gamma = inverse_gamma.InverseGamma(
concentration=alpha_v, rate=beta_v, allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
inv_gamma.mean().eval()
def testInverseGammaMeanNanStats(self):
with self.cached_session():
# Mean will not be defined for the first two entries.
alpha_v = np.array([0.5, 1.0, 3.0, 2.5])
beta_v = np.array([1.0, 2.0, 4.0, 5.0])
inv_gamma = inverse_gamma.InverseGamma(
concentration=alpha_v, rate=beta_v, allow_nan_stats=True)
expected_means = beta_v / (alpha_v - 1)
expected_means[0] = np.nan
expected_means[1] = np.nan
self.assertEqual(inv_gamma.mean().get_shape(), (4,))
self.assertAllClose(inv_gamma.mean().eval(), expected_means)
def testInverseGammaVarianceAllDefined(self):
with self.cached_session():
alpha_v = np.array([7.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha_v, rate=beta_v)
expected_variances = stats.invgamma.var(alpha_v, scale=beta_v)
self.assertEqual(inv_gamma.variance().get_shape(), (3,))
self.assertAllClose(inv_gamma.variance().eval(), expected_variances)
def testInverseGammaVarianceAllowNanStats(self):
with self.cached_session():
alpha_v = np.array([1.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
inv_gamma = inverse_gamma.InverseGamma(
concentration=alpha_v, rate=beta_v, allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
inv_gamma.variance().eval()
def testInverseGammaVarianceNanStats(self):
with self.cached_session():
alpha_v = np.array([1.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
inv_gamma = inverse_gamma.InverseGamma(
concentration=alpha_v, rate=beta_v, allow_nan_stats=True)
expected_variances = stats.invgamma.var(alpha_v, scale=beta_v)
expected_variances[0] = np.nan
self.assertEqual(inv_gamma.variance().get_shape(), (3,))
self.assertAllClose(inv_gamma.variance().eval(), expected_variances)
def testInverseGammaEntropy(self):
with self.cached_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
expected_entropy = stats.invgamma.entropy(alpha_v, scale=beta_v)
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha_v, rate=beta_v)
self.assertEqual(inv_gamma.entropy().get_shape(), (3,))
self.assertAllClose(inv_gamma.entropy().eval(), expected_entropy)
def testInverseGammaSample(self):
with session.Session():
alpha_v = 4.0
beta_v = 3.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha, rate=beta)
samples = inv_gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.invgamma.mean(
alpha_v, scale=beta_v),
atol=.0025)
self.assertAllClose(
sample_values.var(),
stats.invgamma.var(alpha_v, scale=beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testInverseGammaSampleMultiDimensional(self):
with session.Session():
alpha_v = np.array([np.arange(3, 103, dtype=np.float32)]) # 1 x 100
beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
inv_gamma = inverse_gamma.InverseGamma(concentration=alpha_v, rate=beta_v)
n = 10000
samples = inv_gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100
alpha_bc = alpha_v + zeros
beta_bc = beta_v + zeros
self.assertAllClose(
sample_values.mean(axis=0),
stats.invgamma.mean(
alpha_bc, scale=beta_bc),
atol=.25)
self.assertAllClose(
sample_values.var(axis=0),
stats.invgamma.var(alpha_bc, scale=beta_bc),
atol=4.5)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(alpha_v, [-1])):
for bi, b in enumerate(np.reshape(beta_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, alpha, beta, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = stats.kstest(samples, stats.invgamma(alpha, scale=beta).cdf)
# Return True when the test passes.
return ks < 0.02
def testInverseGammaPdfOfSampleMultiDims(self):
with session.Session() as sess:
inv_gamma = inverse_gamma.InverseGamma(
concentration=[7., 11.],
rate=[[5.], [6.]])
num = 50000
samples = inv_gamma.sample(num, seed=137)
pdfs = inv_gamma.prob(samples)
sample_vals, pdf_vals = sess.run([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertAllClose(
stats.invgamma.mean(
[[7., 11.], [7., 11.]], scale=np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
atol=.1)
self.assertAllClose(
stats.invgamma.var([[7., 11.], [7., 11.]],
scale=np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
atol=.1)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
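# Approximates the integral of the pdf with the trapezoidal rule over the
# sorted (sample, pdf) pairs, starting from (0, 0), and checks that the
# total is close to 1.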
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testInverseGammaNonPositiveInitializationParamsRaises(self):
with self.cached_session():
alpha_v = constant_op.constant(0.0, name="alpha")
beta_v = constant_op.constant(1.0, name="beta")
inv_gamma = inverse_gamma.InverseGamma(
concentration=alpha_v, rate=beta_v, validate_args=True)
with self.assertRaisesOpError("alpha"):
inv_gamma.mean().eval()
alpha_v = constant_op.constant(1.0, name="alpha")
beta_v = constant_op.constant(0.0, name="beta")
inv_gamma = inverse_gamma.InverseGamma(
concentration=alpha_v, rate=beta_v, validate_args=True)
with self.assertRaisesOpError("beta"):
inv_gamma.mean().eval()
def testInverseGammaWithSoftplusConcentrationRate(self):
with self.cached_session():
alpha = constant_op.constant([-0.1, -2.9], name="alpha")
beta = constant_op.constant([1.0, -4.8], name="beta")
inv_gamma = inverse_gamma.InverseGammaWithSoftplusConcentrationRate(
concentration=alpha, rate=beta, validate_args=True)
self.assertAllClose(nn_ops.softplus(alpha).eval(),
inv_gamma.concentration.eval())
self.assertAllClose(nn_ops.softplus(beta).eval(),
inv_gamma.rate.eval())
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/contrib/distributions/python/kernel_tests/inverse_gamma_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import logistic
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.platform import test
class LogisticTest(test.TestCase):
def testReparameterizable(self):
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
self.assertTrue(
dist.reparameterization_type == distribution.FULLY_REPARAMETERIZED)
def testLogisticLogProb(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
dist = logistic.Logistic(loc, scale)
expected_log_prob = stats.logistic.logpdf(x, np_loc, scale)
log_prob = dist.log_prob(x)
self.assertEqual(log_prob.get_shape(), (6,))
self.assertAllClose(log_prob.eval(), expected_log_prob)
prob = dist.prob(x)
self.assertEqual(prob.get_shape(), (6,))
self.assertAllClose(prob.eval(), np.exp(expected_log_prob))
def testLogisticCDF(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
cdf = dist.cdf(x)
expected_cdf = stats.logistic.cdf(x, np_loc, scale)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testLogisticLogCDF(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
logcdf = dist.log_cdf(x)
expected_logcdf = stats.logistic.logcdf(x, np_loc, scale)
self.assertEqual(logcdf.get_shape(), (6,))
self.assertAllClose(logcdf.eval(), expected_logcdf)
def testLogisticSurvivalFunction(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
survival_function = dist.survival_function(x)
expected_survival_function = stats.logistic.sf(x, np_loc, scale)
self.assertEqual(survival_function.get_shape(), (6,))
self.assertAllClose(survival_function.eval(), expected_survival_function)
def testLogisticLogSurvivalFunction(self):
with self.cached_session():
batch_size = 6
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
dist = logistic.Logistic(loc, scale)
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
logsurvival_function = dist.log_survival_function(x)
expected_logsurvival_function = stats.logistic.logsf(x, np_loc, scale)
self.assertEqual(logsurvival_function.get_shape(), (6,))
self.assertAllClose(logsurvival_function.eval(),
expected_logsurvival_function)
def testLogisticMean(self):
with self.cached_session():
loc = [2.0, 1.5, 1.0]
scale = 1.5
expected_mean = stats.logistic.mean(loc, scale)
dist = logistic.Logistic(loc, scale)
self.assertAllClose(dist.mean().eval(), expected_mean)
def testLogisticVariance(self):
with self.cached_session():
loc = [2.0, 1.5, 1.0]
scale = 1.5
expected_variance = stats.logistic.var(loc, scale)
dist = logistic.Logistic(loc, scale)
self.assertAllClose(dist.variance().eval(), expected_variance)
def testLogisticEntropy(self):
with self.cached_session():
batch_size = 3
np_loc = np.array([2.0] * batch_size, dtype=np.float32)
loc = constant_op.constant(np_loc)
scale = 1.5
expected_entropy = stats.logistic.entropy(np_loc, scale)
dist = logistic.Logistic(loc, scale)
self.assertAllClose(dist.entropy().eval(), expected_entropy)
def testLogisticSample(self):
with self.cached_session():
loc = [3.0, 4.0, 2.0]
scale = 1.0
dist = logistic.Logistic(loc, scale)
sample = dist.sample(seed=100)
self.assertEqual(sample.get_shape(), (3,))
self.assertAllClose(sample.eval(), [6.22460556, 3.79602098, 2.05084133])
def testDtype(self):
loc = constant_op.constant([0.1, 0.4], dtype=dtypes.float32)
scale = constant_op.constant(1.0, dtype=dtypes.float32)
dist = logistic.Logistic(loc, scale)
self.assertEqual(dist.dtype, dtypes.float32)
self.assertEqual(dist.loc.dtype, dist.scale.dtype)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.loc.dtype, dist.mean().dtype)
self.assertEqual(dist.loc.dtype, dist.variance().dtype)
self.assertEqual(dist.loc.dtype, dist.stddev().dtype)
self.assertEqual(dist.loc.dtype, dist.entropy().dtype)
self.assertEqual(dist.loc.dtype, dist.prob(0.2).dtype)
self.assertEqual(dist.loc.dtype, dist.log_prob(0.2).dtype)
loc = constant_op.constant([0.1, 0.4], dtype=dtypes.float64)
scale = constant_op.constant(1.0, dtype=dtypes.float64)
dist64 = logistic.Logistic(loc, scale)
self.assertEqual(dist64.dtype, dtypes.float64)
self.assertEqual(dist64.dtype, dist64.sample(5).dtype)
if __name__ == "__main__":
test.main()
| tensorflow-master | tensorflow/contrib/distributions/python/kernel_tests/logistic_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import negative_binomial
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# In all tests that follow, we use scipy.stats.nbinom, which
# represents a Negative Binomial distribution, with success and failure
# probabilities flipped.
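# Concretely, scipy.stats.nbinom(n, p) counts failures before the n-th
# success with success probability p, so the expected values below are
# computed with p = 1 - probs.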
class NegativeBinomialTest(test.TestCase):
def testNegativeBinomialShape(self):
with self.cached_session():
probs = [.1] * 5
total_count = [2.0] * 5
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
self.assertEqual([5], negbinom.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([5]), negbinom.batch_shape)
self.assertAllEqual([], negbinom.event_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([]), negbinom.event_shape)
def testNegativeBinomialShapeBroadcast(self):
with self.cached_session():
probs = [[.1, .2, .3]] * 5
total_count = [[2.]] * 5
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
self.assertAllEqual([5, 3], negbinom.batch_shape_tensor().eval())
self.assertAllEqual(
tensor_shape.TensorShape([5, 3]), negbinom.batch_shape)
self.assertAllEqual([], negbinom.event_shape_tensor().eval())
self.assertAllEqual(tensor_shape.TensorShape([]), negbinom.event_shape)
def testLogits(self):
logits = [[0., 9., -0.5]]
with self.cached_session():
negbinom = negative_binomial.NegativeBinomial(
total_count=3., logits=logits)
self.assertEqual([1, 3], negbinom.probs.get_shape())
self.assertEqual([1, 3], negbinom.logits.get_shape())
self.assertAllClose(logits, negbinom.logits.eval())
def testInvalidP(self):
invalid_ps = [-.01, 0., -2.,]
with self.cached_session():
with self.assertRaisesOpError("Condition x >= 0"):
negbinom = negative_binomial.NegativeBinomial(
5., probs=invalid_ps, validate_args=True)
negbinom.probs.eval()
invalid_ps = [1.01, 2., 1.001,]
with self.cached_session():
with self.assertRaisesOpError("probs has components greater than 1."):
negbinom = negative_binomial.NegativeBinomial(
5., probs=invalid_ps, validate_args=True)
negbinom.probs.eval()
def testInvalidNegativeCount(self):
invalid_rs = [-.01, 0., -2.,]
with self.cached_session():
with self.assertRaisesOpError("Condition x > 0"):
negbinom = negative_binomial.NegativeBinomial(
total_count=invalid_rs, probs=0.1, validate_args=True)
negbinom.total_count.eval()
def testNegativeBinomialLogCdf(self):
with self.cached_session():
batch_size = 6
probs = [.2] * batch_size
probs_v = .2
total_count = 5.
x = np.array([2., 3., 4., 5., 6., 7.], dtype=np.float32)
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
expected_log_cdf = stats.nbinom.logcdf(x, n=total_count, p=1 - probs_v)
log_cdf = negbinom.log_cdf(x)
self.assertEqual([6], log_cdf.get_shape())
self.assertAllClose(expected_log_cdf, log_cdf.eval())
cdf = negbinom.cdf(x)
self.assertEqual([6], cdf.get_shape())
self.assertAllClose(np.exp(expected_log_cdf), cdf.eval())
def testNegativeBinomialLogCdfValidateArgs(self):
with self.cached_session():
batch_size = 6
probs = [.9] * batch_size
total_count = 5.
with self.assertRaisesOpError("Condition x >= 0"):
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs, validate_args=True)
negbinom.log_cdf(-1.).eval()
def testNegativeBinomialLogPmf(self):
with self.cached_session():
batch_size = 6
probs = [.2] * batch_size
probs_v = .2
total_count = 5.
x = np.array([2., 3., 4., 5., 6., 7.], dtype=np.float32)
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
expected_log_pmf = stats.nbinom.logpmf(x, n=total_count, p=1 - probs_v)
log_pmf = negbinom.log_prob(x)
self.assertEqual([6], log_pmf.get_shape())
self.assertAllClose(expected_log_pmf, log_pmf.eval())
pmf = negbinom.prob(x)
self.assertEqual([6], pmf.get_shape())
self.assertAllClose(np.exp(expected_log_pmf), pmf.eval())
def testNegativeBinomialLogPmfValidateArgs(self):
with self.cached_session():
batch_size = 6
probs = [.9] * batch_size
total_count = 5.
x = array_ops.placeholder(dtypes.float32, shape=[6])
feed_dict = {x: [2.5, 3.2, 4.3, 5.1, 6., 7.]}
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs, validate_args=True)
with self.assertRaisesOpError("Condition x == y"):
log_pmf = negbinom.log_prob(x)
log_pmf.eval(feed_dict=feed_dict)
with self.assertRaisesOpError("Condition x >= 0"):
log_pmf = negbinom.log_prob([-1.])
log_pmf.eval(feed_dict=feed_dict)
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs, validate_args=False)
log_pmf = negbinom.log_prob(x)
self.assertEqual([6], log_pmf.get_shape())
pmf = negbinom.prob(x)
self.assertEqual([6], pmf.get_shape())
def testNegativeBinomialLogPmfMultidimensional(self):
with self.cached_session():
batch_size = 6
probs = constant_op.constant([[.2, .3, .5]] * batch_size)
probs_v = np.array([.2, .3, .5])
total_count = 5.
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
expected_log_pmf = stats.nbinom.logpmf(
x, n=total_count, p=1 - probs_v)
log_pmf = negbinom.log_prob(x)
log_pmf_values = log_pmf.eval()
self.assertEqual([6, 3], log_pmf.get_shape())
self.assertAllClose(expected_log_pmf, log_pmf_values)
pmf = negbinom.prob(x)
pmf_values = pmf.eval()
self.assertEqual([6, 3], pmf.get_shape())
self.assertAllClose(np.exp(expected_log_pmf), pmf_values)
def testNegativeBinomialMean(self):
with self.cached_session():
total_count = 5.
probs = np.array([.1, .3, .25], dtype=np.float32)
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
expected_means = stats.nbinom.mean(n=total_count, p=1 - probs)
self.assertEqual([3], negbinom.mean().get_shape())
self.assertAllClose(expected_means, negbinom.mean().eval())
def testNegativeBinomialVariance(self):
with self.cached_session():
total_count = 5.
probs = np.array([.1, .3, .25], dtype=np.float32)
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
expected_vars = stats.nbinom.var(n=total_count, p=1 - probs)
self.assertEqual([3], negbinom.variance().get_shape())
self.assertAllClose(expected_vars, negbinom.variance().eval())
def testNegativeBinomialStddev(self):
with self.cached_session():
total_count = 5.
probs = np.array([.1, .3, .25], dtype=np.float32)
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
expected_stds = stats.nbinom.std(n=total_count, p=1 - probs)
self.assertEqual([3], negbinom.stddev().get_shape())
self.assertAllClose(expected_stds, negbinom.stddev().eval())
def testNegativeBinomialSample(self):
with self.cached_session() as sess:
probs = [.3, .9]
total_count = [4., 11.]
n = int(100e3)
negbinom = negative_binomial.NegativeBinomial(
total_count=total_count, probs=probs)
samples = negbinom.sample(n, seed=12345)
self.assertEqual([n, 2], samples.get_shape())
sample_mean = math_ops.reduce_mean(samples, axis=0)
sample_var = math_ops.reduce_mean(
(samples - sample_mean[array_ops.newaxis, ...])**2., axis=0)
sample_min = math_ops.reduce_min(samples)
[sample_mean_, sample_var_, sample_min_] = sess.run([
sample_mean, sample_var, sample_min])
self.assertAllEqual(np.ones(sample_min_.shape, dtype=np.bool),
sample_min_ >= 0.0)
for i in range(2):
self.assertAllClose(sample_mean_[i],
stats.nbinom.mean(total_count[i], 1 - probs[i]),
atol=0.,
rtol=.02)
self.assertAllClose(sample_var_[i],
stats.nbinom.var(total_count[i], 1 - probs[i]),
atol=0.,
rtol=.02)
def testLogProbOverflow(self):
with self.cached_session() as sess:
logits = np.float32([20., 30., 40.])
total_count = np.float32(1.)
x = np.float32(0.)
nb = negative_binomial.NegativeBinomial(
total_count=total_count, logits=logits)
log_prob_ = sess.run(nb.log_prob(x))
self.assertAllEqual(np.ones_like(log_prob_, dtype=np.bool),
np.isfinite(log_prob_))
def testLogProbUnderflow(self):
with self.cached_session() as sess:
logits = np.float32([-90, -100, -110])
total_count = np.float32(1.)
x = np.float32(0.)
nb = negative_binomial.NegativeBinomial(
total_count=total_count, logits=logits)
log_prob_ = sess.run(nb.log_prob(x))
self.assertAllEqual(np.ones_like(log_prob_, dtype=np.bool),
np.isfinite(log_prob_))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/negative_binomial_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormalFullCovariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(42)
class MultivariateNormalFullCovarianceTest(test.TestCase):
def _random_pd_matrix(self, *shape):
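    # Softplus the diagonal so it is strictly positive, keep only the lower
    # triangle, and return chol * chol^T, a random positive-definite matrix.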
mat = rng.rand(*shape)
chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)
chol = array_ops.matrix_band_part(chol, -1, 0)
return math_ops.matmul(chol, chol, adjoint_b=True).eval()
def testRaisesIfInitializedWithNonSymmetricMatrix(self):
with self.cached_session():
mu = [1., 2.]
sigma = [[1., 0.], [1., 1.]] # Nonsingular, but not symmetric
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
with self.assertRaisesOpError("not symmetric"):
mvn.covariance().eval()
def testNamePropertyIsSetByInitArg(self):
with self.cached_session():
mu = [1., 2.]
sigma = [[1., 0.], [0., 1.]]
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, name="Billy")
self.assertEqual(mvn.name, "Billy/")
def testDoesNotRaiseIfInitializedWithSymmetricMatrix(self):
with self.cached_session():
mu = rng.rand(10)
sigma = self._random_pd_matrix(10, 10)
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
# Should not raise
mvn.covariance().eval()
def testLogPDFScalarBatch(self):
with self.cached_session():
mu = rng.rand(2)
sigma = self._random_pd_matrix(2, 2)
mvn = ds.MultivariateNormalFullCovariance(mu, sigma, validate_args=True)
x = rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testLogPDFScalarBatchCovarianceNotProvided(self):
with self.cached_session():
mu = rng.rand(2)
mvn = ds.MultivariateNormalFullCovariance(
mu, covariance_matrix=None, validate_args=True)
x = rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
# Initialize a scipy_mvn with the default covariance.
scipy_mvn = stats.multivariate_normal(mean=mu, cov=np.eye(2))
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testShapes(self):
with self.cached_session():
mu = rng.rand(3, 5, 2)
covariance = self._random_pd_matrix(3, 5, 2, 2)
mvn = ds.MultivariateNormalFullCovariance(
mu, covariance, validate_args=True)
# Shapes known at graph construction time.
self.assertEqual((2,), tuple(mvn.event_shape.as_list()))
self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
# Shapes known at runtime.
self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))
self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))
def _random_mu_and_sigma(self, batch_shape, event_shape):
# This ensures sigma is positive def.
mat_shape = batch_shape + event_shape + event_shape
mat = rng.randn(*mat_shape)
perm = np.arange(mat.ndim)
perm[-2:] = [perm[-1], perm[-2]]
sigma = np.matmul(mat, np.transpose(mat, perm))
mu_shape = batch_shape + event_shape
mu = rng.randn(*mu_shape)
return mu, sigma
def testKLBatch(self):
batch_shape = [2]
event_shape = [3]
with self.cached_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalFullCovariance(
loc=mu_a,
covariance_matrix=sigma_a,
validate_args=True)
mvn_b = ds.MultivariateNormalFullCovariance(
loc=mu_b,
covariance_matrix=sigma_b,
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b[0, :], sigma_b[0, :])
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b[1, :], sigma_b[1, :])
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def testKLBatchBroadcast(self):
batch_shape = [2]
event_shape = [3]
with self.cached_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
# No batch shape.
mu_b, sigma_b = self._random_mu_and_sigma([], event_shape)
mvn_a = ds.MultivariateNormalFullCovariance(
loc=mu_a,
covariance_matrix=sigma_a,
validate_args=True)
mvn_b = ds.MultivariateNormalFullCovariance(
loc=mu_b,
covariance_matrix=sigma_b,
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b, sigma_b)
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b, sigma_b)
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
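  # The closed form being checked is
  #   KL(N_a || N_b) = 0.5 * (tr(S_b^-1 S_a)
  #                           + (mu_b - mu_a)^T S_b^-1 (mu_b - mu_a)
  #                           - k + ln(det(S_b) / det(S_a))),
  # assembled term by term below.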
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_full_covariance_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the statistical testing library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import statistical_testing as st
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class StatisticalTestingTest(test.TestCase):
def test_dkwm_design_mean_one_sample_soundness(self):
thresholds = [1e-5, 1e-2, 1.1e-1, 0.9, 1., 1.02, 2., 10., 1e2, 1e5, 1e10]
rates = [1e-6, 1e-3, 1e-2, 1.1e-1, 0.2, 0.5, 0.7, 1.]
false_fail_rates, false_pass_rates = np.meshgrid(rates, rates)
false_fail_rates = false_fail_rates.flatten().astype(np.float32)
false_pass_rates = false_pass_rates.flatten().astype(np.float32)
detectable_discrepancies = []
for false_pass_rate, false_fail_rate in zip(
false_pass_rates, false_fail_rates):
sufficient_n = st.min_num_samples_for_dkwm_mean_test(
thresholds, low=0., high=1., false_fail_rate=false_fail_rate,
false_pass_rate=false_pass_rate)
detectable_discrepancies.append(
st.min_discrepancy_of_true_means_detectable_by_dkwm(
sufficient_n, low=0., high=1., false_fail_rate=false_fail_rate,
false_pass_rate=false_pass_rate))
detectable_discrepancies_ = self.evaluate(detectable_discrepancies)
for discrepancies, false_pass_rate, false_fail_rate in zip(
detectable_discrepancies_, false_pass_rates, false_fail_rates):
below_threshold = discrepancies <= thresholds
self.assertAllEqual(
np.ones_like(below_threshold, np.bool), below_threshold,
msg='false_pass_rate({}), false_fail_rate({})'.format(
false_pass_rate, false_fail_rate))
def test_dkwm_design_mean_two_sample_soundness(self):
thresholds = [1e-5, 1e-2, 1.1e-1, 0.9, 1., 1.02, 2., 10., 1e2, 1e5, 1e10]
rates = [1e-6, 1e-3, 1e-2, 1.1e-1, 0.2, 0.5, 0.7, 1.]
false_fail_rates, false_pass_rates = np.meshgrid(rates, rates)
false_fail_rates = false_fail_rates.flatten().astype(np.float32)
false_pass_rates = false_pass_rates.flatten().astype(np.float32)
detectable_discrepancies = []
for false_pass_rate, false_fail_rate in zip(
false_pass_rates, false_fail_rates):
[
sufficient_n1,
sufficient_n2
] = st.min_num_samples_for_dkwm_mean_two_sample_test(
thresholds, low1=0., high1=1., low2=0., high2=1.,
false_fail_rate=false_fail_rate,
false_pass_rate=false_pass_rate)
detectable_discrepancies.append(
st.min_discrepancy_of_true_means_detectable_by_dkwm_two_sample(
n1=sufficient_n1,
low1=0.,
high1=1.,
n2=sufficient_n2,
low2=0.,
high2=1.,
false_fail_rate=false_fail_rate,
false_pass_rate=false_pass_rate))
detectable_discrepancies_ = self.evaluate(detectable_discrepancies)
for discrepancies, false_pass_rate, false_fail_rate in zip(
detectable_discrepancies_, false_pass_rates, false_fail_rates):
below_threshold = discrepancies <= thresholds
self.assertAllEqual(
np.ones_like(below_threshold, np.bool), below_threshold,
msg='false_pass_rate({}), false_fail_rate({})'.format(
false_pass_rate, false_fail_rate))
def test_true_mean_confidence_interval_by_dkwm_one_sample(self):
rng = np.random.RandomState(seed=0)
num_samples = 5000
# 5000 samples is chosen to be enough to find discrepancies of
# size 0.1 or more with assurance 1e-6, as confirmed here:
d = st.min_discrepancy_of_true_means_detectable_by_dkwm(
num_samples, 0., 1., false_fail_rate=1e-6, false_pass_rate=1e-6)
d = self.evaluate(d)
self.assertLess(d, 0.1)
# Test that the confidence interval computed for the mean includes
# 0.5 and excludes 0.4 and 0.6.
samples = rng.uniform(size=num_samples).astype(np.float32)
(low, high) = st.true_mean_confidence_interval_by_dkwm(
samples, 0., 1., error_rate=1e-6)
low, high = self.evaluate([low, high])
self.assertGreater(low, 0.4)
self.assertLess(low, 0.5)
self.assertGreater(high, 0.5)
self.assertLess(high, 0.6)
def test_dkwm_mean_one_sample_assertion(self):
rng = np.random.RandomState(seed=0)
num_samples = 5000
# Test that the test assertion agrees that the mean of the standard
# uniform distribution is 0.5.
samples = rng.uniform(size=num_samples).astype(np.float32)
self.evaluate(st.assert_true_mean_equal_by_dkwm(
samples, 0., 1., 0.5, false_fail_rate=1e-6))
# Test that the test assertion confirms that the mean of the
# standard uniform distribution is not 0.4.
with self.assertRaisesOpError("true mean greater than expected"):
self.evaluate(st.assert_true_mean_equal_by_dkwm(
samples, 0., 1., 0.4, false_fail_rate=1e-6))
# Test that the test assertion confirms that the mean of the
# standard uniform distribution is not 0.6.
with self.assertRaisesOpError("true mean smaller than expected"):
self.evaluate(st.assert_true_mean_equal_by_dkwm(
samples, 0., 1., 0.6, false_fail_rate=1e-6))
def test_dkwm_mean_in_interval_one_sample_assertion(self):
rng = np.random.RandomState(seed=0)
num_samples = 5000
# Test that the test assertion agrees that the mean of the standard
# uniform distribution is between 0.4 and 0.6.
samples = rng.uniform(size=num_samples).astype(np.float32)
self.evaluate(st.assert_true_mean_in_interval_by_dkwm(
samples, 0., 1.,
expected_low=0.4, expected_high=0.6, false_fail_rate=1e-6))
# Test that the test assertion confirms that the mean of the
# standard uniform distribution is not between 0.2 and 0.4.
with self.assertRaisesOpError("true mean greater than expected"):
self.evaluate(st.assert_true_mean_in_interval_by_dkwm(
samples, 0., 1.,
expected_low=0.2, expected_high=0.4, false_fail_rate=1e-6))
# Test that the test assertion confirms that the mean of the
# standard uniform distribution is not between 0.6 and 0.8.
with self.assertRaisesOpError("true mean smaller than expected"):
self.evaluate(st.assert_true_mean_in_interval_by_dkwm(
samples, 0., 1.,
expected_low=0.6, expected_high=0.8, false_fail_rate=1e-6))
def test_dkwm_mean_two_sample_assertion(self):
rng = np.random.RandomState(seed=0)
num_samples = 4000
# 4000 samples is chosen to be enough to find discrepancies of
# size 0.2 or more with assurance 1e-6, as confirmed here:
d = st.min_discrepancy_of_true_means_detectable_by_dkwm_two_sample(
num_samples, 0., 1., num_samples, 0., 1.,
false_fail_rate=1e-6, false_pass_rate=1e-6)
d = self.evaluate(d)
self.assertLess(d, 0.2)
# Test that the test assertion agrees that the standard
# uniform distribution has the same mean as itself.
samples1 = rng.uniform(size=num_samples).astype(np.float32)
samples2 = rng.uniform(size=num_samples).astype(np.float32)
self.evaluate(st.assert_true_mean_equal_by_dkwm_two_sample(
samples1, 0., 1., samples2, 0., 1., false_fail_rate=1e-6))
def test_dkwm_mean_two_sample_assertion_beta_2_1_false(self):
rng = np.random.RandomState(seed=0)
num_samples = 4000
samples1 = rng.uniform(size=num_samples).astype(np.float32)
# As established above, 4000 samples is enough to find discrepancies
# of size 0.2 or more with assurance 1e-6.
# Test that the test assertion confirms that the mean of the
# standard uniform distribution is different from the mean of beta(2, 1).
beta_high_samples = rng.beta(2, 1, size=num_samples).astype(np.float32)
with self.assertRaisesOpError("true mean smaller than expected"):
self.evaluate(st.assert_true_mean_equal_by_dkwm_two_sample(
samples1, 0., 1.,
beta_high_samples, 0., 1.,
false_fail_rate=1e-6))
def test_dkwm_mean_two_sample_assertion_beta_1_2_false(self):
rng = np.random.RandomState(seed=0)
num_samples = 4000
samples1 = rng.uniform(size=num_samples).astype(np.float32)
# As established above, 4000 samples is enough to find discrepancies
# of size 0.2 or more with assurance 1e-6.
# Test that the test assertion confirms that the mean of the
# standard uniform distribution is different from the mean of beta(1, 2).
beta_low_samples = rng.beta(1, 2, size=num_samples).astype(np.float32)
with self.assertRaisesOpError("true mean greater than expected"):
self.evaluate(st.assert_true_mean_equal_by_dkwm_two_sample(
samples1, 0., 1.,
beta_low_samples, 0., 1.,
false_fail_rate=1e-6))
def test_dkwm_argument_validity_checking(self):
rng = np.random.RandomState(seed=0)
samples = rng.uniform(
low=[0., 1.], high=[1., 2.], size=(2500, 1, 2)).astype(np.float32)
# Test that the test library complains if the given samples fall
# outside the purported bounds.
with self.assertRaisesOpError("maximum value exceeds expectations"):
self.evaluate(st.true_mean_confidence_interval_by_dkwm(
samples, [[0., 1.]], [[0.5, 1.5]], error_rate=0.5))
with self.assertRaisesOpError("minimum value falls below expectations"):
self.evaluate(st.true_mean_confidence_interval_by_dkwm(
samples, [[0.5, 1.5]], [[1., 2.]], error_rate=0.5))
# But doesn't complain if they don't.
op = st.true_mean_confidence_interval_by_dkwm(
samples, [[0., 1.]], [[1., 2.]], error_rate=0.5)
_ = self.evaluate(op)
def test_do_maximum_mean(self):
n = 117
envelope = 0.02 # > 2 / n, but < 3 / n
rng = np.random.RandomState(seed=8)
samples = rng.uniform(size=n).astype(np.float32)
# Compute the answer in TF using the code under test
envelope_t = ops.convert_to_tensor(envelope)
max_mean = st._do_maximum_mean(samples, envelope_t, 1)
max_mean = self.evaluate(max_mean)
# Compute the correct answer for this case in numpy. In this
# example, `n` and `envelope` are such that `samples[2]` is the
# element that should be taken partially, regardless of the
# content of the `samples` array (see algorithm description in
# `../ops/statistical_testing.py`).
samples = sorted(samples)
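    # Since 2/n < envelope < 3/n, the envelope mass removes samples[0] and
    # samples[1] entirely (2/n of mass), takes envelope - 2/n out of
    # samples[2]'s 1/n weight, and places mass `envelope` at the upper bound 1.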
weight = 1. / n - (envelope - 2. / n)
answer = samples[2] * weight + sum(samples[3:]) / n + envelope * 1.
self.assertAllClose(max_mean, answer, rtol=1e-9)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/statistical_testing_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the RelaxedBernoulli distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.special
from tensorflow.contrib.distributions.python.ops import relaxed_bernoulli
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import test
class RelaxedBernoulliTest(test.TestCase):
def testP(self):
"""Tests that parameter P is set correctly. Note that dist.p != dist.pdf."""
temperature = 1.0
p = [0.1, 0.4]
dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p)
with self.cached_session():
self.assertAllClose(p, dist.probs.eval())
def testLogits(self):
temperature = 2.0
logits = [-42., 42.]
dist = relaxed_bernoulli.RelaxedBernoulli(temperature, logits=logits)
with self.cached_session():
self.assertAllClose(logits, dist.logits.eval())
with self.cached_session():
self.assertAllClose(scipy.special.expit(logits), dist.probs.eval())
p = [0.01, 0.99, 0.42]
dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p)
with self.cached_session():
self.assertAllClose(scipy.special.logit(p), dist.logits.eval())
def testInvalidP(self):
temperature = 1.0
invalid_ps = [1.01, 2.]
for p in invalid_ps:
with self.cached_session():
with self.assertRaisesOpError("probs has components greater than 1"):
dist = relaxed_bernoulli.RelaxedBernoulli(temperature,
probs=p,
validate_args=True)
dist.probs.eval()
invalid_ps = [-0.01, -3.]
for p in invalid_ps:
with self.cached_session():
with self.assertRaisesOpError("Condition x >= 0"):
dist = relaxed_bernoulli.RelaxedBernoulli(temperature,
probs=p,
validate_args=True)
dist.probs.eval()
valid_ps = [0.0, 0.5, 1.0]
for p in valid_ps:
with self.cached_session():
dist = relaxed_bernoulli.RelaxedBernoulli(temperature,
probs=p)
self.assertEqual(p, dist.probs.eval())
def testShapes(self):
with self.cached_session():
for batch_shape in ([], [1], [2, 3, 4]):
temperature = 1.0
p = np.random.random(batch_shape).astype(np.float32)
dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape.as_list())
self.assertAllEqual([], dist.event_shape_tensor().eval())
def testZeroTemperature(self):
"""If validate_args, raises InvalidArgumentError when temperature is 0."""
temperature = constant_op.constant(0.0)
p = constant_op.constant([0.1, 0.4])
dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p,
validate_args=True)
with self.cached_session():
sample = dist.sample()
with self.assertRaises(errors_impl.InvalidArgumentError):
sample.eval()
def testDtype(self):
temperature = constant_op.constant(1.0, dtype=dtypes.float32)
p = constant_op.constant([0.1, 0.4], dtype=dtypes.float32)
dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p)
self.assertEqual(dist.dtype, dtypes.float32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.probs.dtype, dist.prob([0.0]).dtype)
self.assertEqual(dist.probs.dtype, dist.log_prob([0.0]).dtype)
temperature = constant_op.constant(1.0, dtype=dtypes.float64)
p = constant_op.constant([0.1, 0.4], dtype=dtypes.float64)
dist64 = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p)
self.assertEqual(dist64.dtype, dtypes.float64)
self.assertEqual(dist64.dtype, dist64.sample(5).dtype)
def testLogProb(self):
with self.cached_session():
t = np.array(1.0, dtype=np.float64)
p = np.array(0.1, dtype=np.float64) # P(x=1)
dist = relaxed_bernoulli.RelaxedBernoulli(t, probs=p)
xs = np.array([0.1, 0.3, 0.5, 0.9], dtype=np.float64)
# analytical density from Maddison et al. 2016
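      # With alpha = p / (1 - p), the density on (0, 1) is
      #   pdf(x) = t * alpha * x**(-t - 1) * (1 - x)**(-t - 1)
      #            / (alpha * x**-t + (1 - x)**-t)**2,
      # whose log is assembled below.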
alpha = np.array(p/(1-p), dtype=np.float64)
expected_log_pdf = (np.log(t) + np.log(alpha) +
(-t-1)*(np.log(xs)+np.log(1-xs)) -
2*np.log(alpha*np.power(xs, -t) + np.power(1-xs, -t)))
log_pdf = dist.log_prob(xs).eval()
self.assertAllClose(expected_log_pdf, log_pdf)
def testBoundaryConditions(self):
with self.cached_session():
temperature = 1e-2
dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=1.0)
self.assertAllClose(np.nan, dist.log_prob(0.0).eval())
self.assertAllClose([np.nan], [dist.log_prob(1.0).eval()])
def testSampleN(self):
"""mean of quantized samples still approximates the Bernoulli mean."""
with self.cached_session():
temperature = 1e-2
p = [0.2, 0.6, 0.5]
dist = relaxed_bernoulli.RelaxedBernoulli(temperature, probs=p)
n = 10000
samples = dist.sample(n)
self.assertEqual(samples.dtype, dtypes.float32)
sample_values = samples.eval()
self.assertTrue(np.all(sample_values >= 0))
self.assertTrue(np.all(sample_values <= 1))
frac_ones_like = np.sum(sample_values >= 0.5, axis=0)/n
self.assertAllClose(p, frac_ones_like, atol=1e-2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/relaxed_bernoulli_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TransformedDistribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import test
bs = bijectors
ds = distributions
la = linalg
class DummyMatrixTransform(bs.Bijector):
"""Tractable matrix transformation.
  This is a nonsensical bijector that has forward/inverse_min_event_ndims=2.
The main use is to check that transformed distribution calculations are done
appropriately.
"""
def __init__(self):
super(DummyMatrixTransform, self).__init__(
forward_min_event_ndims=2,
is_constant_jacobian=False,
validate_args=False,
name="dummy")
def _forward(self, x):
return x
def _inverse(self, y):
return y
# Note: These jacobians don't make sense.
def _forward_log_det_jacobian(self, x):
return -linalg_ops.matrix_determinant(x)
def _inverse_log_det_jacobian(self, x):
return linalg_ops.matrix_determinant(x)
class TransformedDistributionTest(test.TestCase):
def _cls(self):
return ds.TransformedDistribution
def _make_unimplemented(self, name):
def _unimplemented(self, *args): # pylint: disable=unused-argument
raise NotImplementedError("{} not implemented".format(name))
return _unimplemented
def testTransformedDistribution(self):
g = ops.Graph()
with g.as_default():
mu = 3.0
sigma = 2.0
# Note: the Jacobian callable only works for this example; more generally
# you may or may not need a reduce_sum.
log_normal = self._cls()(
distribution=ds.Normal(loc=mu, scale=sigma),
bijector=bs.Exp())
sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))
# sample
sample = log_normal.sample(100000, seed=235)
self.assertAllEqual([], log_normal.event_shape)
with self.session(graph=g):
self.assertAllEqual([], log_normal.event_shape_tensor().eval())
self.assertAllClose(
sp_dist.mean(), np.mean(sample.eval()), atol=0.0, rtol=0.05)
# pdf, log_pdf, cdf, etc...
# The mean of the lognormal is around 148.
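      # (exp(mu + sigma**2 / 2) = exp(3. + 2.) = exp(5.) ~= 148.4.)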
test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32)
for func in [[log_normal.log_prob, sp_dist.logpdf],
[log_normal.prob, sp_dist.pdf],
[log_normal.log_cdf, sp_dist.logcdf],
[log_normal.cdf, sp_dist.cdf],
[log_normal.survival_function, sp_dist.sf],
[log_normal.log_survival_function, sp_dist.logsf]]:
actual = func[0](test_vals)
expected = func[1](test_vals)
with self.session(graph=g):
self.assertAllClose(expected, actual.eval(), atol=0, rtol=0.01)
def testNonInjectiveTransformedDistribution(self):
g = ops.Graph()
with g.as_default():
mu = 1.
sigma = 2.0
abs_normal = self._cls()(
distribution=ds.Normal(loc=mu, scale=sigma),
bijector=bs.AbsoluteValue())
sp_normal = stats.norm(mu, sigma)
# sample
sample = abs_normal.sample(100000, seed=235)
self.assertAllEqual([], abs_normal.event_shape)
with self.session(graph=g):
sample_ = sample.eval()
self.assertAllEqual([], abs_normal.event_shape_tensor().eval())
# Abs > 0, duh!
np.testing.assert_array_less(0, sample_)
# Let X ~ Normal(mu, sigma), Y := |X|, then
# P[Y < 0.77] = P[-0.77 < X < 0.77]
self.assertAllClose(
sp_normal.cdf(0.77) - sp_normal.cdf(-0.77),
(sample_ < 0.77).mean(), rtol=0.01)
# p_Y(y) = p_X(-y) + p_X(y),
self.assertAllClose(
sp_normal.pdf(1.13) + sp_normal.pdf(-1.13),
abs_normal.prob(1.13).eval())
# Log[p_Y(y)] = Log[p_X(-y) + p_X(y)]
self.assertAllClose(
np.log(sp_normal.pdf(2.13) + sp_normal.pdf(-2.13)),
abs_normal.log_prob(2.13).eval())
def testQuantile(self):
with self.cached_session() as sess:
logit_normal = self._cls()(
distribution=ds.Normal(loc=0., scale=1.),
bijector=bs.Sigmoid(),
validate_args=True)
grid = [0., 0.25, 0.5, 0.75, 1.]
q = logit_normal.quantile(grid)
cdf = logit_normal.cdf(q)
cdf_ = sess.run(cdf)
self.assertAllClose(grid, cdf_, rtol=1e-6, atol=0.)
def testCachedSamples(self):
exp_forward_only = bs.Exp()
exp_forward_only._inverse = self._make_unimplemented(
"inverse")
exp_forward_only._inverse_event_shape_tensor = self._make_unimplemented(
"inverse_event_shape_tensor ")
exp_forward_only._inverse_event_shape = self._make_unimplemented(
"inverse_event_shape ")
exp_forward_only._inverse_log_det_jacobian = self._make_unimplemented(
"inverse_log_det_jacobian ")
with self.cached_session() as sess:
mu = 3.0
sigma = 0.02
log_normal = self._cls()(
distribution=ds.Normal(loc=mu, scale=sigma),
bijector=exp_forward_only)
sample = log_normal.sample([2, 3], seed=42)
sample_val, log_pdf_val = sess.run([sample, log_normal.log_prob(sample)])
expected_log_pdf = stats.lognorm.logpdf(
sample_val, s=sigma, scale=np.exp(mu))
self.assertAllClose(expected_log_pdf, log_pdf_val, rtol=1e-4, atol=0.)
def testCachedSamplesInvert(self):
exp_inverse_only = bs.Exp()
exp_inverse_only._forward = self._make_unimplemented(
"forward")
exp_inverse_only._forward_event_shape_tensor = self._make_unimplemented(
"forward_event_shape_tensor ")
exp_inverse_only._forward_event_shape = self._make_unimplemented(
"forward_event_shape ")
exp_inverse_only._forward_log_det_jacobian = self._make_unimplemented(
"forward_log_det_jacobian ")
log_forward_only = bs.Invert(exp_inverse_only)
with self.cached_session() as sess:
      # The log bijector isn't defined over the whole real line, so we make
      # sigma small enough that the draws are essentially always positive.
mu = 2.
sigma = 1e-2
exp_normal = self._cls()(
distribution=ds.Normal(loc=mu, scale=sigma),
bijector=log_forward_only)
sample = exp_normal.sample([2, 3], seed=42)
sample_val, log_pdf_val = sess.run([sample, exp_normal.log_prob(sample)])
expected_log_pdf = sample_val + stats.norm.logpdf(
np.exp(sample_val), loc=mu, scale=sigma)
self.assertAllClose(expected_log_pdf, log_pdf_val, atol=0.)
def testShapeChangingBijector(self):
with self.cached_session():
softmax = bs.SoftmaxCentered()
standard_normal = ds.Normal(loc=0., scale=1.)
multi_logit_normal = self._cls()(
distribution=standard_normal,
bijector=softmax,
event_shape=[1])
x = [[[-np.log(3.)], [0.]],
[[np.log(3)], [np.log(5)]]]
y = softmax.forward(x).eval()
expected_log_pdf = (
np.squeeze(stats.norm(loc=0., scale=1.).logpdf(x)) -
np.sum(np.log(y), axis=-1))
self.assertAllClose(expected_log_pdf,
multi_logit_normal.log_prob(y).eval())
self.assertAllClose(
[1, 2, 3, 2],
array_ops.shape(multi_logit_normal.sample([1, 2, 3])).eval())
self.assertAllEqual([2], multi_logit_normal.event_shape)
self.assertAllEqual([2], multi_logit_normal.event_shape_tensor().eval())
def testCastLogDetJacobian(self):
"""Test log_prob when Jacobian and log_prob dtypes do not match."""
with self.cached_session():
# Create an identity bijector whose jacobians have dtype int32
int_identity = bs.Inline(
forward_fn=array_ops.identity,
inverse_fn=array_ops.identity,
inverse_log_det_jacobian_fn=(
lambda y: math_ops.cast(0, dtypes.int32)),
forward_log_det_jacobian_fn=(
lambda x: math_ops.cast(0, dtypes.int32)),
forward_min_event_ndims=0,
is_constant_jacobian=True)
normal = self._cls()(
distribution=ds.Normal(loc=0., scale=1.),
bijector=int_identity,
validate_args=True)
y = normal.sample()
normal.log_prob(y).eval()
normal.prob(y).eval()
normal.entropy().eval()
def testEntropy(self):
with self.cached_session():
shift = np.array([[-1, 0, 1], [-1, -2, -3]], dtype=np.float32)
diag = np.array([[1, 2, 3], [2, 3, 2]], dtype=np.float32)
actual_mvn_entropy = np.concatenate([
[stats.multivariate_normal(shift[i], np.diag(diag[i]**2)).entropy()]
for i in range(len(diag))])
fake_mvn = self._cls()(
ds.MultivariateNormalDiag(
loc=array_ops.zeros_like(shift),
scale_diag=array_ops.ones_like(diag),
validate_args=True),
bs.AffineLinearOperator(
shift,
scale=la.LinearOperatorDiag(diag, is_non_singular=True),
validate_args=True),
validate_args=True)
self.assertAllClose(actual_mvn_entropy,
fake_mvn.entropy().eval())
def testScalarBatchScalarEventIdentityScale(self):
with self.cached_session() as sess:
exp2 = self._cls()(
ds.Exponential(rate=0.25),
bijector=ds.bijectors.AffineScalar(scale=2.)
)
log_prob = exp2.log_prob(1.)
log_prob_ = sess.run(log_prob)
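      # Hand computation: x = 1. pulls back to 0.5 under scale=2., so the base
      # log-prob is log(0.25) - 0.25 * 0.5 and the Jacobian correction is
      # -log(2.).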
base_log_prob = -0.5 * 0.25 + np.log(0.25)
ildj = np.log(2.)
self.assertAllClose(base_log_prob - ildj, log_prob_, rtol=1e-6, atol=0.)
class ScalarToMultiTest(test.TestCase):
def _cls(self):
return ds.TransformedDistribution
def setUp(self):
self._shift = np.array([-1, 0, 1], dtype=np.float32)
self._tril = np.array([[[1., 0, 0],
[2, 1, 0],
[3, 2, 1]],
[[2, 0, 0],
[3, 2, 0],
[4, 3, 2]]],
dtype=np.float32)
def _testMVN(self,
base_distribution_class,
base_distribution_kwargs,
batch_shape=(),
event_shape=(),
not_implemented_message=None):
with self.cached_session() as sess:
# Overriding shapes must be compatible w/bijector; most bijectors are
# batch_shape agnostic and only care about event_ndims.
# In the case of `Affine`, if we got it wrong then it would fire an
# exception due to incompatible dimensions.
batch_shape_pl = array_ops.placeholder(
dtypes.int32, name="dynamic_batch_shape")
event_shape_pl = array_ops.placeholder(
dtypes.int32, name="dynamic_event_shape")
feed_dict = {batch_shape_pl: np.array(batch_shape, dtype=np.int32),
event_shape_pl: np.array(event_shape, dtype=np.int32)}
fake_mvn_dynamic = self._cls()(
distribution=base_distribution_class(validate_args=True,
**base_distribution_kwargs),
bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
batch_shape=batch_shape_pl,
event_shape=event_shape_pl,
validate_args=True)
fake_mvn_static = self._cls()(
distribution=base_distribution_class(validate_args=True,
**base_distribution_kwargs),
bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=True)
actual_mean = np.tile(self._shift, [2, 1]) # Affine elided this tile.
actual_cov = np.matmul(self._tril, np.transpose(self._tril, [0, 2, 1]))
def actual_mvn_log_prob(x):
return np.concatenate([
[stats.multivariate_normal(
actual_mean[i], actual_cov[i]).logpdf(x[:, i, :])]
for i in range(len(actual_cov))]).T
actual_mvn_entropy = np.concatenate([
[stats.multivariate_normal(
actual_mean[i], actual_cov[i]).entropy()]
for i in range(len(actual_cov))])
self.assertAllEqual([3], fake_mvn_static.event_shape)
self.assertAllEqual([2], fake_mvn_static.batch_shape)
self.assertAllEqual(tensor_shape.TensorShape(None),
fake_mvn_dynamic.event_shape)
self.assertAllEqual(tensor_shape.TensorShape(None),
fake_mvn_dynamic.batch_shape)
x = fake_mvn_static.sample(5, seed=0).eval()
for unsupported_fn in (fake_mvn_static.log_cdf,
fake_mvn_static.cdf,
fake_mvn_static.survival_function,
fake_mvn_static.log_survival_function):
with self.assertRaisesRegexp(NotImplementedError,
not_implemented_message):
unsupported_fn(x)
num_samples = 5e3
for fake_mvn, feed_dict in ((fake_mvn_static, {}),
(fake_mvn_dynamic, feed_dict)):
# Ensure sample works by checking first, second moments.
y = fake_mvn.sample(int(num_samples), seed=0)
x = y[0:5, ...]
sample_mean = math_ops.reduce_mean(y, 0)
centered_y = array_ops.transpose(y - sample_mean, [1, 2, 0])
sample_cov = math_ops.matmul(
centered_y, centered_y, transpose_b=True) / num_samples
[
sample_mean_,
sample_cov_,
x_,
fake_event_shape_,
fake_batch_shape_,
fake_log_prob_,
fake_prob_,
fake_entropy_,
] = sess.run([
sample_mean,
sample_cov,
x,
fake_mvn.event_shape_tensor(),
fake_mvn.batch_shape_tensor(),
fake_mvn.log_prob(x),
fake_mvn.prob(x),
fake_mvn.entropy(),
], feed_dict=feed_dict)
self.assertAllClose(actual_mean, sample_mean_, atol=0.1, rtol=0.1)
self.assertAllClose(actual_cov, sample_cov_, atol=0., rtol=0.1)
# Ensure all other functions work as intended.
self.assertAllEqual([5, 2, 3], x_.shape)
self.assertAllEqual([3], fake_event_shape_)
self.assertAllEqual([2], fake_batch_shape_)
self.assertAllClose(actual_mvn_log_prob(x_), fake_log_prob_,
atol=0., rtol=1e-6)
self.assertAllClose(np.exp(actual_mvn_log_prob(x_)), fake_prob_,
atol=0., rtol=1e-5)
self.assertAllClose(actual_mvn_entropy, fake_entropy_,
atol=0., rtol=1e-6)
def testScalarBatchScalarEvent(self):
self._testMVN(
base_distribution_class=ds.Normal,
base_distribution_kwargs={"loc": 0., "scale": 1.},
batch_shape=[2],
event_shape=[3],
not_implemented_message="not implemented when overriding event_shape")
def testScalarBatchNonScalarEvent(self):
self._testMVN(
base_distribution_class=ds.MultivariateNormalDiag,
base_distribution_kwargs={"loc": [0., 0., 0.],
"scale_diag": [1., 1, 1]},
batch_shape=[2],
not_implemented_message="not implemented")
with self.cached_session():
# Can't override event_shape for scalar batch, non-scalar event.
with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
self._cls()(
distribution=ds.MultivariateNormalDiag(loc=[0.], scale_diag=[1.]),
bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
batch_shape=[2],
event_shape=[3],
validate_args=True)
def testNonScalarBatchScalarEvent(self):
self._testMVN(
base_distribution_class=ds.Normal,
base_distribution_kwargs={"loc": [0., 0], "scale": [1., 1]},
event_shape=[3],
not_implemented_message="not implemented when overriding event_shape")
with self.cached_session():
# Can't override batch_shape for non-scalar batch, scalar event.
with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
self._cls()(
distribution=ds.Normal(loc=[0.], scale=[1.]),
bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
batch_shape=[2],
event_shape=[3],
validate_args=True)
def testNonScalarBatchNonScalarEvent(self):
with self.cached_session():
      # Can't override event_shape and/or batch_shape for non-scalar batch,
# non-scalar event.
with self.assertRaisesRegexp(ValueError, "base distribution not scalar"):
self._cls()(
distribution=ds.MultivariateNormalDiag(loc=[[0.]],
scale_diag=[[1.]]),
bijector=bs.Affine(shift=self._shift, scale_tril=self._tril),
batch_shape=[2],
event_shape=[3],
validate_args=True)
def testMatrixEvent(self):
with self.cached_session() as sess:
batch_shape = [2]
event_shape = [2, 3, 3]
batch_shape_pl = array_ops.placeholder(
dtypes.int32, name="dynamic_batch_shape")
event_shape_pl = array_ops.placeholder(
dtypes.int32, name="dynamic_event_shape")
feed_dict = {batch_shape_pl: np.array(batch_shape, dtype=np.int32),
event_shape_pl: np.array(event_shape, dtype=np.int32)}
scale = 2.
loc = 0.
fake_mvn_dynamic = self._cls()(
distribution=ds.Normal(
loc=loc,
scale=scale),
bijector=DummyMatrixTransform(),
batch_shape=batch_shape_pl,
event_shape=event_shape_pl,
validate_args=True)
fake_mvn_static = self._cls()(
distribution=ds.Normal(
loc=loc,
scale=scale),
bijector=DummyMatrixTransform(),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=True)
def actual_mvn_log_prob(x):
        # The log-prob is the normal log-pdf summed over the last three
        # dimensions, plus a Jacobian term given by the determinant of x
        # (summed over the remaining rightmost axis).
return (np.sum(
stats.norm(loc, scale).logpdf(x), axis=(-1, -2, -3)) +
np.sum(np.linalg.det(x), axis=-1))
self.assertAllEqual([2, 3, 3], fake_mvn_static.event_shape)
self.assertAllEqual([2], fake_mvn_static.batch_shape)
self.assertAllEqual(tensor_shape.TensorShape(None),
fake_mvn_dynamic.event_shape)
self.assertAllEqual(tensor_shape.TensorShape(None),
fake_mvn_dynamic.batch_shape)
num_samples = 5e3
for fake_mvn, feed_dict in ((fake_mvn_static, {}),
(fake_mvn_dynamic, feed_dict)):
# Ensure sample works by checking first, second moments.
y = fake_mvn.sample(int(num_samples), seed=0)
x = y[0:5, ...]
[
x_,
fake_event_shape_,
fake_batch_shape_,
fake_log_prob_,
fake_prob_,
] = sess.run([
x,
fake_mvn.event_shape_tensor(),
fake_mvn.batch_shape_tensor(),
fake_mvn.log_prob(x),
fake_mvn.prob(x),
], feed_dict=feed_dict)
# Ensure all other functions work as intended.
self.assertAllEqual([5, 2, 2, 3, 3], x_.shape)
self.assertAllEqual([2, 3, 3], fake_event_shape_)
self.assertAllEqual([2], fake_batch_shape_)
self.assertAllClose(actual_mvn_log_prob(x_), fake_log_prob_,
atol=0., rtol=1e-6)
self.assertAllClose(np.exp(actual_mvn_log_prob(x_)), fake_prob_,
atol=0., rtol=1e-5)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/transformed_distribution_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
class MultivariateNormalTriLTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_chol(self, *shape):
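    # Build a random lower-triangular factor with a softplus'd (strictly
    # positive) diagonal; return it along with sigma = chol * chol^T.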
mat = self._rng.rand(*shape)
chol = ds.matrix_diag_transform(mat, transform=nn_ops.softplus)
chol = array_ops.matrix_band_part(chol, -1, 0)
sigma = math_ops.matmul(chol, chol, adjoint_b=True)
return chol.eval(), sigma.eval()
def testLogPDFScalarBatch(self):
with self.cached_session():
mu = self._rng.rand(2)
chol, sigma = self._random_chol(2, 2)
chol[1, 1] = -chol[1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
x = self._rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((), log_pdf.get_shape())
self.assertEqual((), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(expected_pdf, pdf.eval())
def testLogPDFXIsHigherRank(self):
with self.cached_session():
mu = self._rng.rand(2)
chol, sigma = self._random_chol(2, 2)
chol[0, 0] = -chol[0, 0]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
x = self._rng.rand(3, 2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertEqual((3,), log_pdf.get_shape())
self.assertEqual((3,), pdf.get_shape())
self.assertAllClose(expected_log_pdf, log_pdf.eval(), atol=0., rtol=0.02)
self.assertAllClose(expected_pdf, pdf.eval(), atol=0., rtol=0.03)
def testLogPDFXLowerDimension(self):
with self.cached_session():
mu = self._rng.rand(3, 2)
chol, sigma = self._random_chol(3, 2, 2)
chol[0, 0, 0] = -chol[0, 0, 0]
chol[2, 1, 1] = -chol[2, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
x = self._rng.rand(2)
log_pdf = mvn.log_prob(x)
pdf = mvn.prob(x)
self.assertEqual((3,), log_pdf.get_shape())
self.assertEqual((3,), pdf.get_shape())
# scipy can't do batches, so just test one of them.
scipy_mvn = stats.multivariate_normal(mean=mu[1, :], cov=sigma[1, :, :])
expected_log_pdf = scipy_mvn.logpdf(x)
expected_pdf = scipy_mvn.pdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval()[1])
self.assertAllClose(expected_pdf, pdf.eval()[1])
def testEntropy(self):
with self.cached_session():
mu = self._rng.rand(2)
chol, sigma = self._random_chol(2, 2)
chol[0, 0] = -chol[0, 0]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
entropy = mvn.entropy()
scipy_mvn = stats.multivariate_normal(mean=mu, cov=sigma)
expected_entropy = scipy_mvn.entropy()
self.assertEqual(entropy.get_shape(), ())
self.assertAllClose(expected_entropy, entropy.eval())
def testEntropyMultidimensional(self):
with self.cached_session():
mu = self._rng.rand(3, 5, 2)
chol, sigma = self._random_chol(3, 5, 2, 2)
chol[1, 0, 0, 0] = -chol[1, 0, 0, 0]
chol[2, 3, 1, 1] = -chol[2, 3, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
entropy = mvn.entropy()
# Scipy doesn't do batches, so test one of them.
expected_entropy = stats.multivariate_normal(
mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).entropy()
self.assertEqual(entropy.get_shape(), (3, 5))
self.assertAllClose(expected_entropy, entropy.eval()[1, 1])
def testSample(self):
with self.cached_session():
mu = self._rng.rand(2)
chol, sigma = self._random_chol(2, 2)
chol[0, 0] = -chol[0, 0]
sigma[0, 1] = -sigma[0, 1]
sigma[1, 0] = -sigma[1, 0]
n = constant_op.constant(100000)
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
samples = mvn.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), [int(100e3), 2])
self.assertAllClose(sample_values.mean(axis=0), mu, atol=1e-2)
self.assertAllClose(np.cov(sample_values, rowvar=0), sigma, atol=0.06)
def testSingularScaleRaises(self):
with self.cached_session():
mu = None
chol = [[1., 0.], [0., 0.]]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
with self.assertRaisesOpError("Singular operator"):
mvn.sample().eval()
def testSampleWithSampleShape(self):
with self.cached_session():
mu = self._rng.rand(3, 5, 2)
chol, sigma = self._random_chol(3, 5, 2, 2)
chol[1, 0, 0, 0] = -chol[1, 0, 0, 0]
chol[2, 3, 1, 1] = -chol[2, 3, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
samples_val = mvn.sample((10, 11, 12), seed=137).eval()
# Check sample shape
self.assertEqual((10, 11, 12, 3, 5, 2), samples_val.shape)
# Check sample means
x = samples_val[:, :, :, 1, 1, :]
self.assertAllClose(
x.reshape(10 * 11 * 12, 2).mean(axis=0), mu[1, 1], atol=0.05)
# Check that log_prob(samples) works
log_prob_val = mvn.log_prob(samples_val).eval()
x_log_pdf = log_prob_val[:, :, :, 1, 1]
expected_log_pdf = stats.multivariate_normal(
mean=mu[1, 1, :], cov=sigma[1, 1, :, :]).logpdf(x)
self.assertAllClose(expected_log_pdf, x_log_pdf)
def testSampleMultiDimensional(self):
with self.cached_session():
mu = self._rng.rand(3, 5, 2)
chol, sigma = self._random_chol(3, 5, 2, 2)
chol[1, 0, 0, 0] = -chol[1, 0, 0, 0]
chol[2, 3, 1, 1] = -chol[2, 3, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
n = constant_op.constant(100000)
samples = mvn.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (100000, 3, 5, 2))
self.assertAllClose(
sample_values[:, 1, 1, :].mean(axis=0), mu[1, 1, :], atol=0.05)
self.assertAllClose(
np.cov(sample_values[:, 1, 1, :], rowvar=0),
sigma[1, 1, :, :],
atol=1e-1)
def testShapes(self):
with self.cached_session():
mu = self._rng.rand(3, 5, 2)
chol, _ = self._random_chol(3, 5, 2, 2)
chol[1, 0, 0, 0] = -chol[1, 0, 0, 0]
chol[2, 3, 1, 1] = -chol[2, 3, 1, 1]
mvn = ds.MultivariateNormalTriL(mu, chol, validate_args=True)
# Shapes known at graph construction time.
self.assertEqual((2,), tuple(mvn.event_shape.as_list()))
self.assertEqual((3, 5), tuple(mvn.batch_shape.as_list()))
# Shapes known at runtime.
self.assertEqual((2,), tuple(mvn.event_shape_tensor().eval()))
self.assertEqual((3, 5), tuple(mvn.batch_shape_tensor().eval()))
def _random_mu_and_sigma(self, batch_shape, event_shape):
# This ensures sigma is positive def.
mat_shape = batch_shape + event_shape + event_shape
mat = self._rng.randn(*mat_shape)
perm = np.arange(mat.ndim)
perm[-2:] = [perm[-1], perm[-2]]
sigma = np.matmul(mat, np.transpose(mat, perm))
mu_shape = batch_shape + event_shape
mu = self._rng.randn(*mu_shape)
return mu, sigma
def testKLNonBatch(self):
batch_shape = []
event_shape = [2]
with self.cached_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalTriL(
loc=mu_a,
scale_tril=np.linalg.cholesky(sigma_a),
validate_args=True)
mvn_b = ds.MultivariateNormalTriL(
loc=mu_b,
scale_tril=np.linalg.cholesky(sigma_b),
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl = _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b)
self.assertAllClose(expected_kl, kl_v)
def testKLBatch(self):
batch_shape = [2]
event_shape = [3]
with self.cached_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mu_b, sigma_b = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalTriL(
loc=mu_a,
scale_tril=np.linalg.cholesky(sigma_a),
validate_args=True)
mvn_b = ds.MultivariateNormalTriL(
loc=mu_b,
scale_tril=np.linalg.cholesky(sigma_b),
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b[0, :], sigma_b[0, :])
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b[1, :], sigma_b[1, :])
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def testKLBatchBroadcast(self):
batch_shape = [2]
event_shape = [3]
with self.cached_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
# No batch shape.
mu_b, sigma_b = self._random_mu_and_sigma([], event_shape)
mvn_a = ds.MultivariateNormalTriL(
loc=mu_a,
scale_tril=np.linalg.cholesky(sigma_a),
validate_args=True)
mvn_b = ds.MultivariateNormalTriL(
loc=mu_b,
scale_tril=np.linalg.cholesky(sigma_b),
validate_args=True)
kl = ds.kl_divergence(mvn_a, mvn_b)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
expected_kl_0 = _compute_non_batch_kl(mu_a[0, :], sigma_a[0, :, :],
mu_b, sigma_b)
expected_kl_1 = _compute_non_batch_kl(mu_a[1, :], sigma_a[1, :, :],
mu_b, sigma_b)
self.assertAllClose(expected_kl_0, kl_v[0])
self.assertAllClose(expected_kl_1, kl_v[1])
def testKLTwoIdenticalDistributionsIsZero(self):
batch_shape = [2]
event_shape = [3]
with self.cached_session():
mu_a, sigma_a = self._random_mu_and_sigma(batch_shape, event_shape)
mvn_a = ds.MultivariateNormalTriL(
loc=mu_a,
scale_tril=np.linalg.cholesky(sigma_a),
validate_args=True)
      # Should be zero since KL(p || p) = 0.
kl = ds.kl_divergence(mvn_a, mvn_a)
self.assertEqual(batch_shape, kl.get_shape())
kl_v = kl.eval()
self.assertAllClose(np.zeros(*batch_shape), kl_v)
def testSampleLarge(self):
mu = np.array([-1., 1], dtype=np.float32)
scale_tril = np.array([[3., 0], [1, -2]], dtype=np.float32) / 3.
true_mean = mu
true_scale = scale_tril
true_covariance = np.matmul(true_scale, true_scale.T)
true_variance = np.diag(true_covariance)
true_stddev = np.sqrt(true_variance)
with self.cached_session() as sess:
dist = ds.MultivariateNormalTriL(
loc=mu,
scale_tril=scale_tril,
validate_args=True)
      # The following distribution exercises the KL divergence calculation.
mvn_chol = ds.MultivariateNormalTriL(
loc=np.array([0.5, 1.2], dtype=np.float32),
scale_tril=np.array([[3., 0], [1, 2]], dtype=np.float32),
validate_args=True)
n = int(10e3)
samps = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(samps, 0)
x = samps - sample_mean
sample_covariance = math_ops.matmul(x, x, transpose_a=True) / n
sample_kl_chol = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol = ds.kl_divergence(dist, mvn_chol)
scale = dist.scale.to_dense()
[
sample_mean_,
analytical_mean_,
sample_covariance_,
analytical_covariance_,
analytical_variance_,
analytical_stddev_,
sample_kl_chol_, analytical_kl_chol_,
scale_,
] = sess.run([
sample_mean,
dist.mean(),
sample_covariance,
dist.covariance(),
dist.variance(),
dist.stddev(),
sample_kl_chol, analytical_kl_chol,
scale,
])
sample_variance_ = np.diag(sample_covariance_)
sample_stddev_ = np.sqrt(sample_variance_)
logging.vlog(2, "true_mean:\n{} ".format(true_mean))
logging.vlog(2, "sample_mean:\n{}".format(sample_mean_))
logging.vlog(2, "analytical_mean:\n{}".format(analytical_mean_))
logging.vlog(2, "true_covariance:\n{}".format(true_covariance))
logging.vlog(2, "sample_covariance:\n{}".format(sample_covariance_))
logging.vlog(
2, "analytical_covariance:\n{}".format(analytical_covariance_))
logging.vlog(2, "true_variance:\n{}".format(true_variance))
logging.vlog(2, "sample_variance:\n{}".format(sample_variance_))
logging.vlog(2, "analytical_variance:\n{}".format(analytical_variance_))
logging.vlog(2, "true_stddev:\n{}".format(true_stddev))
logging.vlog(2, "sample_stddev:\n{}".format(sample_stddev_))
logging.vlog(2, "analytical_stddev:\n{}".format(analytical_stddev_))
logging.vlog(2, "true_scale:\n{}".format(true_scale))
logging.vlog(2, "scale:\n{}".format(scale_))
logging.vlog(2, "kl_chol: analytical:{} sample:{}".format(
analytical_kl_chol_, sample_kl_chol_))
self.assertAllClose(true_mean, sample_mean_,
atol=0., rtol=0.03)
self.assertAllClose(true_mean, analytical_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(true_covariance, sample_covariance_,
atol=0., rtol=0.03)
self.assertAllClose(true_covariance, analytical_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_variance, sample_variance_,
atol=0., rtol=0.02)
self.assertAllClose(true_variance, analytical_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_stddev, sample_stddev_,
atol=0., rtol=0.01)
self.assertAllClose(true_stddev, analytical_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(true_scale, scale_,
atol=0., rtol=1e-6)
self.assertAllClose(sample_kl_chol_, analytical_kl_chol_,
atol=0., rtol=0.02)
def _compute_non_batch_kl(mu_a, sigma_a, mu_b, sigma_b):
"""Non-batch KL for N(mu_a, sigma_a), N(mu_b, sigma_b)."""
# Check using numpy operations
# This mostly repeats the tensorflow code _kl_mvn_mvn(), but in numpy.
# So it is important to also check that KL(mvn, mvn) = 0.
sigma_b_inv = np.linalg.inv(sigma_b)
t = np.trace(sigma_b_inv.dot(sigma_a))
q = (mu_b - mu_a).dot(sigma_b_inv).dot(mu_b - mu_a)
k = mu_a.shape[0]
l = np.log(np.linalg.det(sigma_b) / np.linalg.det(sigma_a))
return 0.5 * (t + q - k + l)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_tril_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Bernoulli distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.kernel_tests import distribution_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class ConditionalDistributionTest(distribution_test.DistributionTest):
def _GetFakeDistribution(self):
class _FakeDistribution(distributions.ConditionalDistribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(_FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
def _sample_n(self, unused_shape, unused_seed, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_prob(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _prob(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _cdf(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_cdf(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _log_survival_function(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
def _survival_function(self, _, arg1, arg2):
raise ValueError(arg1, arg2)
return _FakeDistribution
def testNotImplemented(self):
d = self._GetFakeDistribution()(batch_shape=[], event_shape=[])
for name in ["sample", "log_prob", "prob", "log_cdf", "cdf",
"log_survival_function", "survival_function"]:
method = getattr(d, name)
with self.assertRaisesRegexp(ValueError, "b1.*b2"):
method([] if name == "sample" else 1.0, arg1="b1", arg2="b2")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/conditional_distribution_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Geometric distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import geometric
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# In all tests that follow, we use scipy.stats.geom, which
# represents the "Shifted" Geometric distribution. Hence, loc=-1 is passed
# in to each scipy function for testing.
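# Hedged editorial illustration (not part of the original tests) of that shift:
# scipy's geom counts the trial on which the first success occurs (support
# {1, 2, ...}), while the Geometric tested below counts failures before the
# first success (support {0, 1, ...}), so loc=-1 lines the two up. The helper
# name is an illustrative addition; np and stats are the modules imported
# above.
def _example_shifted_geom_pmf(k=3, p=0.2):
  # P(k failures before the first success) = p * (1 - p)**k.
  assert np.isclose(stats.geom.pmf(k, p, loc=-1), p * (1. - p)**k)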
class GeometricTest(test.TestCase):
def testGeometricShape(self):
with self.cached_session():
probs = constant_op.constant([.1] * 5)
geom = geometric.Geometric(probs=probs)
self.assertEqual([5,], geom.batch_shape_tensor().eval())
self.assertAllEqual([], geom.event_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([5]), geom.batch_shape)
self.assertEqual(tensor_shape.TensorShape([]), geom.event_shape)
def testInvalidP(self):
invalid_ps = [-.01, -0.01, -2.]
with self.cached_session():
with self.assertRaisesOpError("Condition x >= 0"):
geom = geometric.Geometric(probs=invalid_ps, validate_args=True)
geom.probs.eval()
invalid_ps = [1.1, 3., 5.]
with self.cached_session():
with self.assertRaisesOpError("Condition x <= y"):
geom = geometric.Geometric(probs=invalid_ps, validate_args=True)
geom.probs.eval()
def testGeomLogPmf(self):
with self.cached_session():
batch_size = 6
probs = constant_op.constant([.2] * batch_size)
probs_v = .2
x = np.array([2., 3., 4., 5., 6., 7.], dtype=np.float32)
geom = geometric.Geometric(probs=probs)
expected_log_prob = stats.geom.logpmf(x, probs_v, loc=-1)
log_prob = geom.log_prob(x)
self.assertEqual([6,], log_prob.get_shape())
self.assertAllClose(expected_log_prob, log_prob.eval())
pmf = geom.prob(x)
self.assertEqual([6,], pmf.get_shape())
self.assertAllClose(np.exp(expected_log_prob), pmf.eval())
def testGeometricLogPmf_validate_args(self):
with self.cached_session():
batch_size = 6
probs = constant_op.constant([.9] * batch_size)
x = array_ops.placeholder(dtypes.float32, shape=[6])
feed_dict = {x: [2.5, 3.2, 4.3, 5.1, 6., 7.]}
geom = geometric.Geometric(probs=probs, validate_args=True)
with self.assertRaisesOpError("Condition x == y"):
log_prob = geom.log_prob(x)
log_prob.eval(feed_dict=feed_dict)
with self.assertRaisesOpError("Condition x >= 0"):
log_prob = geom.log_prob(np.array([-1.], dtype=np.float32))
log_prob.eval()
geom = geometric.Geometric(probs=probs)
log_prob = geom.log_prob(x)
self.assertEqual([6,], log_prob.get_shape())
pmf = geom.prob(x)
self.assertEqual([6,], pmf.get_shape())
def testGeometricLogPmfMultidimensional(self):
with self.cached_session():
batch_size = 6
probs = constant_op.constant([[.2, .3, .5]] * batch_size)
probs_v = np.array([.2, .3, .5])
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
geom = geometric.Geometric(probs=probs)
expected_log_prob = stats.geom.logpmf(x, probs_v, loc=-1)
log_prob = geom.log_prob(x)
log_prob_values = log_prob.eval()
self.assertEqual([6, 3], log_prob.get_shape())
self.assertAllClose(expected_log_prob, log_prob_values)
pmf = geom.prob(x)
pmf_values = pmf.eval()
self.assertEqual([6, 3], pmf.get_shape())
self.assertAllClose(np.exp(expected_log_prob), pmf_values)
def testGeometricCDF(self):
with self.cached_session():
batch_size = 6
probs = constant_op.constant([[.2, .4, .5]] * batch_size)
probs_v = np.array([.2, .4, .5])
x = np.array([[2., 3., 4., 5.5, 6., 7.]], dtype=np.float32).T
geom = geometric.Geometric(probs=probs)
expected_cdf = stats.geom.cdf(x, probs_v, loc=-1)
cdf = geom.cdf(x)
self.assertEqual([6, 3], cdf.get_shape())
self.assertAllClose(expected_cdf, cdf.eval())
def testGeometricEntropy(self):
with self.cached_session():
probs_v = np.array([.1, .3, .25], dtype=np.float32)
geom = geometric.Geometric(probs=probs_v)
expected_entropy = stats.geom.entropy(probs_v, loc=-1)
self.assertEqual([3], geom.entropy().get_shape())
self.assertAllClose(expected_entropy, geom.entropy().eval())
def testGeometricMean(self):
with self.cached_session():
probs_v = np.array([.1, .3, .25])
geom = geometric.Geometric(probs=probs_v)
expected_means = stats.geom.mean(probs_v, loc=-1)
self.assertEqual([3], geom.mean().get_shape())
self.assertAllClose(expected_means, geom.mean().eval())
def testGeometricVariance(self):
with self.cached_session():
probs_v = np.array([.1, .3, .25])
geom = geometric.Geometric(probs=probs_v)
expected_vars = stats.geom.var(probs_v, loc=-1)
self.assertEqual([3], geom.variance().get_shape())
self.assertAllClose(expected_vars, geom.variance().eval())
def testGeometricStddev(self):
with self.cached_session():
probs_v = np.array([.1, .3, .25])
geom = geometric.Geometric(probs=probs_v)
expected_stddevs = stats.geom.std(probs_v, loc=-1)
self.assertEqual([3], geom.stddev().get_shape())
self.assertAllClose(geom.stddev().eval(), expected_stddevs)
def testGeometricMode(self):
with self.cached_session():
probs_v = np.array([.1, .3, .25])
geom = geometric.Geometric(probs=probs_v)
self.assertEqual([3,], geom.mode().get_shape())
self.assertAllClose([0.] * 3, geom.mode().eval())
def testGeometricSample(self):
with self.cached_session():
probs_v = [.3, .9]
probs = constant_op.constant(probs_v)
n = constant_op.constant(100000)
geom = geometric.Geometric(probs=probs)
samples = geom.sample(n, seed=12345)
self.assertEqual([100000, 2], samples.get_shape())
sample_values = samples.eval()
self.assertFalse(np.any(sample_values < 0.0))
for i in range(2):
self.assertAllClose(sample_values[:, i].mean(),
stats.geom.mean(probs_v[i], loc=-1),
rtol=.02)
self.assertAllClose(sample_values[:, i].var(),
stats.geom.var(probs_v[i], loc=-1),
rtol=.02)
def testGeometricSampleMultiDimensional(self):
with self.cached_session():
batch_size = 2
probs_v = [.3, .9]
probs = constant_op.constant([probs_v] * batch_size)
geom = geometric.Geometric(probs=probs)
n = 400000
samples = geom.sample(n, seed=12345)
self.assertEqual([n, batch_size, 2], samples.get_shape())
sample_values = samples.eval()
self.assertFalse(np.any(sample_values < 0.0))
for i in range(2):
self.assertAllClose(sample_values[:, 0, i].mean(),
stats.geom.mean(probs_v[i], loc=-1),
rtol=.02)
self.assertAllClose(sample_values[:, 0, i].var(),
stats.geom.var(probs_v[i], loc=-1),
rtol=.02)
self.assertAllClose(sample_values[:, 1, i].mean(),
stats.geom.mean(probs_v[i], loc=-1),
rtol=.02)
self.assertAllClose(sample_values[:, 1, i].var(),
stats.geom.var(probs_v[i], loc=-1),
rtol=.02)
def testGeometricAtBoundary(self):
with self.cached_session():
geom = geometric.Geometric(probs=1., validate_args=True)
x = np.array([0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
expected_log_prob = stats.geom.logpmf(x, [1.], loc=-1)
# Scipy incorrectly returns nan.
expected_log_prob[np.isnan(expected_log_prob)] = 0.
log_prob = geom.log_prob(x)
self.assertEqual([7,], log_prob.get_shape())
self.assertAllClose(expected_log_prob, log_prob.eval())
pmf = geom.prob(x)
self.assertEqual([7,], pmf.get_shape())
self.assertAllClose(np.exp(expected_log_prob), pmf.eval())
expected_log_cdf = stats.geom.logcdf(x, 1., loc=-1)
log_cdf = geom.log_cdf(x)
self.assertEqual([7,], log_cdf.get_shape())
self.assertAllClose(expected_log_cdf, log_cdf.eval())
cdf = geom.cdf(x)
self.assertEqual([7,], cdf.get_shape())
self.assertAllClose(np.exp(expected_log_cdf), cdf.eval())
expected_mean = stats.geom.mean(1., loc=-1)
self.assertEqual([], geom.mean().get_shape())
self.assertAllClose(expected_mean, geom.mean().eval())
expected_variance = stats.geom.var(1., loc=-1)
self.assertEqual([], geom.variance().get_shape())
self.assertAllClose(expected_variance, geom.variance().eval())
with self.assertRaisesOpError("Entropy is undefined"):
geom.entropy().eval()
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/geometric_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_metrics
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_no_variables
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_summary_tags
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class EstimatorHeadDistributionRegressionTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.LINEAR_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def testNormalLocScaleLogits(self):
# We will bias logits[..., 1] so that: logits[..., 1]=0 implies scale=1.
scale_bias = np.log(np.expm1(1.))
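    # (Hedged editorial aside: softplus(scale_bias) = log(1 + expm1(1.)) =
    # log(e) = 1, so a zero logit in the second column does yield scale = 1.)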
def softplus(x):
return np.log1p(np.exp(x))
def actual_loss(logits, labels):
mu = actual_mean(logits)
sigma = actual_stddev(logits)
labels = np.squeeze(labels, -1)
z = (labels - mu) / sigma
loss = 0.5 * (z**2. + np.log(2. * np.pi)) + np.log(sigma)
return loss.mean()
def actual_mean(logits):
return logits[..., 0]
def actual_stddev(logits):
return softplus(logits[..., 1] + scale_bias)
def make_distribution_fn(logits):
return normal_lib.Normal(
loc=logits[..., 0],
scale=nn_ops.softplus(logits[..., 1] + scale_bias))
head = estimator_lib.estimator_head_distribution_regression(
make_distribution_fn,
logits_dimension=2)
labels = np.float32([[-1.],
[0.],
[1.]])
logits = np.float32([[0., -1],
[1, 0.5],
[-1, 1]])
with ops.Graph().as_default(), session.Session():
# Convert to tensor so we can index into head.distributions.
tflogits = ops.convert_to_tensor(logits, name="logits")
model_fn_ops = head.create_model_fn_ops(
{},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=head_lib.no_op_train_fn,
logits=tflogits)
self._assert_output_alternatives(model_fn_ops)
_assert_summary_tags(self, ["loss"])
_assert_no_variables(self)
loss = actual_loss(logits, labels)
_assert_metrics(self, loss, {"loss": loss}, model_fn_ops)
# Now we verify the underlying distribution was correctly constructed.
expected_mean = logits[..., 0]
self.assertAllClose(
expected_mean,
head.distribution(tflogits).mean().eval(),
rtol=1e-6, atol=0.)
expected_stddev = softplus(logits[..., 1] + scale_bias)
self.assertAllClose(
expected_stddev,
head.distribution(tflogits).stddev().eval(),
rtol=1e-6, atol=0.)
# Should have created only one distribution.
self.assertEqual(1, len(head.distributions))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/estimator_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Wishart."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
distributions = distributions_lib
def make_pd(start, n):
"""Deterministically create a positive definite matrix."""
x = np.tril(linalg.circulant(np.arange(start, start + n)))
return np.dot(x, x.T)
def chol(x):
"""Compute Cholesky factorization."""
return linalg.cholesky(x).T
def wishart_var(df, x):
"""Compute Wishart variance for numpy scale matrix."""
x = np.sqrt(df) * np.asarray(x)
d = np.expand_dims(np.diag(x), -1)
return x**2 + np.dot(d, d.T)
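# Hedged editorial sketch (not part of the original tests) tying the helpers
# together: chol(x) is a lower-triangular L with L.dot(L.T) == x, and
# wishart_var(df, S) is the elementwise df * (S**2 + outer(diag(S), diag(S))),
# i.e. Var(W_ij) = df * (S_ij**2 + S_ii * S_jj) for W ~ Wishart(df, scale=S).
# The helper name below is an illustrative addition; np is numpy as imported
# above.
def _example_check_wishart_helpers():
  s = make_pd(1., 3)
  l = chol(s)
  np.testing.assert_allclose(l.dot(l.T), s)
  d = np.diag(s)
  np.testing.assert_allclose(wishart_var(4., s), 4. * (s**2 + np.outer(d, d)))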
class WishartCholeskyTest(test.TestCase):
def testEntropy(self):
with self.cached_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
# sp.stats.wishart(df=4, scale=make_pd(1., 2)).entropy()
self.assertAllClose(6.301387092430769, w.entropy().eval())
w = distributions.WishartCholesky(df=1, scale=[[1.]])
# sp.stats.wishart(df=1,scale=1).entropy()
self.assertAllClose(0.78375711047393404, w.entropy().eval())
def testMeanLogDetAndLogNormalizingConstant(self):
with self.cached_session():
def entropy_alt(w):
return (
w.log_normalization()
- 0.5 * (w.df - w.dimension - 1.) * w.mean_log_det()
+ 0.5 * w.df * w.dimension).eval()
w = distributions.WishartCholesky(df=4,
scale=chol(make_pd(1., 2)))
self.assertAllClose(w.entropy().eval(), entropy_alt(w))
w = distributions.WishartCholesky(df=5, scale=[[1.]])
self.assertAllClose(w.entropy().eval(), entropy_alt(w))
def testMean(self):
with self.cached_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(df * scale, w.mean().eval())
def testMode(self):
with self.cached_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual((df - 2. - 1.) * scale, w.mode().eval())
def testStd(self):
with self.cached_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(chol(wishart_var(df, scale)), w.stddev().eval())
def testVariance(self):
with self.cached_session():
scale = make_pd(1., 2)
df = 4
w = distributions.WishartCholesky(df, chol(scale))
self.assertAllEqual(wishart_var(df, scale), w.variance().eval())
def testSample(self):
with self.cached_session():
scale = make_pd(1., 2)
df = 4
chol_w = distributions.WishartCholesky(
df, chol(scale), cholesky_input_output_matrices=False)
x = chol_w.sample(1, seed=42).eval()
chol_x = [chol(x[0])]
full_w = distributions.WishartFull(
df, scale, cholesky_input_output_matrices=False)
self.assertAllClose(x, full_w.sample(1, seed=42).eval())
chol_w_chol = distributions.WishartCholesky(
df, chol(scale), cholesky_input_output_matrices=True)
self.assertAllClose(chol_x, chol_w_chol.sample(1, seed=42).eval())
eigen_values = array_ops.matrix_diag_part(
chol_w_chol.sample(
1000, seed=42))
np.testing.assert_array_less(0., eigen_values.eval())
full_w_chol = distributions.WishartFull(
df, scale, cholesky_input_output_matrices=True)
self.assertAllClose(chol_x, full_w_chol.sample(1, seed=42).eval())
eigen_values = array_ops.matrix_diag_part(
full_w_chol.sample(
1000, seed=42))
np.testing.assert_array_less(0., eigen_values.eval())
# Check first and second moments.
df = 4.
chol_w = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False)
x = chol_w.sample(10000, seed=42)
self.assertAllEqual((10000, 3, 3), x.get_shape())
moment1_estimate = math_ops.reduce_mean(x, axis=[0]).eval()
self.assertAllClose(chol_w.mean().eval(), moment1_estimate, rtol=0.05)
# The Variance estimate uses the squares rather than outer-products
# because Wishart.Variance is the diagonal of the Wishart covariance
# matrix.
variance_estimate = (math_ops.reduce_mean(math_ops.square(x), axis=[0]) -
math_ops.square(moment1_estimate)).eval()
self.assertAllClose(
chol_w.variance().eval(), variance_estimate, rtol=0.05)
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
with self.cached_session():
df = 4.
n_val = 100
random_seed.set_random_seed(654321)
chol_w1 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart1")
samples1 = chol_w1.sample(n_val, seed=123456).eval()
random_seed.set_random_seed(654321)
chol_w2 = distributions.WishartCholesky(
df=df,
scale=chol(make_pd(1., 3)),
cholesky_input_output_matrices=False,
name="wishart2")
samples2 = chol_w2.sample(n_val, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testProb(self):
with self.cached_session():
# Generate some positive definite (pd) matrices and their Cholesky
# factorizations.
x = np.array(
[make_pd(1., 2), make_pd(2., 2), make_pd(3., 2), make_pd(4., 2)])
chol_x = np.array([chol(x[0]), chol(x[1]), chol(x[2]), chol(x[3])])
# Since Wishart wasn"t added to SciPy until 0.16, we'll spot check some
# pdfs with hard-coded results from upstream SciPy.
log_prob_df_seq = np.array([
# math.log(stats.wishart.pdf(x[0], df=2+0, scale=x[0]))
-3.5310242469692907,
# math.log(stats.wishart.pdf(x[1], df=2+1, scale=x[1]))
-7.689907330328961,
# math.log(stats.wishart.pdf(x[2], df=2+2, scale=x[2]))
-10.815845159537895,
# math.log(stats.wishart.pdf(x[3], df=2+3, scale=x[3]))
-13.640549882916691,
])
# This test checks that batches don't interfere with correctness.
w = distributions.WishartCholesky(
df=[2, 3, 4, 5],
scale=chol_x,
cholesky_input_output_matrices=True)
self.assertAllClose(log_prob_df_seq, w.log_prob(chol_x).eval())
# Now we test various constructions of Wishart with different sample
# shape.
log_prob = np.array([
# math.log(stats.wishart.pdf(x[0], df=4, scale=x[0]))
-4.224171427529236,
# math.log(stats.wishart.pdf(x[1], df=4, scale=x[0]))
-6.3378770664093453,
# math.log(stats.wishart.pdf(x[2], df=4, scale=x[0]))
-12.026946850193017,
# math.log(stats.wishart.pdf(x[3], df=4, scale=x[0]))
-20.951582705289454,
])
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=False),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=False)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
for w in (
distributions.WishartCholesky(
df=4,
scale=chol_x[0],
cholesky_input_output_matrices=True),
distributions.WishartFull(
df=4,
scale=x[0],
cholesky_input_output_matrices=True)):
self.assertAllEqual((2, 2), w.event_shape_tensor().eval())
self.assertEqual(2, w.dimension.eval())
self.assertAllClose(log_prob[0], w.log_prob(chol_x[0]).eval())
self.assertAllClose(log_prob[0:2], w.log_prob(chol_x[0:2]).eval())
self.assertAllClose(
np.reshape(log_prob, (2, 2)),
w.log_prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllClose(
np.reshape(np.exp(log_prob), (2, 2)),
w.prob(np.reshape(chol_x, (2, 2, 2, 2))).eval())
self.assertAllEqual((2, 2),
w.log_prob(np.reshape(x, (2, 2, 2, 2))).get_shape())
def testBatchShape(self):
with self.cached_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([], w.batch_shape)
self.assertAllEqual([], w.batch_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2], w.batch_shape)
self.assertAllEqual([2], w.batch_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[], sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2],
sess.run(w.batch_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testEventShape(self):
with self.cached_session() as sess:
scale = make_pd(1., 2)
chol_scale = chol(scale)
w = distributions.WishartCholesky(df=4, scale=chol_scale)
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
w = distributions.WishartCholesky(
df=[4., 4], scale=np.array([chol_scale, chol_scale]))
self.assertAllEqual([2, 2], w.event_shape)
self.assertAllEqual([2, 2], w.event_shape_tensor().eval())
scale_deferred = array_ops.placeholder(dtypes.float32)
w = distributions.WishartCholesky(df=4, scale=scale_deferred)
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: chol_scale}))
self.assertAllEqual(
[2, 2],
sess.run(w.event_shape_tensor(),
feed_dict={scale_deferred: [chol_scale, chol_scale]}))
def testValidateArgs(self):
with self.cached_session() as sess:
df_deferred = array_ops.placeholder(dtypes.float32)
chol_scale_deferred = array_ops.placeholder(dtypes.float32)
x = make_pd(1., 3)
chol_scale = chol(x)
# Check expensive, deferred assertions.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"cannot be less than"):
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=True)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 2.,
chol_scale_deferred: chol_scale})
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Cholesky decomposition was not successful"):
chol_w = distributions.WishartFull(
df=df_deferred, scale=chol_scale_deferred)
        # np.ones((3, 3)) is not positive definite.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={
df_deferred: 4.,
chol_scale_deferred: np.ones(
(3, 3), dtype=np.float32)
})
with self.assertRaisesOpError("scale must be square"):
chol_w = distributions.WishartCholesky(
df=4.,
scale=np.array([[2., 3., 4.], [1., 2., 3.]], dtype=np.float32),
validate_args=True)
sess.run(chol_w.scale().eval())
# Ensure no assertions.
chol_w = distributions.WishartCholesky(
df=df_deferred,
scale=chol_scale_deferred,
validate_args=False)
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: chol_scale})
      # Bogus log_prob, but since we have no checks running... c'est la vie.
sess.run(chol_w.log_prob(np.asarray(
x, dtype=np.float32)),
feed_dict={df_deferred: 4,
chol_scale_deferred: np.ones((3, 3))})
def testStaticAsserts(self):
with self.cached_session():
x = make_pd(1., 3)
chol_scale = chol(x)
      # Still has these assertions because they're resolvable at graph
      # construction time.
with self.assertRaisesRegexp(ValueError, "cannot be less than"):
distributions.WishartCholesky(
df=2, scale=chol_scale, validate_args=False)
with self.assertRaisesRegexp(TypeError, "Argument tril must have dtype"):
distributions.WishartCholesky(
df=4.,
scale=np.asarray(
chol_scale, dtype=np.int32),
validate_args=False)
def testSampleBroadcasts(self):
dims = 2
batch_shape = [2, 3]
sample_shape = [2, 1]
scale = np.float32([
[[1., 0.5],
[0.5, 1.]],
[[0.5, 0.25],
[0.25, 0.75]],
])
scale = np.reshape(np.concatenate([scale, scale, scale], axis=0),
batch_shape + [dims, dims])
wishart = distributions.WishartFull(df=5, scale=scale)
x = wishart.sample(sample_shape, seed=42)
with self.cached_session() as sess:
x_ = sess.run(x)
expected_shape = sample_shape + batch_shape + [dims, dims]
self.assertAllEqual(expected_shape, x.shape)
self.assertAllEqual(expected_shape, x_.shape)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/wishart_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import kumaraswamy as kumaraswamy_lib
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
stats = try_import("scipy.stats")
def _kumaraswamy_mode(a, b):
a = np.asarray(a)
b = np.asarray(b)
return ((a - 1) / (a * b - 1))**(1 / a)
def _kumaraswamy_moment(a, b, n):
a = np.asarray(a)
b = np.asarray(b)
return b * special.beta(1.0 + n / a, b)
def _harmonic_number(b):
b = np.asarray(b)
return special.psi(b + 1) - special.psi(1)
def _kumaraswamy_cdf(a, b, x):
a = np.asarray(a)
b = np.asarray(b)
x = np.asarray(x)
return 1 - (1 - x**a)**b
def _kumaraswamy_pdf(a, b, x):
a = np.asarray(a)
b = np.asarray(b)
x = np.asarray(x)
return a * b * x ** (a - 1) * (1 - x ** a) ** (b - 1)
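# Hedged editorial sketch (not part of the original tests): _harmonic_number(b)
# equals psi(b + 1) - psi(1), the (generalized) harmonic number H_b, and the
# pdf/cdf helpers satisfy d/dx cdf(a, b, x) = pdf(a, b, x) with cdf(a, b, 1)
# equal to 1. The quick check below is illustrative only; the helper name is an
# addition and np is numpy as imported above.
def _example_check_kumaraswamy_helpers(a=2., b=3.):
  grid = np.linspace(0., 1., 10001)
  # The density should integrate to ~1 over [0, 1].
  np.testing.assert_allclose(
      np.trapz(_kumaraswamy_pdf(a, b, grid), grid), 1., rtol=1e-3)
  # The CDF should reach exactly 1 at the right endpoint.
  np.testing.assert_allclose(_kumaraswamy_cdf(a, b, 1.), 1.)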
class KumaraswamyTest(test.TestCase):
def testSimpleShapes(self):
with self.cached_session():
a = np.random.rand(3)
b = np.random.rand(3)
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertAllEqual([3], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3]), dist.batch_shape)
def testComplexShapes(self):
with self.cached_session():
a = np.random.rand(3, 2, 2)
b = np.random.rand(3, 2, 2)
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2, 2], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape)
def testComplexShapesBroadcast(self):
with self.cached_session():
a = np.random.rand(3, 2, 2)
b = np.random.rand(2, 2)
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertAllEqual([], dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2, 2], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2, 2]), dist.batch_shape)
def testAProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
with self.cached_session():
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertEqual([1, 3], dist.concentration1.get_shape())
self.assertAllClose(a, dist.concentration1.eval())
def testBProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
with self.cached_session():
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertEqual([1, 3], dist.concentration0.get_shape())
self.assertAllClose(b, dist.concentration0.eval())
def testPdfXProper(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
with self.cached_session():
dist = kumaraswamy_lib.Kumaraswamy(a, b, validate_args=True)
dist.prob([.1, .3, .6]).eval()
dist.prob([.2, .3, .5]).eval()
# Either condition can trigger.
with self.assertRaisesOpError("sample must be non-negative"):
dist.prob([-1., 0.1, 0.5]).eval()
with self.assertRaisesOpError("sample must be no larger than `1`"):
dist.prob([.1, .2, 1.2]).eval()
def testPdfTwoBatches(self):
with self.cached_session():
a = [1., 2]
b = [1., 2]
x = [.5, .5]
dist = kumaraswamy_lib.Kumaraswamy(a, b)
pdf = dist.prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, pdf.eval())
self.assertEqual((2,), pdf.get_shape())
def testPdfTwoBatchesNontrivialX(self):
with self.cached_session():
a = [1., 2]
b = [1., 2]
x = [.3, .7]
dist = kumaraswamy_lib.Kumaraswamy(a, b)
pdf = dist.prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, pdf.eval())
self.assertEqual((2,), pdf.get_shape())
def testPdfUniformZeroBatch(self):
with self.cached_session():
# This is equivalent to a uniform distribution
a = 1.
b = 1.
x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)
dist = kumaraswamy_lib.Kumaraswamy(a, b)
pdf = dist.prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, pdf.eval())
self.assertEqual((5,), pdf.get_shape())
def testPdfAStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
a = [[1., 2]]
b = [[1., 2]]
x = [[.5, .5], [.3, .7]]
dist = kumaraswamy_lib.Kumaraswamy(a, b)
pdf = dist.prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, pdf.eval())
self.assertEqual((2, 2), pdf.get_shape())
def testPdfAStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
a = [1., 2]
b = [1., 2]
x = [[.5, .5], [.2, .8]]
pdf = kumaraswamy_lib.Kumaraswamy(a, b).prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, pdf.eval())
self.assertEqual((2, 2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [[.5, .5]]
pdf = kumaraswamy_lib.Kumaraswamy(a, b).prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, pdf.eval())
self.assertEqual((2, 2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [.5, .5]
pdf = kumaraswamy_lib.Kumaraswamy(a, b).prob(x)
expected_pdf = _kumaraswamy_pdf(a, b, x)
self.assertAllClose(expected_pdf, pdf.eval())
self.assertEqual((2, 2), pdf.get_shape())
def testKumaraswamyMean(self):
with session.Session():
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertEqual(dist.mean().get_shape(), (3,))
if not stats:
return
expected_mean = _kumaraswamy_moment(a, b, 1)
self.assertAllClose(expected_mean, dist.mean().eval())
def testKumaraswamyVariance(self):
with session.Session():
a = [1., 2, 3]
b = [2., 4, 1.2]
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertEqual(dist.variance().get_shape(), (3,))
if not stats:
return
expected_variance = _kumaraswamy_moment(a, b, 2) - _kumaraswamy_moment(
a, b, 1)**2
self.assertAllClose(expected_variance, dist.variance().eval())
def testKumaraswamyMode(self):
with session.Session():
a = np.array([1.1, 2, 3])
b = np.array([2., 4, 1.2])
expected_mode = _kumaraswamy_mode(a, b)
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertEqual(dist.mode().get_shape(), (3,))
self.assertAllClose(expected_mode, dist.mode().eval())
def testKumaraswamyModeInvalid(self):
with session.Session():
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = kumaraswamy_lib.Kumaraswamy(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Mode undefined for concentration1 <= 1."):
dist.mode().eval()
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = kumaraswamy_lib.Kumaraswamy(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Mode undefined for concentration0 <= 1."):
dist.mode().eval()
def testKumaraswamyModeEnableAllowNanStats(self):
with session.Session():
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = kumaraswamy_lib.Kumaraswamy(a, b, allow_nan_stats=True)
expected_mode = _kumaraswamy_mode(a, b)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().get_shape())
self.assertAllClose(expected_mode, dist.mode().eval())
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = kumaraswamy_lib.Kumaraswamy(a, b, allow_nan_stats=True)
expected_mode = _kumaraswamy_mode(a, b)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().get_shape())
self.assertAllClose(expected_mode, dist.mode().eval())
def testKumaraswamyEntropy(self):
with session.Session():
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = kumaraswamy_lib.Kumaraswamy(a, b)
self.assertEqual(dist.entropy().get_shape(), (3,))
if not stats:
return
expected_entropy = (1 - 1. / a) + (
1 - 1. / b) * _harmonic_number(b) + np.log(a * b)
self.assertAllClose(expected_entropy, dist.entropy().eval())
def testKumaraswamySample(self):
with self.cached_session():
a = 1.
b = 2.
kumaraswamy = kumaraswamy_lib.Kumaraswamy(a, b)
n = constant_op.constant(100000)
samples = kumaraswamy.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
self.assertLess(
stats.kstest(
# Kumaraswamy is a univariate distribution.
sample_values,
lambda x: _kumaraswamy_cdf(1., 2., x))[0],
0.01)
# The standard error of the sample mean is 1 / (sqrt(18 * n))
expected_mean = _kumaraswamy_moment(a, b, 1)
self.assertAllClose(sample_values.mean(axis=0), expected_mean, atol=1e-2)
expected_variance = _kumaraswamy_moment(a, b, 2) - _kumaraswamy_moment(
a, b, 1)**2
self.assertAllClose(
np.cov(sample_values, rowvar=0), expected_variance, atol=1e-1)
# Test that sampling with the same seed twice gives the same results.
def testKumaraswamySampleMultipleTimes(self):
with self.cached_session():
a_val = 1.
b_val = 2.
n_val = 100
random_seed.set_random_seed(654321)
kumaraswamy1 = kumaraswamy_lib.Kumaraswamy(
concentration1=a_val, concentration0=b_val, name="kumaraswamy1")
samples1 = kumaraswamy1.sample(n_val, seed=123456).eval()
random_seed.set_random_seed(654321)
kumaraswamy2 = kumaraswamy_lib.Kumaraswamy(
concentration1=a_val, concentration0=b_val, name="kumaraswamy2")
samples2 = kumaraswamy2.sample(n_val, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testKumaraswamySampleMultidimensional(self):
with self.cached_session():
a = np.random.rand(3, 2, 2).astype(np.float32)
b = np.random.rand(3, 2, 2).astype(np.float32)
kumaraswamy = kumaraswamy_lib.Kumaraswamy(a, b)
n = constant_op.constant(100000)
samples = kumaraswamy.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000, 3, 2, 2))
self.assertFalse(np.any(sample_values < 0.0))
if not stats:
return
self.assertAllClose(
sample_values[:, 1, :].mean(axis=0),
_kumaraswamy_moment(a, b, 1)[1, :],
atol=1e-1)
def testKumaraswamyCdf(self):
with self.cached_session():
shape = (30, 40, 50)
for dt in (np.float32, np.float64):
a = 10. * np.random.random(shape).astype(dt)
b = 10. * np.random.random(shape).astype(dt)
x = np.random.random(shape).astype(dt)
actual = kumaraswamy_lib.Kumaraswamy(a, b).cdf(x).eval()
self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
if not stats:
return
self.assertAllClose(
_kumaraswamy_cdf(a, b, x), actual, rtol=1e-4, atol=0)
def testKumaraswamyLogCdf(self):
with self.cached_session():
shape = (30, 40, 50)
for dt in (np.float32, np.float64):
a = 10. * np.random.random(shape).astype(dt)
b = 10. * np.random.random(shape).astype(dt)
x = np.random.random(shape).astype(dt)
actual = math_ops.exp(kumaraswamy_lib.Kumaraswamy(a,
b).log_cdf(x)).eval()
self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
if not stats:
return
self.assertAllClose(
_kumaraswamy_cdf(a, b, x), actual, rtol=1e-4, atol=0)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/kumaraswamy_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import chi2 as chi2_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class Chi2Test(test.TestCase):
def testChi2LogPDF(self):
with self.cached_session():
batch_size = 6
df = constant_op.constant([2.0] * batch_size, dtype=np.float64)
df_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)
chi2 = chi2_lib.Chi2(df=df)
expected_log_pdf = stats.chi2.logpdf(x, df_v)
log_pdf = chi2.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
self.assertAllClose(log_pdf.eval(), expected_log_pdf)
pdf = chi2.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
def testChi2CDF(self):
with self.cached_session():
batch_size = 6
df = constant_op.constant([2.0] * batch_size, dtype=np.float64)
df_v = 2.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float64)
chi2 = chi2_lib.Chi2(df=df)
expected_cdf = stats.chi2.cdf(x, df_v)
cdf = chi2.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testChi2Mean(self):
with self.cached_session():
df_v = np.array([1., 3, 5], dtype=np.float64)
expected_mean = stats.chi2.mean(df_v)
chi2 = chi2_lib.Chi2(df=df_v)
self.assertEqual(chi2.mean().get_shape(), (3,))
self.assertAllClose(chi2.mean().eval(), expected_mean)
def testChi2Variance(self):
with self.cached_session():
df_v = np.array([1., 3, 5], np.float64)
expected_variances = stats.chi2.var(df_v)
chi2 = chi2_lib.Chi2(df=df_v)
self.assertEqual(chi2.variance().get_shape(), (3,))
self.assertAllClose(chi2.variance().eval(), expected_variances)
def testChi2Entropy(self):
with self.cached_session():
df_v = np.array([1., 3, 5], dtype=np.float64)
expected_entropy = stats.chi2.entropy(df_v)
chi2 = chi2_lib.Chi2(df=df_v)
self.assertEqual(chi2.entropy().get_shape(), (3,))
self.assertAllClose(chi2.entropy().eval(), expected_entropy)
def testChi2WithAbsDf(self):
with self.cached_session():
df_v = np.array([-1.3, -3.2, 5], dtype=np.float64)
chi2 = chi2_lib.Chi2WithAbsDf(df=df_v)
self.assertAllClose(
math_ops.floor(math_ops.abs(df_v)).eval(),
chi2.df.eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/chi2_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateStudentsT Distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from scipy import special
from tensorflow.contrib.distributions.python.ops.vector_student_t import _VectorStudentT
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class _FakeVectorStudentT(object):
"""Fake scipy implementation for Multivariate Student's t-distribution.
Technically we don't need to test the `Vector Student's t-distribution` since
  it's composed of only unit-tested parts. However, this _FakeVectorStudentT
serves as something like an end-to-end test of the
`TransformedDistribution + Affine` API.
Other `Vector*` implementations need only test new code. That we don't need
to test every Vector* distribution is good because there aren't SciPy
analogs and reimplementing everything in NumPy sort of defeats the point of
having the `TransformedDistribution + Affine` API.
"""
def __init__(self, df, loc, scale_tril):
self._df = np.asarray(df)
self._loc = np.asarray(loc)
self._scale_tril = np.asarray(scale_tril)
def log_prob(self, x):
def _compute(df, loc, scale_tril, x):
k = scale_tril.shape[-1]
ildj = np.sum(np.log(np.abs(np.diag(scale_tril))), axis=-1)
logz = ildj + k * (0.5 * np.log(df) +
0.5 * np.log(np.pi) +
special.gammaln(0.5 * df) -
special.gammaln(0.5 * (df + 1.)))
y = linalg.solve_triangular(scale_tril, np.matrix(x - loc).T,
lower=True, overwrite_b=True)
logs = -0.5 * (df + 1.) * np.sum(np.log1p(y**2. / df), axis=-2)
return logs - logz
if not self._df.shape:
return _compute(self._df, self._loc, self._scale_tril, x)
return np.concatenate([
[_compute(self._df[i], self._loc[i], self._scale_tril[i], x[:, i, :])]
for i in range(len(self._df))]).T
def prob(self, x):
return np.exp(self.log_prob(x))
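# Hedged editorial sketch (not part of the original tests): in one dimension
# with loc=[m] and scale_tril=[[s]], the fake log-density above should reduce
# to a location-scale Student's t, stats.t.logpdf((x - m) / s, df) - log(s).
# The local scipy.stats import and the helper name are illustrative additions.
def _example_check_fake_vector_student_t():
  from scipy import stats  # scipy.stats is not imported at module scope here.
  df, m, s = 3., 0.5, 2.
  fake = _FakeVectorStudentT(df=df, loc=[m], scale_tril=[[s]])
  x = np.array([[0.1], [1.7]])
  np.testing.assert_allclose(
      fake.log_prob(x),
      stats.t.logpdf((x[:, 0] - m) / s, df) - np.log(s),
      rtol=1e-6)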
class VectorStudentTTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testProbStaticScalar(self):
with self.cached_session():
# Scalar batch_shape.
df = np.asarray(3., dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1], dtype=np.float32)
scale_diag = np.asarray([2.], dtype=np.float32)
scale_tril = np.diag(scale_diag)
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
x = 2. * self._rng.rand(4, 1).astype(np.float32) - 1.
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbStatic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2, 3], dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
with self.cached_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbDynamic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2, 3], dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
with self.cached_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
      # Build the distribution from the placeholders so the feed_dict below
      # actually exercises dynamic shapes.
      actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl,
                                   scale_diag=scale_diag_pl,
                                   validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
def testProbScalarBaseDistributionNonScalarTransform(self):
# Scalar batch_shape.
df = np.asarray(2., dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=np.tile(df, reps=len(scale_diag)),
loc=loc,
scale_tril=scale_tril)
with self.cached_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbScalarBaseDistributionNonScalarTransformDynamic(self):
# Scalar batch_shape.
df = np.asarray(2., dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=np.tile(df, reps=len(scale_diag)),
loc=loc,
scale_tril=scale_tril)
with self.cached_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
      # Build the distribution from the placeholders so the feed_dict below
      # actually exercises dynamic shapes.
      actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl,
                                   scale_diag=scale_diag_pl,
                                   validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
def testProbNonScalarBaseDistributionScalarTransform(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2., 3.], dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1, 2, 3], dtype=np.float32)
scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
scale_tril = np.diag(scale_diag)
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df,
loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
reps=[len(df), 1, 1]))
with self.cached_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbNonScalarBaseDistributionScalarTransformDynamic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2., 3.], dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1, 2, 3], dtype=np.float32)
scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
scale_tril = np.diag(scale_diag)
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df,
loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
reps=[len(df), 1, 1]))
with self.cached_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
      # Build the distribution from the placeholders so the feed_dict below
      # actually exercises dynamic shapes.
      actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl,
                                   scale_diag=scale_diag_pl,
                                   validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/vector_student_t_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PoissonLogNormalQuadratureCompoundTest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import poisson_lognormal
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
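# Hedged editorial note: the distribution under test is a Poisson whose
# log-rate is Normal(loc, scale); the implementation approximates that compound
# with a finite mixture over `quadrature_size` quadrature points, which is why
# the tests below compare sampled and analytical statistics only within fairly
# loose tolerances.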
class _PoissonLogNormalQuadratureCompoundTest(
test_util.DiscreteScalarDistributionTestHelpers):
"""Tests the PoissonLogNormalQuadratureCompoundTest distribution."""
def testSampleProbConsistent(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
-2.,
shape=[] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
1.1,
shape=[] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, pln, batch_size=1, rtol=0.1)
def testMeanVariance(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
0.,
shape=[] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
1.,
shape=[] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_mean_variance(
sess.run, pln, rtol=0.02)
def testSampleProbConsistentBroadcastScalar(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
[0., -0.5],
shape=[2] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
1.,
shape=[] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, pln, batch_size=2, rtol=0.1, atol=0.01)
def testMeanVarianceBroadcastScalar(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
[0., -0.5],
shape=[2] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
1.,
shape=[] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_mean_variance(
sess.run, pln, rtol=0.1, atol=0.01)
def testSampleProbConsistentBroadcastBoth(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
[[0.], [-0.5]],
shape=[2, 1] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
[[1., 0.9]],
shape=[1, 2] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, pln, batch_size=4, rtol=0.1, atol=0.08)
def testMeanVarianceBroadcastBoth(self):
with self.cached_session() as sess:
pln = poisson_lognormal.PoissonLogNormalQuadratureCompound(
loc=array_ops.placeholder_with_default(
[[0.], [-0.5]],
shape=[2, 1] if self.static_shape else None),
scale=array_ops.placeholder_with_default(
[[1., 0.9]],
shape=[1, 2] if self.static_shape else None),
quadrature_size=10,
validate_args=True)
self.run_test_sample_consistent_mean_variance(
sess.run, pln, rtol=0.1, atol=0.01)
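# Editorial note (hedged): the two concrete subclasses below rerun all of the
# tests above with `static_shape` set to True and False, exercising the
# distribution both with fully known placeholder shapes and with shape=None
# placeholders.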
class PoissonLogNormalQuadratureCompoundStaticShapeTest(
_PoissonLogNormalQuadratureCompoundTest, test.TestCase):
@property
def static_shape(self):
return True
class PoissonLogNormalQuadratureCompoundDynamicShapeTest(
_PoissonLogNormalQuadratureCompoundTest, test.TestCase):
@property
def static_shape(self):
return False
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/poisson_lognormal_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
class MultivariateNormalDiagPlusLowRankTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testDiagBroadcastBothBatchAndEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [1], event_shape: []
identity_multiplier = np.array([5.])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 5, 0],
[0, 4 + 5]],
[[5 + 5, 0],
[0, 6 + 5]]]),
dist.scale.to_dense().eval())
def testDiagBroadcastBothBatchAndEvent2(self):
# This test differs from `testDiagBroadcastBothBatchAndEvent` in that it
    # broadcasts batch_shapes from both the `scale_diag` and
# `scale_identity_multiplier` args.
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3, 1], event_shape: []
identity_multiplier = np.array([[5.], [4], [3]])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
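      # Batch shapes [3] (from `scale_diag`) and [3, 1] (from
      # `scale_identity_multiplier`) broadcast to [3, 3]; with event_shape [2]
      # the dense scale therefore has shape [3, 3, 2, 2].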
self.assertAllEqual(
[3, 3, 2, 2],
dist.scale.to_dense().get_shape())
def testDiagBroadcastOnlyEvent(self):
# batch_shape: [3], event_shape: [2]
diag = np.array([[1., 2], [3, 4], [5, 6]])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
scale_diag=diag,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[1. + 5, 0],
[0, 2 + 5]],
[[3 + 4, 0],
[0, 4 + 4]],
[[5 + 3, 0],
[0, 6 + 3]]]), # shape: [3, 2, 2]
dist.scale.to_dense().eval())
def testDiagBroadcastMultiplierAndLoc(self):
# batch_shape: [], event_shape: [3]
loc = np.array([1., 0, -1])
# batch_shape: [3], event_shape: []
identity_multiplier = np.array([5., 4, 3])
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_identity_multiplier=identity_multiplier,
validate_args=True)
self.assertAllClose(
np.array([[[5, 0, 0],
[0, 5, 0],
[0, 0, 5]],
[[4, 0, 0],
[0, 4, 0],
[0, 0, 4]],
[[3, 0, 0],
[0, 3, 0],
[0, 0, 3]]]),
dist.scale.to_dense().eval())
def testMean(self):
mu = [-1.0, 1.0]
diag_large = [1.0, 5.0]
v = [[2.0], [3.0]]
diag_small = [3.0]
with self.cached_session():
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testSample(self):
    # TODO(jvdillon): This test should be the basis of a new test fixture which
    # is applied to every distribution. When we make this fixture, we'll also
    # separate the analytical- and sample-based tests, and split them out per
    # function tested. For now, we group everything so we can recycle one batch
    # of samples (thus saving resources).
mu = np.array([-1., 1, 0.5], dtype=np.float32)
diag_large = np.array([1., 0.5, 0.75], dtype=np.float32)
diag_small = np.array([-1.1, 1.2], dtype=np.float32)
v = np.array([[0.7, 0.8],
[0.9, 1],
[0.5, 0.6]], dtype=np.float32) # shape: [k, r] = [3, 2]
true_mean = mu
true_scale = np.diag(diag_large) + np.matmul(np.matmul(
v, np.diag(diag_small)), v.T)
true_covariance = np.matmul(true_scale, true_scale.T)
true_variance = np.diag(true_covariance)
true_stddev = np.sqrt(true_variance)
with self.cached_session() as sess:
dist = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_diag=diag_large,
scale_perturb_factor=v,
scale_perturb_diag=diag_small,
validate_args=True)
# The following distributions will test the KL divergence calculation.
mvn_identity = ds.MultivariateNormalDiag(
loc=np.array([1., 2, 0.25], dtype=np.float32),
validate_args=True)
mvn_scaled = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_identity_multiplier=2.2,
validate_args=True)
mvn_diag = ds.MultivariateNormalDiag(
loc=mvn_identity.loc,
scale_diag=np.array([0.5, 1.5, 1.], dtype=np.float32),
validate_args=True)
mvn_chol = ds.MultivariateNormalTriL(
loc=np.array([1., 2, -1], dtype=np.float32),
scale_tril=np.array([[6., 0, 0],
[2, 5, 0],
[1, 3, 4]], dtype=np.float32) / 10.,
validate_args=True)
scale = dist.scale.to_dense()
n = int(30e3)
samps = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(samps, 0)
x = samps - sample_mean
sample_covariance = math_ops.matmul(x, x, transpose_a=True) / n
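      # Monte Carlo KL estimate: KL(dist || q) = E_dist[log dist(X) - log q(X)]
      # is approximated by averaging over the `samps` drawn above and is then
      # compared against the analytical `ds.kl_divergence` values below.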
sample_kl_identity = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity = ds.kl_divergence(dist, mvn_identity)
sample_kl_scaled = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled = ds.kl_divergence(dist, mvn_scaled)
sample_kl_diag = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag = ds.kl_divergence(dist, mvn_diag)
sample_kl_chol = math_ops.reduce_mean(
dist.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol = ds.kl_divergence(dist, mvn_chol)
n = int(10e3)
baseline = ds.MultivariateNormalDiag(
loc=np.array([-1., 0.25, 1.25], dtype=np.float32),
scale_diag=np.array([1.5, 0.5, 1.], dtype=np.float32),
validate_args=True)
samps = baseline.sample(n, seed=0)
sample_kl_identity_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_identity.log_prob(samps), 0)
analytical_kl_identity_diag_baseline = ds.kl_divergence(
baseline, mvn_identity)
sample_kl_scaled_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_scaled.log_prob(samps), 0)
analytical_kl_scaled_diag_baseline = ds.kl_divergence(
baseline, mvn_scaled)
sample_kl_diag_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_diag.log_prob(samps), 0)
analytical_kl_diag_diag_baseline = ds.kl_divergence(baseline, mvn_diag)
sample_kl_chol_diag_baseline = math_ops.reduce_mean(
baseline.log_prob(samps) - mvn_chol.log_prob(samps), 0)
analytical_kl_chol_diag_baseline = ds.kl_divergence(baseline, mvn_chol)
[
sample_mean_,
analytical_mean_,
sample_covariance_,
analytical_covariance_,
analytical_variance_,
analytical_stddev_,
scale_,
sample_kl_identity_, analytical_kl_identity_,
sample_kl_scaled_, analytical_kl_scaled_,
sample_kl_diag_, analytical_kl_diag_,
sample_kl_chol_, analytical_kl_chol_,
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
sample_kl_scaled_diag_baseline_, analytical_kl_scaled_diag_baseline_,
sample_kl_diag_diag_baseline_, analytical_kl_diag_diag_baseline_,
sample_kl_chol_diag_baseline_, analytical_kl_chol_diag_baseline_,
] = sess.run([
sample_mean,
dist.mean(),
sample_covariance,
dist.covariance(),
dist.variance(),
dist.stddev(),
scale,
sample_kl_identity, analytical_kl_identity,
sample_kl_scaled, analytical_kl_scaled,
sample_kl_diag, analytical_kl_diag,
sample_kl_chol, analytical_kl_chol,
sample_kl_identity_diag_baseline,
analytical_kl_identity_diag_baseline,
sample_kl_scaled_diag_baseline, analytical_kl_scaled_diag_baseline,
sample_kl_diag_diag_baseline, analytical_kl_diag_diag_baseline,
sample_kl_chol_diag_baseline, analytical_kl_chol_diag_baseline,
])
sample_variance_ = np.diag(sample_covariance_)
sample_stddev_ = np.sqrt(sample_variance_)
logging.vlog(2, "true_mean:\n{} ".format(true_mean))
logging.vlog(2, "sample_mean:\n{}".format(sample_mean_))
logging.vlog(2, "analytical_mean:\n{}".format(analytical_mean_))
logging.vlog(2, "true_covariance:\n{}".format(true_covariance))
logging.vlog(2, "sample_covariance:\n{}".format(sample_covariance_))
logging.vlog(2, "analytical_covariance:\n{}".format(
analytical_covariance_))
logging.vlog(2, "true_variance:\n{}".format(true_variance))
logging.vlog(2, "sample_variance:\n{}".format(sample_variance_))
logging.vlog(2, "analytical_variance:\n{}".format(analytical_variance_))
logging.vlog(2, "true_stddev:\n{}".format(true_stddev))
logging.vlog(2, "sample_stddev:\n{}".format(sample_stddev_))
logging.vlog(2, "analytical_stddev:\n{}".format(analytical_stddev_))
logging.vlog(2, "true_scale:\n{}".format(true_scale))
logging.vlog(2, "scale:\n{}".format(scale_))
logging.vlog(2, "kl_identity: analytical:{} sample:{}".format(
analytical_kl_identity_, sample_kl_identity_))
logging.vlog(2, "kl_scaled: analytical:{} sample:{}".format(
analytical_kl_scaled_, sample_kl_scaled_))
logging.vlog(2, "kl_diag: analytical:{} sample:{}".format(
analytical_kl_diag_, sample_kl_diag_))
logging.vlog(2, "kl_chol: analytical:{} sample:{}".format(
analytical_kl_chol_, sample_kl_chol_))
logging.vlog(
2, "kl_identity_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_identity_diag_baseline_,
sample_kl_identity_diag_baseline_))
logging.vlog(
2, "kl_scaled_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_scaled_diag_baseline_,
sample_kl_scaled_diag_baseline_))
logging.vlog(2, "kl_diag_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_diag_diag_baseline_,
sample_kl_diag_diag_baseline_))
logging.vlog(2, "kl_chol_diag_baseline: analytical:{} sample:{}".format(
analytical_kl_chol_diag_baseline_,
sample_kl_chol_diag_baseline_))
self.assertAllClose(true_mean, sample_mean_,
atol=0., rtol=0.02)
self.assertAllClose(true_mean, analytical_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(true_covariance, sample_covariance_,
atol=0., rtol=0.02)
self.assertAllClose(true_covariance, analytical_covariance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_variance, sample_variance_,
atol=0., rtol=0.02)
self.assertAllClose(true_variance, analytical_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(true_stddev, sample_stddev_,
atol=0., rtol=0.02)
self.assertAllClose(true_stddev, analytical_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(true_scale, scale_,
atol=0., rtol=1e-6)
self.assertAllClose(sample_kl_identity_, analytical_kl_identity_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_scaled_, analytical_kl_scaled_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_diag_, analytical_kl_diag_,
atol=0., rtol=0.02)
self.assertAllClose(sample_kl_chol_, analytical_kl_chol_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_identity_diag_baseline_,
analytical_kl_identity_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_scaled_diag_baseline_,
analytical_kl_scaled_diag_baseline_,
atol=0., rtol=0.02)
self.assertAllClose(
sample_kl_diag_diag_baseline_,
analytical_kl_diag_diag_baseline_,
atol=0., rtol=0.04)
self.assertAllClose(
sample_kl_chol_diag_baseline_,
analytical_kl_chol_diag_baseline_,
atol=0., rtol=0.02)
def testImplicitLargeDiag(self):
mu = np.array([[1., 2, 3],
[11, 22, 33]]) # shape: [b, k] = [2, 3]
u = np.array([[[1., 2],
[3, 4],
[5, 6]],
[[0.5, 0.75],
[1, 0.25],
[1.5, 1.25]]]) # shape: [b, k, r] = [2, 3, 2]
m = np.array([[0.1, 0.2],
[0.4, 0.5]]) # shape: [b, r] = [2, 2]
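    # With no `scale_diag` or `scale_identity_multiplier` supplied, the "large
    # diag" part defaults to the identity, so scale = I + U * diag(m) * U^T.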
scale = np.stack([
np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
np.transpose(u[0])),
np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
np.transpose(u[1])),
])
cov = np.stack([np.matmul(scale[0], scale[0].T),
np.matmul(scale[1], scale[1].T)])
logging.vlog(2, "expected_cov:\n{}".format(cov))
with self.cached_session():
mvn = ds.MultivariateNormalDiagPlusLowRank(
loc=mu,
scale_perturb_factor=u,
scale_perturb_diag=m)
self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_plus_low_rank_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import batch_reshape as batch_reshape_lib
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_lib
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.contrib.distributions.python.ops import wishart as wishart_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class _BatchReshapeTest(object):
def make_wishart(self, dims, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = self.dtype([
[[1., 0.5],
[0.5, 1.]],
[[0.5, 0.25],
[0.25, 0.75]],
])
scale = np.reshape(np.concatenate([scale, scale], axis=0),
old_batch_shape + [dims, dims])
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
wishart = wishart_lib.WishartFull(df=5, scale=scale_ph)
reshape_wishart = batch_reshape_lib.BatchReshape(
distribution=wishart,
batch_shape=new_batch_shape_ph,
validate_args=True)
return wishart, reshape_wishart
def test_matrix_variate_sample_and_log_prob(self):
dims = 2
new_batch_shape = [4]
old_batch_shape = [2, 2]
wishart, reshape_wishart = self.make_wishart(
dims, new_batch_shape, old_batch_shape)
batch_shape = reshape_wishart.batch_shape_tensor()
event_shape = reshape_wishart.event_shape_tensor()
expected_sample_shape = [3, 1] + new_batch_shape + [dims, dims]
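    # Drawing from the base Wishart and from the reshaped distribution with the
    # same seed should yield the same values, differing only by a reshape of
    # the [2, 2] batch dimensions into [4].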
x = wishart.sample([3, 1], seed=42)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_wishart.sample([3, 1], seed=42)
expected_log_prob_shape = [3, 1] + new_batch_shape
expected_log_prob = array_ops.reshape(
wishart.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_wishart.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([dims, dims], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_wishart.batch_shape)
self.assertAllEqual([dims, dims], reshape_wishart.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_matrix_variate_stats(self):
dims = 2
new_batch_shape = [4]
old_batch_shape = [2, 2]
wishart, reshape_wishart = self.make_wishart(
dims, new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_matrix_stat_shape = new_batch_shape + [dims, dims]
expected_entropy = array_ops.reshape(
wishart.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_wishart.entropy()
expected_mean = array_ops.reshape(
wishart.mean(), expected_matrix_stat_shape)
actual_mean = reshape_wishart.mean()
expected_mode = array_ops.reshape(
wishart.mode(), expected_matrix_stat_shape)
actual_mode = reshape_wishart.mode()
expected_stddev = array_ops.reshape(
wishart.stddev(), expected_matrix_stat_shape)
actual_stddev = reshape_wishart.stddev()
expected_variance = array_ops.reshape(
wishart.variance(), expected_matrix_stat_shape)
actual_variance = reshape_wishart.variance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_variance.shape)
def make_normal(self, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = self.dtype(0.5 + np.arange(
np.prod(old_batch_shape)).reshape(old_batch_shape))
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
normal = normal_lib.Normal(loc=self.dtype(0), scale=scale_ph)
reshape_normal = batch_reshape_lib.BatchReshape(
distribution=normal,
batch_shape=new_batch_shape_ph,
validate_args=True)
return normal, reshape_normal
def test_scalar_variate_sample_and_log_prob(self):
new_batch_shape = [2, 2]
old_batch_shape = [4]
normal, reshape_normal = self.make_normal(
new_batch_shape, old_batch_shape)
batch_shape = reshape_normal.batch_shape_tensor()
event_shape = reshape_normal.event_shape_tensor()
expected_sample_shape = new_batch_shape
x = normal.sample(seed=52)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_normal.sample(seed=52)
expected_log_prob_shape = new_batch_shape
expected_log_prob = array_ops.reshape(
normal.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_normal.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_normal.batch_shape)
self.assertAllEqual([], reshape_normal.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_scalar_variate_stats(self):
new_batch_shape = [2, 2]
old_batch_shape = [4]
normal, reshape_normal = self.make_normal(new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_entropy = array_ops.reshape(
normal.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_normal.entropy()
expected_mean = array_ops.reshape(
normal.mean(), expected_scalar_stat_shape)
actual_mean = reshape_normal.mean()
expected_mode = array_ops.reshape(
normal.mode(), expected_scalar_stat_shape)
actual_mode = reshape_normal.mode()
expected_stddev = array_ops.reshape(
normal.stddev(), expected_scalar_stat_shape)
actual_stddev = reshape_normal.stddev()
expected_variance = array_ops.reshape(
normal.variance(), expected_scalar_stat_shape)
actual_variance = reshape_normal.variance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_variance.shape)
def make_mvn(self, dims, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
reshape_mvn = batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
return mvn, reshape_mvn
def test_vector_variate_sample_and_log_prob(self):
dims = 3
new_batch_shape = [2, 1]
old_batch_shape = [2]
mvn, reshape_mvn = self.make_mvn(
dims, new_batch_shape, old_batch_shape)
batch_shape = reshape_mvn.batch_shape_tensor()
event_shape = reshape_mvn.event_shape_tensor()
expected_sample_shape = [3] + new_batch_shape + [dims]
x = mvn.sample(3, seed=62)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_mvn.sample(3, seed=62)
expected_log_prob_shape = [3] + new_batch_shape
expected_log_prob = array_ops.reshape(
mvn.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_mvn.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([dims], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_mvn.batch_shape)
self.assertAllEqual([dims], reshape_mvn.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_vector_variate_stats(self):
dims = 3
new_batch_shape = [2, 1]
old_batch_shape = [2]
mvn, reshape_mvn = self.make_mvn(
dims, new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_entropy = array_ops.reshape(
mvn.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_mvn.entropy()
expected_vector_stat_shape = new_batch_shape + [dims]
expected_mean = array_ops.reshape(
mvn.mean(), expected_vector_stat_shape)
actual_mean = reshape_mvn.mean()
expected_mode = array_ops.reshape(
mvn.mode(), expected_vector_stat_shape)
actual_mode = reshape_mvn.mode()
expected_stddev = array_ops.reshape(
mvn.stddev(), expected_vector_stat_shape)
actual_stddev = reshape_mvn.stddev()
expected_variance = array_ops.reshape(
mvn.variance(), expected_vector_stat_shape)
actual_variance = reshape_mvn.variance()
expected_matrix_stat_shape = new_batch_shape + [dims, dims]
expected_covariance = array_ops.reshape(
mvn.covariance(), expected_matrix_stat_shape)
actual_covariance = reshape_mvn.covariance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
expected_covariance_, actual_covariance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
expected_covariance, actual_covariance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_covariance_, actual_covariance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_variance.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_covariance.shape)
def test_bad_reshape_size(self):
dims = 2
new_batch_shape = [2, 3]
old_batch_shape = [2] # 2 != 2*3
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(
ValueError, (r"`batch_shape` size \(6\) must match "
r"`distribution\.batch_shape` size \(2\)")):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r"Shape sizes do not match."):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_non_positive_shape(self):
dims = 2
old_batch_shape = [4]
if self.is_static_shape:
# Unknown first dimension does not trigger size check. Note that
# any dimension < 0 is treated statically as unknown.
new_batch_shape = [-1, 0]
else:
new_batch_shape = [-2, -2] # -2 * -2 = 4, same size as the old shape.
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(ValueError, r".*must be >=-1.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r".*must be >=-1.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_non_vector_shape(self):
dims = 2
new_batch_shape = 2
old_batch_shape = [2]
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(ValueError, r".*must be a vector.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r".*must be a vector.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_broadcasting_explicitly_unsupported(self):
old_batch_shape = [4]
new_batch_shape = [1, 4, 1]
rate_ = self.dtype([1, 10, 2, 20])
rate = array_ops.placeholder_with_default(
rate_,
shape=old_batch_shape if self.is_static_shape else None)
poisson_4 = poisson_lib.Poisson(rate)
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
poisson_141_reshaped = batch_reshape_lib.BatchReshape(
poisson_4, new_batch_shape_ph, validate_args=True)
x_4 = self.dtype([2, 12, 3, 23])
x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)
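    # BatchReshape refuses to broadcast: x_4 has too few dimensions to carry
    # the [1, 4, 1] batch shape, and x_114's trailing dims [1, 1, 4] do not
    # match [1, 4, 1], so both log_prob calls below must raise.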
if self.is_static_shape:
with self.assertRaisesRegexp(NotImplementedError,
"too few batch and event dims"):
poisson_141_reshaped.log_prob(x_4)
with self.assertRaisesRegexp(NotImplementedError,
"unexpected batch and event shape"):
poisson_141_reshaped.log_prob(x_114)
return
with self.assertRaisesOpError("too few batch and event dims"):
with self.cached_session():
poisson_141_reshaped.log_prob(x_4).eval()
with self.assertRaisesOpError("unexpected batch and event shape"):
with self.cached_session():
poisson_141_reshaped.log_prob(x_114).eval()
class BatchReshapeStaticTest(_BatchReshapeTest, test.TestCase):
dtype = np.float32
is_static_shape = True
class BatchReshapeDynamicTest(_BatchReshapeTest, test.TestCase):
dtype = np.float64
is_static_shape = False
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/batch_reshape_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SeedStream class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import seed_stream
from tensorflow.python.platform import test
class SeedStreamTest(test.TestCase):
def assertAllUnique(self, items):
self.assertEqual(len(items), len(set(items)))
def testNonRepetition(self):
# The probability of repetitions in a short stream from a correct
# PRNG is negligible; this test catches bugs that prevent state
# updates.
strm = seed_stream.SeedStream(seed=4, salt="salt")
output = [strm() for _ in range(50)]
self.assertEqual(sorted(output), sorted(list(set(output))))
def testReproducibility(self):
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(seed=4, salt="salt")
strm3 = seed_stream.SeedStream(seed=4, salt="salt")
outputs = [strm1() for _ in range(50)]
self.assertEqual(outputs, [strm2() for _ in range(50)])
self.assertEqual(outputs, [strm3() for _ in range(50)])
def testSeededDistinctness(self):
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(seed=5, salt="salt")
self.assertAllUnique(
[strm1() for _ in range(50)] + [strm2() for _ in range(50)])
def testSaltedDistinctness(self):
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(seed=4, salt="another salt")
self.assertAllUnique(
[strm1() for _ in range(50)] + [strm2() for _ in range(50)])
def testNestingRobustness(self):
# SeedStreams started from generated seeds should not collide with
# the master or with each other, even if the salts are the same.
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(strm1(), salt="salt")
strm3 = seed_stream.SeedStream(strm1(), salt="salt")
outputs = [strm1() for _ in range(50)]
self.assertAllUnique(
outputs + [strm2() for _ in range(50)] + [strm3() for _ in range(50)])
def testInitFromOtherSeedStream(self):
strm1 = seed_stream.SeedStream(seed=4, salt="salt")
strm2 = seed_stream.SeedStream(strm1, salt="salt")
strm3 = seed_stream.SeedStream(strm1, salt="another salt")
out1 = [strm1() for _ in range(50)]
out2 = [strm2() for _ in range(50)]
out3 = [strm3() for _ in range(50)]
self.assertAllEqual(out1, out2)
self.assertAllUnique(out1 + out3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/seed_stream_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cauchy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import cauchy as cauchy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class CauchyTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.cached_session():
param_shapes = cauchy_lib.Cauchy.param_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, loc_shape.eval())
self.assertAllEqual(expected, scale_shape.eval())
loc = array_ops.zeros(loc_shape)
scale = array_ops.ones(scale_shape)
self.assertAllEqual(expected,
array_ops.shape(
cauchy_lib.Cauchy(loc, scale).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = cauchy_lib.Cauchy.param_static_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, loc_shape)
self.assertEqual(expected, scale_shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testCauchyLogPDF(self):
with self.cached_session():
batch_size = 6
loc = constant_op.constant([3.0] * batch_size)
scale = constant_op.constant([np.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.eval().shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testCauchyLogPDFMultidimensional(self):
with self.cached_session():
batch_size = 6
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant(
[[np.sqrt(10.0), np.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf_values.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testCauchyCDF(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).cdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
def testCauchySurvivalFunction(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).sf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0)
def testCauchyLogCDF(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.log_cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).logcdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
loc = variables.Variable(dtype(0.0))
scale = variables.Variable(dtype(1.0))
dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [loc, scale])
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testCauchyLogSurvivalFunction(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.log_survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).logsf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
def testCauchyEntropy(self):
with self.cached_session():
loc = np.array([1.0, 1.0, 1.0])
scale = np.array([[1.0, 2.0, 3.0]])
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
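      # The Cauchy differential entropy has the closed form
      # log(4 * pi * scale), independent of loc.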
entropy = cauchy.entropy()
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), entropy.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
entropy.eval().shape)
self.assertAllEqual(cauchy.batch_shape, entropy.shape)
self.assertAllEqual(cauchy.batch_shape, entropy.eval().shape)
if not stats:
return
expected_entropy = stats.cauchy(loc, scale[0]).entropy().reshape((1, 3))
self.assertAllClose(expected_entropy, entropy.eval())
def testCauchyMode(self):
with self.cached_session():
# Mu will be broadcast to [7, 7, 7].
loc = [7.]
scale = [11., 12., 13.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mode().shape)
self.assertAllEqual([7., 7, 7], cauchy.mode().eval())
def testCauchyMean(self):
with self.cached_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mean().shape)
self.assertAllEqual([np.nan] * 3, cauchy.mean().eval())
def testCauchyNanMean(self):
with self.cached_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.mean().eval()
def testCauchyQuantile(self):
with self.cached_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
p = np.linspace(0.000001, 0.999999, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
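      # The Cauchy quantile function has the closed form
      # loc + scale * tan(pi * (p - 0.5)).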
x = cauchy.quantile(p)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.eval().shape)
self.assertAllEqual(cauchy.batch_shape, x.shape)
self.assertAllEqual(cauchy.batch_shape, x.eval().shape)
if not stats:
return
expected_x = stats.cauchy(loc, scale).ppf(p)
self.assertAllClose(expected_x, x.eval(), atol=0.)
def testCauchyVariance(self):
with self.cached_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.variance().shape)
self.assertAllEqual([np.nan] * 3, cauchy.variance().eval())
def testCauchyNanVariance(self):
with self.cached_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.variance().eval()
def testCauchyStandardDeviation(self):
with self.cached_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.stddev().shape)
self.assertAllEqual([np.nan] * 3, cauchy.stddev().eval())
def testCauchyNanStandardDeviation(self):
with self.cached_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.stddev().eval()
def testCauchySample(self):
with self.cached_session():
loc = constant_op.constant(3.0)
scale = constant_op.constant(1.0)
loc_v = 3.0
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(np.median(sample_values), loc_v, atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchySampleMultiDimensional(self):
with self.cached_session():
batch_size = 2
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant([[0.5, 1.0]] * batch_size)
loc_v = [3.0, -3.0]
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(samples.shape, (100000, batch_size, 2))
self.assertAllClose(
np.median(sample_values[:, 0, 0]), loc_v[0], atol=1e-1)
self.assertAllClose(
np.median(sample_values[:, 0, 1]), loc_v[1], atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
  def testCauchyNegativeScaleFails(self):
with self.cached_session():
cauchy = cauchy_lib.Cauchy(loc=[1.], scale=[-5.], validate_args=True)
with self.assertRaisesOpError("Condition x > 0 did not hold"):
cauchy.mode().eval()
def testCauchyShape(self):
with self.cached_session():
loc = constant_op.constant([-3.0] * 5)
scale = constant_op.constant(11.0)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertEqual(cauchy.batch_shape_tensor().eval(), [5])
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertEqual(cauchy.event_shape, tensor_shape.TensorShape([]))
def testCauchyShapeWithPlaceholders(self):
loc = array_ops.placeholder(dtype=dtypes.float32)
scale = array_ops.placeholder(dtype=dtypes.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
with self.cached_session() as sess:
      # batch_shape should be an unknown (rank-unspecified) TensorShape.
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(cauchy.event_shape, ())
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertAllEqual(
sess.run(
cauchy.batch_shape_tensor(),
feed_dict={
loc: 5.0,
scale: [1.0, 2.0]
}), [2])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
ds = distributions
class MultivariateNormalDiagTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
ds.MultivariateNormalDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).get_shape())
def testDistWithBatchShapeOneThenTransformedThroughSoftplus(self):
# This complex combination of events resulted in a loss of static shape
# information when tensor_util.constant_value(self._needs_rotation) was
# being used incorrectly (resulting in always rotating).
# Batch shape = [1], event shape = [3]
mu = array_ops.zeros((1, 3))
diag = array_ops.ones((1, 3))
with self.cached_session():
base_dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
dist = ds.TransformedDistribution(
base_dist, validate_args=True, bijector=bijectors.Softplus())
samps = dist.sample(5) # Shape [5, 1, 3].
self.assertAllEqual([5, 1], dist.log_prob(samps).get_shape())
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], dist.mean().eval())
def testEntropy(self):
mu = [-1., 1]
diag = [-1., 5]
diag_mat = np.diag(diag)
scipy_mvn = stats.multivariate_normal(mean=mu, cov=diag_mat**2)
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
self.assertAllClose(scipy_mvn.entropy(), dist.entropy().eval(), atol=1e-4)
def testSample(self):
mu = [-1., 1]
diag = [1., -2]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
samps = dist.sample(int(1e3), seed=0).eval()
cov_mat = array_ops.matrix_diag(diag).eval()**2
self.assertAllClose(mu, samps.mean(axis=0), atol=0., rtol=0.05)
self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.05, rtol=0.05)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
dist.sample().eval()
def testSampleWithBroadcastScale(self):
# mu corresponds to a 2-batch of 3-variate normals
mu = np.zeros([2, 3])
# diag corresponds to no batches of 3-variate normals
diag = np.ones([3])
with self.cached_session():
dist = ds.MultivariateNormalDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.get_shape())
self.assertAllClose(mu, mean.eval())
n = int(1e3)
samps = dist.sample(n, seed=0).eval()
cov_mat = array_ops.matrix_diag(diag).eval()**2
sample_cov = np.matmul(
samps.transpose([1, 2, 0]), samps.transpose([1, 0, 2])) / n
self.assertAllClose(mu, samps.mean(axis=0), atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov, atol=0.10, rtol=0.05)
def testCovariance(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.diag(np.ones([3], dtype=np.float32)),
mvn.covariance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllEqual([2], mvn.batch_shape)
self.assertAllEqual([3], mvn.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0], [0, 3, 0], [0, 0, 3]],
[[2, 0, 0], [0, 2, 0], [0, 0, 2]]])**2.,
mvn.covariance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllEqual([2], mvn.batch_shape)
self.assertAllEqual([3], mvn.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0], [0, 2, 0], [0, 0, 1]],
[[4, 0, 0], [0, 5, 0], [0, 0, 6]]])**2.,
mvn.covariance().eval())
def testVariance(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(np.ones([3], dtype=np.float32), mvn.variance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3], [2, 2, 2]])**2.,
mvn.variance().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1], [4, 5, 6]])**2.,
mvn.variance().eval())
def testStddev(self):
with self.cached_session():
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(np.ones([3], dtype=np.float32), mvn.stddev().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3], [2, 2, 2]]),
mvn.stddev().eval())
mvn = ds.MultivariateNormalDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1], [4, 5, 6]]),
mvn.stddev().eval())
def testMultivariateNormalDiagWithSoftplusScale(self):
mu = [-1.0, 1.0]
diag = [-1.0, -2.0]
with self.cached_session():
dist = ds.MultivariateNormalDiagWithSoftplusScale(
mu, diag, validate_args=True)
samps = dist.sample(1000, seed=0).eval()
cov_mat = array_ops.matrix_diag(nn_ops.softplus(diag)).eval()**2
self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
def testMultivariateNormalDiagNegLogLikelihood(self):
num_draws = 50
dims = 3
with self.cached_session() as sess:
x_pl = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, dims], name="x")
mu_var = variable_scope.get_variable(
name="mu",
shape=[dims],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(1.))
sess.run([variables.global_variables_initializer()])
mvn = ds.MultivariateNormalDiag(
loc=mu_var,
scale_diag=array_ops.ones(shape=[dims], dtype=dtypes.float32))
# Typically you'd use `mvn.log_prob(x_pl)` which is always at least as
# numerically stable as `tf.math.log(mvn.prob(x_pl))`. However in this
# test we're testing a bug specific to `prob` and not `log_prob`;
# http://stackoverflow.com/q/45109305. (The underlying issue was not
# related to `Distributions` but that `reduce_prod` didn't correctly
# handle negative indexes.)
neg_log_likelihood = -math_ops.reduce_sum(math_ops.log(mvn.prob(x_pl)))
grad_neg_log_likelihood = gradients_impl.gradients(
neg_log_likelihood, variables.trainable_variables())
x = np.zeros([num_draws, dims], dtype=np.float32)
grad_neg_log_likelihood_ = sess.run(
grad_neg_log_likelihood, feed_dict={x_pl: x})
self.assertEqual(1, len(grad_neg_log_likelihood_))
self.assertAllClose(
grad_neg_log_likelihood_[0],
np.tile(num_draws, dims),
rtol=1e-6,
atol=0.)
def testDynamicBatchShape(self):
mvn = ds.MultivariateNormalDiag(
loc=array_ops.placeholder(dtypes.float32, shape=[None, None, 2]),
scale_diag=array_ops.placeholder(dtypes.float32, shape=[None, None, 2]))
self.assertListEqual(mvn.batch_shape.as_list(), [None, None])
self.assertListEqual(mvn.event_shape.as_list(), [2])
def testDynamicEventShape(self):
mvn = ds.MultivariateNormalDiag(
loc=array_ops.placeholder(dtypes.float32, shape=[2, 3, None]),
scale_diag=array_ops.placeholder(dtypes.float32, shape=[2, 3, None]))
self.assertListEqual(mvn.batch_shape.as_list(), [2, 3])
self.assertListEqual(mvn.event_shape.as_list(), [None])
def testKLDivIdenticalGradientDefined(self):
dims = 3
with self.cached_session() as sess:
loc = array_ops.zeros([dims], dtype=dtypes.float32)
mvn = ds.MultivariateNormalDiag(
loc=loc, scale_diag=np.ones([dims], dtype=np.float32))
g = gradients_impl.gradients(ds.kl_divergence(mvn, mvn), loc)
g_ = sess.run(g)
self.assertAllEqual(np.ones_like(g_, dtype=np.bool), np.isfinite(g_))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/mvn_diag_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import autoregressive as autoregressive_lib
from tensorflow.contrib.distributions.python.ops import independent as independent_lib
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import MaskedAutoregressiveFlow
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.platform import test
class AutoregressiveTest(test_util.VectorDistributionTestHelpers, test.TestCase):
"""Tests the Autoregressive distribution."""
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_scale_tril(self, event_size):
n = np.int32(event_size * (event_size + 1) // 2)
p = 2. * self._rng.random_sample(n).astype(np.float32) - 1.
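    # 0.25 * p is d * (d + 1) // 2 values in (-0.25, 0.25); fill_triangular
    # arranges them into a [d, d] lower-triangular matrix.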
return distribution_util.fill_triangular(0.25 * p)
def _normal_fn(self, affine_bijector):
def _fn(samples):
scale = math_ops.exp(affine_bijector.forward(samples))
return independent_lib.Independent(
normal_lib.Normal(loc=0., scale=scale, validate_args=True),
reinterpreted_batch_ndims=1)
return _fn
def testSampleAndLogProbConsistency(self):
batch_shape = []
event_size = 2
with self.cached_session() as sess:
batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
sample0 = array_ops.zeros(batch_event_shape)
affine = Affine(scale_tril=self._random_scale_tril(event_size))
ar = autoregressive_lib.Autoregressive(
self._normal_fn(affine), sample0, validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, ar, radius=1., center=0., rtol=0.01)
def testCompareToBijector(self):
"""Demonstrates equivalence between TD, Bijector approach and AR dist."""
sample_shape = np.int32([4, 5])
batch_shape = np.int32([])
event_size = np.int32(2)
with self.cached_session() as sess:
batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
sample0 = array_ops.zeros(batch_event_shape)
affine = Affine(scale_tril=self._random_scale_tril(event_size))
ar = autoregressive_lib.Autoregressive(
self._normal_fn(affine), sample0, validate_args=True)
ar_flow = MaskedAutoregressiveFlow(
is_constant_jacobian=True,
shift_and_log_scale_fn=lambda x: [None, affine.forward(x)],
validate_args=True)
td = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=ar_flow,
event_shape=[event_size],
batch_shape=batch_shape,
validate_args=True)
x_shape = np.concatenate(
[sample_shape, batch_shape, [event_size]], axis=0)
x = 2. * self._rng.random_sample(x_shape).astype(np.float32) - 1.
td_log_prob_, ar_log_prob_ = sess.run([td.log_prob(x), ar.log_prob(x)])
self.assertAllClose(td_log_prob_, ar_log_prob_, atol=0., rtol=1e-6)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/autoregressive_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mixture
from tensorflow.contrib.distributions.python.ops import mixture_same_family
from tensorflow.contrib.distributions.python.ops import mvn_diag
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.linalg import linear_operator_diag
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _powerset(x):
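  # Yields every subset of x, e.g. _powerset([0, 1]) -> (), (0,), (1,), (0, 1).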
s = list(x)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
def _matrix_diag(d):
"""Batch version of np.diag."""
orig_shape = d.shape
d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))
diag_list = []
for i in range(d.shape[0]):
diag_list.append(np.diag(d[i, ...]))
return np.reshape(diag_list, orig_shape + (d.shape[-1],))
def _make_tril_scale(
loc=None,
scale_tril=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None):
if scale_tril is not None:
scale_tril = np.tril(scale_tril)
if scale_diag is not None:
scale_tril += _matrix_diag(np.array(scale_diag, dtype=np.float32))
if scale_identity_multiplier is not None:
scale_tril += (
scale_identity_multiplier * _matrix_diag(np.ones(
[scale_tril.shape[-1]], dtype=np.float32)))
return scale_tril
return _make_diag_scale(
loc, scale_diag, scale_identity_multiplier, shape_hint)
def _make_diag_scale(
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None):
if scale_diag is not None:
scale_diag = np.asarray(scale_diag)
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier
return _matrix_diag(scale_diag)
if loc is None and shape_hint is None:
return None
if shape_hint is None:
shape_hint = loc.shape[-1]
if scale_identity_multiplier is None:
scale_identity_multiplier = 1.
return scale_identity_multiplier * np.diag(np.ones(shape_hint))
class MakeTrilScaleTest(test.TestCase):
def _testLegalInputs(
self, loc=None, shape_hint=None, scale_params=None):
for args in _powerset(scale_params.items()):
with self.cached_session():
args = dict(args)
scale_args = dict({
"loc": loc,
"shape_hint": shape_hint}, **args)
expected_scale = _make_tril_scale(**scale_args)
if expected_scale is None:
# Not enough shape information was specified.
with self.assertRaisesRegexp(ValueError, ("is specified.")):
scale = distribution_util.make_tril_scale(**scale_args)
scale.to_dense().eval()
else:
scale = distribution_util.make_tril_scale(**scale_args)
self.assertAllClose(expected_scale, scale.to_dense().eval())
def testLegalInputs(self):
self._testLegalInputs(
loc=np.array([-1., -1.], dtype=np.float32),
shape_hint=2,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.],
"scale_tril": [[1., 0.],
[-3., 3.]],
})
def testLegalInputsMultidimensional(self):
self._testLegalInputs(
loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),
shape_hint=3,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3., 4.], [3., 4., 5.]]],
"scale_tril": [[[[1., 0., 0.],
[-3., 3., 0.],
[1., -2., 1.]],
[[2., 1., 0.],
[-4., 7., 0.],
[1., -1., 1.]]]]
})
def testZeroTriU(self):
with self.cached_session():
scale = distribution_util.make_tril_scale(scale_tril=[[1., 1], [1., 1.]])
self.assertAllClose([[1., 0], [1., 1.]], scale.to_dense().eval())
def testValidateArgs(self):
with self.cached_session():
with self.assertRaisesOpError("diagonal part must be non-zero"):
scale = distribution_util.make_tril_scale(
scale_tril=[[0., 1], [1., 1.]], validate_args=True)
scale.to_dense().eval()
def testAssertPositive(self):
with self.cached_session():
with self.assertRaisesOpError("diagonal part must be positive"):
scale = distribution_util.make_tril_scale(
scale_tril=[[-1., 1], [1., 1.]],
validate_args=True,
assert_positive=True)
scale.to_dense().eval()
class MakeDiagScaleTest(test.TestCase):
def _testLegalInputs(
self, loc=None, shape_hint=None, scale_params=None):
for args in _powerset(scale_params.items()):
with self.cached_session():
args = dict(args)
scale_args = dict({
"loc": loc,
"shape_hint": shape_hint}, **args)
expected_scale = _make_diag_scale(**scale_args)
if expected_scale is None:
# Not enough shape information was specified.
with self.assertRaisesRegexp(ValueError, ("is specified.")):
scale = distribution_util.make_diag_scale(**scale_args)
scale.to_dense().eval()
else:
scale = distribution_util.make_diag_scale(**scale_args)
self.assertAllClose(expected_scale, scale.to_dense().eval())
def testLegalInputs(self):
self._testLegalInputs(
loc=np.array([-1., -1.], dtype=np.float32),
shape_hint=2,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.]
})
def testLegalInputsMultidimensional(self):
self._testLegalInputs(
loc=np.array([[[-1., -1., 2.], [-2., -3., 4.]]], dtype=np.float32),
shape_hint=3,
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3., 4.], [3., 4., 5.]]]
})
def testValidateArgs(self):
with self.cached_session():
with self.assertRaisesOpError("diagonal part must be non-zero"):
scale = distribution_util.make_diag_scale(
scale_diag=[[0., 1], [1., 1.]], validate_args=True)
scale.to_dense().eval()
def testAssertPositive(self):
with self.cached_session():
with self.assertRaisesOpError("diagonal part must be positive"):
scale = distribution_util.make_diag_scale(
scale_diag=[[-1., 1], [1., 1.]],
validate_args=True,
assert_positive=True)
scale.to_dense().eval()
class ShapesFromLocAndScaleTest(test.TestCase):
def test_static_loc_static_scale_non_matching_event_size_raises(self):
loc = constant_op.constant(np.zeros((2, 4)))
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
with self.assertRaisesRegexp(ValueError, "could not be broadcast"):
distribution_util.shapes_from_loc_and_scale(loc, scale)
def test_static_loc_static_scale(self):
loc = constant_op.constant(np.zeros((2, 3)))
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self.assertEqual(tensor_shape.TensorShape([5, 2]), batch_shape)
self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
def test_static_loc_dynamic_scale(self):
loc = constant_op.constant(np.zeros((2, 3)))
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.cached_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 1, 3))})
self.assertAllEqual([5, 2], batch_shape)
self.assertAllEqual([3], event_shape)
def test_dynamic_loc_static_scale(self):
loc = array_ops.placeholder(dtypes.float64)
diag = constant_op.constant(np.ones((5, 2, 3)))
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.cached_session():
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
# batch_shape depends on both args, and so is dynamic. Since loc did not
# have static shape, we inferred event shape entirely from scale, and this
# is available statically.
self.assertAllEqual(
[5, 2], batch_shape.eval(feed_dict={loc: np.zeros((2, 3))}))
self.assertAllEqual([3], event_shape)
def test_dynamic_loc_dynamic_scale(self):
loc = array_ops.placeholder(dtypes.float64)
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.cached_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 2, 3)), loc: np.zeros((2, 3))})
self.assertAllEqual([5, 2], batch_shape)
self.assertAllEqual([3], event_shape)
def test_none_loc_static_scale(self):
loc = None
scale = linear_operator_diag.LinearOperatorDiag(np.ones((5, 1, 3)))
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
self.assertEqual(tensor_shape.TensorShape([5, 1]), batch_shape)
self.assertEqual(tensor_shape.TensorShape([3]), event_shape)
def test_none_loc_dynamic_scale(self):
loc = None
diag = array_ops.placeholder(dtypes.float64)
scale = linear_operator_diag.LinearOperatorDiag(diag)
with self.cached_session() as sess:
batch_shape, event_shape = sess.run(
distribution_util.shapes_from_loc_and_scale(loc, scale),
feed_dict={diag: np.ones((5, 1, 3))})
self.assertAllEqual([5, 1], batch_shape)
self.assertAllEqual([3], event_shape)
class GetBroadcastShapeTest(test.TestCase):
def test_all_static_shapes_work(self):
x = array_ops.ones((2, 1, 3))
y = array_ops.ones((1, 5, 3))
z = array_ops.ones(())
self.assertAllEqual([2, 5, 3],
distribution_util.get_broadcast_shape(x, y, z))
def test_with_some_dynamic_shapes_works(self):
x = array_ops.ones((2, 1, 3))
y = array_ops.placeholder(x.dtype)
z = array_ops.ones(())
with self.cached_session() as sess:
bcast_shape = sess.run(
distribution_util.get_broadcast_shape(x, y, z),
feed_dict={y: np.ones((1, 5, 3)).astype(np.float32)})
self.assertAllEqual([2, 5, 3], bcast_shape)
class TridiagTest(test.TestCase):
def testWorksCorrectlyNoBatches(self):
with self.cached_session():
self.assertAllEqual(
[[4., 8., 0., 0.],
[1., 5., 9., 0.],
[0., 2., 6., 10.],
[0., 0., 3, 7.]],
distribution_util.tridiag(
[1., 2., 3.],
[4., 5., 6., 7.],
[8., 9., 10.]).eval())
def testWorksCorrectlyBatches(self):
with self.cached_session():
self.assertAllClose(
[[[4., 8., 0., 0.],
[1., 5., 9., 0.],
[0., 2., 6., 10.],
[0., 0., 3, 7.]],
[[0.7, 0.1, 0.0, 0.0],
[0.8, 0.6, 0.2, 0.0],
[0.0, 0.9, 0.5, 0.3],
[0.0, 0.0, 1.0, 0.4]]],
distribution_util.tridiag(
[[1., 2., 3.],
[0.8, 0.9, 1.]],
[[4., 5., 6., 7.],
[0.7, 0.6, 0.5, 0.4]],
[[8., 9., 10.],
[0.1, 0.2, 0.3]]).eval(),
rtol=1e-5, atol=0.)
def testHandlesNone(self):
with self.cached_session():
self.assertAllClose(
[[[4., 0., 0., 0.],
[0., 5., 0., 0.],
[0., 0., 6., 0.],
[0., 0., 0, 7.]],
[[0.7, 0.0, 0.0, 0.0],
[0.0, 0.6, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.0],
[0.0, 0.0, 0.0, 0.4]]],
distribution_util.tridiag(
diag=[[4., 5., 6., 7.],
[0.7, 0.6, 0.5, 0.4]]).eval(),
rtol=1e-5, atol=0.)
class MixtureStddevTest(test.TestCase):
def test_mixture_dev(self):
mixture_weights = np.array([
[1.0/3, 1.0/3, 1.0/3],
[0.750, 0.250, 0.000]
])
component_means = np.array([
[1.0, 1.0, 1.0],
[-5, 0, 1.25]
])
component_devs = np.array([
[1.0, 1.0, 1.0],
[0.01, 2.0, 0.1]
])
# The first case should trivially have a standard deviation of 1.0 because
# all components are identical and have that standard deviation.
# The second case was computed by hand.
expected_devs = np.array([
1.0,
2.3848637277
])
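    # Sketch of the hand computation for the second row:
    #   E[X]   = 0.75 * (-5) + 0.25 * 0           = -3.75
    #   E[X^2] = 0.75 * (0.01**2 + 25) + 0.25 * 4 = 19.750075
    #   Var    = 19.750075 - 3.75**2              = 5.687575
    #   stddev = sqrt(5.687575)                   ~= 2.3848637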
weights_tf = array_ops.constant(mixture_weights)
means_tf = array_ops.constant(component_means)
sigmas_tf = array_ops.constant(component_devs)
mix_dev = distribution_util.mixture_stddev(weights_tf,
means_tf,
sigmas_tf)
with self.cached_session() as sess:
actual_devs = sess.run(mix_dev)
self.assertAllClose(actual_devs, expected_devs)
class PadMixtureDimensionsTest(test.TestCase):
def test_pad_mixture_dimensions_mixture(self):
with self.cached_session() as sess:
gm = mixture.Mixture(
cat=categorical.Categorical(probs=[[0.3, 0.7]]),
components=[
normal.Normal(loc=[-1.0], scale=[1.0]),
normal.Normal(loc=[1.0], scale=[0.5])
])
x = array_ops.constant([[1.0, 2.0], [3.0, 4.0]])
x_pad = distribution_util.pad_mixture_dimensions(
x, gm, gm.cat, gm.event_shape.ndims)
x_out, x_pad_out = sess.run([x, x_pad])
self.assertAllEqual(x_pad_out.shape, [2, 2])
self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))
def test_pad_mixture_dimensions_mixture_same_family(self):
with self.cached_session() as sess:
gm = mixture_same_family.MixtureSameFamily(
mixture_distribution=categorical.Categorical(probs=[0.3, 0.7]),
components_distribution=mvn_diag.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]], scale_identity_multiplier=[1.0, 0.5]))
x = array_ops.constant([[1.0, 2.0], [3.0, 4.0]])
x_pad = distribution_util.pad_mixture_dimensions(
x, gm, gm.mixture_distribution, gm.event_shape.ndims)
x_out, x_pad_out = sess.run([x, x_pad])
self.assertAllEqual(x_pad_out.shape, [2, 2, 1])
self.assertAllEqual(x_out.reshape([-1]), x_pad_out.reshape([-1]))
class _PadTest(object):
def testNegAxisCorrectness(self):
x_ = np.float32([[1., 2, 3],
[4, 5, 6]])
value_ = np.float32(0.25)
count_ = np.int32(2)
with self.cached_session() as sess:
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.is_static_shape else None)
value = (constant_op.constant(value_) if self.is_static_shape
else array_ops.placeholder_with_default(value_, shape=None))
count = (constant_op.constant(count_) if self.is_static_shape
else array_ops.placeholder_with_default(count_, shape=None))
x0_front = distribution_util.pad(
x, axis=-2, value=value, count=count, front=True)
x0_back = distribution_util.pad(
x, axis=-2, count=count, back=True)
x0_both = distribution_util.pad(
x, axis=-2, value=value, front=True, back=True)
if self.is_static_shape:
self.assertAllEqual([4, 3], x0_front.shape)
self.assertAllEqual([4, 3], x0_back.shape)
self.assertAllEqual([4, 3], x0_both.shape)
[x0_front_, x0_back_, x0_both_] = sess.run([
x0_front, x0_back, x0_both])
self.assertAllClose(
np.float32([[value_]*3,
[value_]*3,
[1, 2, 3],
[4, 5, 6]]),
x0_front_, atol=0., rtol=1e-6)
self.assertAllClose(
np.float32([[1, 2, 3],
[4, 5, 6],
[0.]*3,
[0.]*3]),
x0_back_, atol=0., rtol=1e-6)
self.assertAllClose(
np.float32([[value_]*3,
[1, 2, 3],
[4, 5, 6],
[value_]*3]),
x0_both_, atol=0., rtol=1e-6)
def testPosAxisCorrectness(self):
x_ = np.float32([[1., 2, 3],
[4, 5, 6]])
value_ = np.float32(0.25)
count_ = np.int32(2)
with self.cached_session() as sess:
x = array_ops.placeholder_with_default(
x_, shape=x_.shape if self.is_static_shape else None)
value = (constant_op.constant(value_) if self.is_static_shape
else array_ops.placeholder_with_default(value_, shape=None))
count = (constant_op.constant(count_) if self.is_static_shape
else array_ops.placeholder_with_default(count_, shape=None))
x1_front = distribution_util.pad(
x, axis=1, value=value, count=count, front=True)
x1_back = distribution_util.pad(
x, axis=1, count=count, back=True)
x1_both = distribution_util.pad(
x, axis=1, value=value, front=True, back=True)
if self.is_static_shape:
self.assertAllEqual([2, 5], x1_front.shape)
self.assertAllEqual([2, 5], x1_back.shape)
self.assertAllEqual([2, 5], x1_both.shape)
[x1_front_, x1_back_, x1_both_] = sess.run([
x1_front, x1_back, x1_both])
self.assertAllClose(
np.float32([[value_]*2 + [1, 2, 3],
[value_]*2 + [4, 5, 6]]),
x1_front_, atol=0., rtol=1e-6)
self.assertAllClose(
np.float32([[1, 2, 3] + [0.]*2,
[4, 5, 6] + [0.]*2]),
x1_back_, atol=0., rtol=1e-6)
self.assertAllClose(
np.float32([[value_, 1, 2, 3, value_],
[value_, 4, 5, 6, value_]]),
x1_both_, atol=0., rtol=1e-6)
class PadStaticTest(_PadTest, test.TestCase):
@property
def is_static_shape(self):
return True
class PadDynamicTest(_PadTest, test.TestCase):
@property
def is_static_shape(self):
return False
@test_util.run_all_in_graph_and_eager_modes
class TestMoveDimension(test.TestCase):
def test_move_dimension_static_shape(self):
x = random_ops.random_normal(shape=[200, 30, 4, 1, 6])
x_perm = distribution_util.move_dimension(x, 1, 1)
self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 4, 1, 6])
x_perm = distribution_util.move_dimension(x, 0, 3)
self.assertAllEqual(x_perm.shape.as_list(), [30, 4, 1, 200, 6])
x_perm = distribution_util.move_dimension(x, 0, -2)
self.assertAllEqual(x_perm.shape.as_list(), [30, 4, 1, 200, 6])
x_perm = distribution_util.move_dimension(x, 4, 2)
self.assertAllEqual(x_perm.shape.as_list(), [200, 30, 6, 4, 1])
def test_move_dimension_dynamic_shape(self):
x_ = random_ops.random_normal(shape=[200, 30, 4, 1, 6])
x = array_ops.placeholder_with_default(input=x_, shape=None)
x_perm = distribution_util.move_dimension(x, 1, 1)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[200, 30, 4, 1, 6])
x_perm = distribution_util.move_dimension(x, 0, 3)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[30, 4, 1, 200, 6])
x_perm = distribution_util.move_dimension(x, 0, -2)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[30, 4, 1, 200, 6])
x_perm = distribution_util.move_dimension(x, 4, 2)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[200, 30, 6, 4, 1])
x_perm = distribution_util.move_dimension(x, -1, 2)
self.assertAllEqual(self.evaluate(array_ops.shape(x_perm)),
[200, 30, 6, 4, 1])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VectorExponentialLinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
ds = distributions
class VectorExponentialDiagTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
ds.VectorExponentialDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).get_shape())
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
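      # VectorExponentialDiag represents loc + scale_diag * X with
      # X_i ~ Exponential(rate=1), so E[X_i] = 1 and the mean is
      # loc + scale_diag, as asserted below.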
self.assertAllEqual([-1. + 1., 1. - 5.], dist.mean().eval())
def testMode(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], dist.mode().eval())
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1. + 1, -1. - 5], dist.mean().eval())
def testSample(self):
mu = [-2., 1]
diag = [1., -2]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
samps = dist.sample(int(1e4), seed=0).eval()
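      # A standard Exponential(1) has variance 1, so the covariance is simply
      # scale_diag**2 arranged on the diagonal.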
cov_mat = array_ops.matrix_diag(diag).eval()**2
self.assertAllClose([-2 + 1, 1. - 2], samps.mean(axis=0),
atol=0., rtol=0.05)
self.assertAllClose(cov_mat, np.cov(samps.T),
atol=0.05, rtol=0.05)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
dist.sample().eval()
def testSampleWithBroadcastScale(self):
    # mu corresponds to a 2-batch of 3-variate vector exponentials.
mu = np.zeros([2, 3])
    # diag corresponds to no batches of 3-variate vector exponentials.
diag = np.ones([3])
with self.cached_session():
dist = ds.VectorExponentialDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.get_shape())
self.assertAllClose(mu + diag, mean.eval())
n = int(1e4)
samps = dist.sample(n, seed=0).eval()
samps_centered = samps - samps.mean(axis=0)
cov_mat = array_ops.matrix_diag(diag).eval()**2
sample_cov = np.matmul(samps_centered.transpose([1, 2, 0]),
samps_centered.transpose([1, 0, 2])) / n
self.assertAllClose(mu + diag, samps.mean(axis=0),
atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov,
atol=0.10, rtol=0.05)
def testCovariance(self):
with self.cached_session():
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.diag(np.ones([3], dtype=np.float32)),
vex.covariance().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllEqual([2], vex.batch_shape)
self.assertAllEqual([3], vex.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0],
[0, 3, 0],
[0, 0, 3]],
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]]])**2.,
vex.covariance().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllEqual([2], vex.batch_shape)
self.assertAllEqual([3], vex.event_shape)
self.assertAllClose(
np.array([[[3., 0, 0],
[0, 2, 0],
[0, 0, 1]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]]])**2.,
vex.covariance().eval())
def testVariance(self):
with self.cached_session():
vex = ds.VectorExponentialDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.ones([3], dtype=np.float32),
vex.variance().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3],
[2., 2, 2]])**2.,
vex.variance().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.ones([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1],
[4., 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1],
[4., 5, 6]])**2.,
vex.variance().eval())
def testStddev(self):
with self.cached_session():
vex = ds.VectorExponentialDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.ones([3], dtype=np.float32),
vex.stddev().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.array([[3., 3, 3],
[2., 2, 2]]),
vex.stddev().eval())
vex = ds.VectorExponentialDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.array([[3., 2, 1],
[4., 5, 6]]),
vex.stddev().eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/vector_exponential_diag_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VectorLaplaceLinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
ds = distributions
class VectorLaplaceDiagTest(test.TestCase):
"""Well tested because this is a simple override of the base class."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testScalarParams(self):
mu = -1.
diag = -5.
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "at least 1 dimension"):
ds.VectorLaplaceDiag(mu, diag)
def testVectorParams(self):
mu = [-1.]
diag = [-5.]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual([3, 1], dist.sample(3).get_shape())
def testDistWithBatchShapeOneThenTransformedThroughSoftplus(self):
# This complex combination of events resulted in a loss of static shape
# information when tensor_util.constant_value(self._needs_rotation) was
# being used incorrectly (resulting in always rotating).
# Batch shape = [1], event shape = [3]
mu = array_ops.zeros((1, 3))
diag = array_ops.ones((1, 3))
with self.cached_session():
base_dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
dist = ds.TransformedDistribution(
base_dist,
validate_args=True,
bijector=bijectors.Softplus())
samps = dist.sample(5) # Shape [5, 1, 3].
self.assertAllEqual([5, 1], dist.log_prob(samps).get_shape())
def testMean(self):
mu = [-1., 1]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual(mu, dist.mean().eval())
def testMeanWithBroadcastLoc(self):
mu = [-1.]
diag = [1., -5]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
self.assertAllEqual([-1., -1.], dist.mean().eval())
def testSample(self):
mu = [-1., 1]
diag = [1., -2]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
samps = dist.sample(int(1e4), seed=0).eval()
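      # A standard Laplace(0, 1) has variance 2, hence the factor of 2 in the
      # diagonal covariance below.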
cov_mat = 2. * array_ops.matrix_diag(diag).eval()**2
self.assertAllClose(mu, samps.mean(axis=0),
atol=0., rtol=0.05)
self.assertAllClose(cov_mat, np.cov(samps.T),
atol=0.05, rtol=0.05)
def testSingularScaleRaises(self):
mu = [-1., 1]
diag = [1., 0]
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
with self.assertRaisesOpError("Singular"):
dist.sample().eval()
def testSampleWithBroadcastScale(self):
    # mu corresponds to a 2-batch of 3-variate vector Laplace distributions.
mu = np.zeros([2, 3])
    # diag corresponds to no batches of 3-variate vector Laplace distributions.
diag = np.ones([3])
with self.cached_session():
dist = ds.VectorLaplaceDiag(mu, diag, validate_args=True)
mean = dist.mean()
self.assertAllEqual([2, 3], mean.get_shape())
self.assertAllClose(mu, mean.eval())
n = int(1e4)
samps = dist.sample(n, seed=0).eval()
cov_mat = 2. * array_ops.matrix_diag(diag).eval()**2
sample_cov = np.matmul(samps.transpose([1, 2, 0]),
samps.transpose([1, 0, 2])) / n
self.assertAllClose(mu, samps.mean(axis=0),
atol=0.10, rtol=0.05)
self.assertAllClose([cov_mat, cov_mat], sample_cov,
atol=0.10, rtol=0.05)
def testCovariance(self):
with self.cached_session():
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
2. * np.diag(np.ones([3], dtype=np.float32)),
vla.covariance().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllEqual([2], vla.batch_shape)
self.assertAllEqual([3], vla.event_shape)
self.assertAllClose(
2. * np.array([[[3., 0, 0],
[0, 3, 0],
[0, 0, 3]],
[[2, 0, 0],
[0, 2, 0],
[0, 0, 2]]])**2.,
vla.covariance().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllEqual([2], vla.batch_shape)
self.assertAllEqual([3], vla.event_shape)
self.assertAllClose(
2. * np.array([[[3., 0, 0],
[0, 2, 0],
[0, 0, 1]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]]])**2.,
vla.covariance().eval())
def testVariance(self):
with self.cached_session():
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
2. * np.ones([3], dtype=np.float32),
vla.variance().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
2. * np.array([[3., 3, 3],
[2, 2, 2]])**2.,
vla.variance().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1],
[4, 5, 6]])
self.assertAllClose(
2. * np.array([[3., 2, 1],
[4, 5, 6]])**2.,
vla.variance().eval())
def testStddev(self):
with self.cached_session():
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([2, 3], dtype=dtypes.float32))
self.assertAllClose(
np.sqrt(2) * np.ones([3], dtype=np.float32),
vla.stddev().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_identity_multiplier=[3., 2.])
self.assertAllClose(
np.sqrt(2) * np.array([[3., 3, 3],
[2, 2, 2]]),
vla.stddev().eval())
vla = ds.VectorLaplaceDiag(
loc=array_ops.zeros([3], dtype=dtypes.float32),
scale_diag=[[3., 2, 1], [4, 5, 6]])
self.assertAllClose(
np.sqrt(2) * np.array([[3., 2, 1],
[4, 5, 6]]),
vla.stddev().eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/vector_laplace_diag_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions as distributions_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
distributions = distributions_lib
rng = np.random.RandomState(123)
class QuantizedDistributionTest(test.TestCase):
def _assert_all_finite(self, array):
self.assertTrue(np.isfinite(array).all())
def testQuantizationOfUniformWithCutoffsHavingNoEffect(self):
with self.cached_session() as sess:
# The Quantized uniform with cutoffs == None divides the real line into:
# R = ...(-1, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# Since this uniform (below) is supported on [0, 3],
# it places 1/3 of its mass in the intervals j = 1, 2, 3.
# Adding a cutoff at y = 0 changes the picture to
# R = ...(-inf, 0](0, 1](1, 2](2, 3](3, 4]...
# j = ... 0 1 2 3 4 ...
# So the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
# Adding a cutoff at y = 3 changes the picture to
# R = ...(-1, 0](0, 1](1, 2](2, inf)
# j = ... 0 1 2 3
# and the QUniform still places 1/3 of its mass in the intervals
# j = 1, 2, 3.
for lcut, ucut in [(None, None), (0.0, None), (None, 3.0), (0.0, 3.0),
(-10., 10.)]:
qdist = distributions.QuantizedDistribution(
distribution=distributions.Uniform(low=0.0, high=3.0),
low=lcut,
high=ucut)
# pmf
pmf_n1, pmf_0, pmf_1, pmf_2, pmf_3, pmf_4, pmf_5 = sess.run(
qdist.prob([-1., 0., 1., 2., 3., 4., 5.]))
# uniform had no mass below -1.
self.assertAllClose(0., pmf_n1)
# uniform had no mass below 0.
self.assertAllClose(0., pmf_0)
# uniform put 1/3 of its mass in each of (0, 1], (1, 2], (2, 3],
# which are the intervals j = 1, 2, 3.
self.assertAllClose(1 / 3, pmf_1)
self.assertAllClose(1 / 3, pmf_2)
self.assertAllClose(1 / 3, pmf_3)
# uniform had no mass in (3, 4] or (4, 5], which are j = 4, 5.
self.assertAllClose(0 / 3, pmf_4)
self.assertAllClose(0 / 3, pmf_5)
# cdf
cdf_n1, cdf_0, cdf_1, cdf_2, cdf_2p5, cdf_3, cdf_4, cdf_5 = sess.run(
qdist.cdf([-1., 0., 1., 2., 2.5, 3., 4., 5.]))
self.assertAllClose(0., cdf_n1)
self.assertAllClose(0., cdf_0)
self.assertAllClose(1 / 3, cdf_1)
self.assertAllClose(2 / 3, cdf_2)
# Note fractional values allowed for cdfs of discrete distributions.
# And adding 0.5 makes no difference because the quantized dist has
# mass only on the integers, never in between.
self.assertAllClose(2 / 3, cdf_2p5)
self.assertAllClose(3 / 3, cdf_3)
self.assertAllClose(3 / 3, cdf_4)
self.assertAllClose(3 / 3, cdf_5)
def testQuantizationOfUniformWithCutoffsInTheMiddle(self):
with self.cached_session() as sess:
# The uniform is supported on [-3, 3]
# Consider partitions the real line in intervals
# ...(-3, -2](-2, -1](-1, 0](0, 1](1, 2](2, 3] ...
# Before cutoffs, the uniform puts a mass of 1/6 in each interval written
# above. Because of cutoffs, the qdist considers intervals and indices
# ...(-infty, -1](-1, 0](0, infty) ...
# -1 0 1
qdist = distributions.QuantizedDistribution(
distribution=distributions.Uniform(low=-3., high=3.),
low=-1.0,
high=1.0)
      # cdf
cdf_n3, cdf_n2, cdf_n1, cdf_0, cdf_0p5, cdf_1, cdf_10 = sess.run(
qdist.cdf([-3., -2., -1., 0., 0.5, 1.0, 10.0]))
# Uniform had no mass on (-4, -3] or (-3, -2]
self.assertAllClose(0., cdf_n3)
self.assertAllClose(0., cdf_n2)
# Uniform had 1/6 of its mass in each of (-3, -2], and (-2, -1], which
# were collapsed into (-infty, -1], which is now the "-1" interval.
self.assertAllClose(1 / 3, cdf_n1)
# The j=0 interval contained mass from (-3, 0], which is 1/2 of the
# uniform's mass.
self.assertAllClose(1 / 2, cdf_0)
# Adding 0.5 makes no difference because the quantized dist has mass on
# the integers, not in between them.
self.assertAllClose(1 / 2, cdf_0p5)
      # After applying the cutoff, all mass is either in the interval
      # (0, infty), which is indexed by j=1, or below it, so
      # cdf(1) = P[Y <= 1] should equal 1.
self.assertAllClose(1., cdf_1)
      # Since no mass of qdist is above 1,
      # cdf(10) = P[Y <= 10] = P[Y <= 1] = cdf(1) = 1.
self.assertAllClose(1., cdf_10)
def testQuantizationOfBatchOfUniforms(self):
batch_shape = (5, 5)
with self.cached_session():
# The uniforms are supported on [0, 10]. The qdist considers the
# intervals
# ... (0, 1](1, 2]...(9, 10]...
# with the intervals displayed above each holding 1 / 10 of the mass.
# The qdist will be defined with no cutoffs,
uniform = distributions.Uniform(
low=array_ops.zeros(batch_shape, dtype=dtypes.float32),
high=10 * array_ops.ones(batch_shape, dtype=dtypes.float32))
qdist = distributions.QuantizedDistribution(
distribution=uniform, low=None, high=None)
# x is random integers in {-3,...,12}.
x = rng.randint(-3, 13, size=batch_shape).astype(np.float32)
# pmf
# qdist.prob(j) = 1 / 10 for j in {1,...,10}, and 0 otherwise,
expected_pmf = (1 / 10) * np.ones(batch_shape)
expected_pmf[x < 1] = 0.
expected_pmf[x > 10] = 0.
self.assertAllClose(expected_pmf, qdist.prob(x).eval())
# cdf
# qdist.cdf(j)
# = 0 for j < 1
# = j / 10, for j in {1,...,10},
# = 1, for j > 10.
expected_cdf = x.copy() / 10
expected_cdf[x < 1] = 0.
expected_cdf[x > 10] = 1.
self.assertAllClose(expected_cdf, qdist.cdf(x).eval())
def testSamplingFromBatchOfNormals(self):
batch_shape = (2,)
with self.cached_session():
normal = distributions.Normal(
loc=array_ops.zeros(
batch_shape, dtype=dtypes.float32),
scale=array_ops.ones(
batch_shape, dtype=dtypes.float32))
qdist = distributions.QuantizedDistribution(
distribution=normal, low=0., high=None)
samps = qdist.sample(5000, seed=42)
samps_v = samps.eval()
# With low = 0, the interval j=0 is (-infty, 0], which holds 1/2
# of the mass of the normals.
# rtol chosen to be 2x as large as necessary to pass.
self.assertAllClose([0.5, 0.5], (samps_v == 0).mean(axis=0), rtol=0.03)
# The interval j=1 is (0, 1], which is from the mean to one standard
# deviation out. This should contain 0.6827 / 2 of the mass.
self.assertAllClose(
[0.6827 / 2, 0.6827 / 2], (samps_v == 1).mean(axis=0), rtol=0.03)
def testSamplesAgreeWithCdfForSamplesOverLargeRange(self):
# Consider the cdf for distribution X, F(x).
# If U ~ Uniform[0, 1], then Y := F^{-1}(U) is distributed like X since
# P[Y <= y] = P[F^{-1}(U) <= y] = P[U <= F(y)] = F(y).
# If F is a bijection, we also have Z = F(X) is Uniform.
#
# Make an exponential with large mean (= 100). This ensures we will get
# quantized values over a large range. This large range allows us to
# pretend that the cdf F is a bijection, and hence F(X) is uniform.
    # Note that F cannot be a bijection since it is constant between the
# integers. Hence, F(X) (see below) will not be uniform exactly.
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Exponential(rate=0.01))
# X ~ QuantizedExponential
x = qdist.sample(10000, seed=42)
# Z = F(X), should be Uniform.
z = qdist.cdf(x)
# Compare the CDF of Z to that of a Uniform.
# dist = maximum distance between P[Z <= a] and P[U <= a].
      # We ignore the p-value, since of course this distribution is not exactly
      # uniform, and with so many sample points we would get a false failure.
dist, _ = stats.kstest(z.eval(), "uniform")
      # Since the distribution takes values (approximately) in [0, 100], the
# cdf should have jumps (approximately) every 1/100 of the way up.
# Assert that the jumps are not more than 2/100.
self.assertLess(dist, 0.02)
def testSamplesAgreeWithPdfForSamplesOverSmallRange(self):
# Testing that samples and pdf agree for a small range is important because
# it makes sure the bin edges are consistent.
# Make an exponential with mean 5.
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Exponential(rate=0.2))
# Standard error should be less than 1 / (2 * sqrt(n_samples))
n_samples = 10000
stddev_err_bound = 1 / (2 * np.sqrt(n_samples))
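      # (The standard error of a Bernoulli proportion is
      # sqrt(p * (1 - p) / n_samples), and p * (1 - p) <= 1 / 4, which gives
      # the 1 / (2 * sqrt(n_samples)) bound above.)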
samps = qdist.sample((n_samples,), seed=42).eval()
# The smallest value the samples can take on is 1, which corresponds to
# the interval (0, 1]. Recall we use ceiling in the sampling definition.
self.assertLess(0.5, samps.min())
x_vals = np.arange(1, 11).astype(np.float32)
pmf_vals = qdist.prob(x_vals).eval()
for ii in range(10):
self.assertAllClose(
pmf_vals[ii], (samps == x_vals[ii]).mean(), atol=stddev_err_bound)
def testNormalCdfAndSurvivalFunction(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = rng.randn(*batch_shape)
sigma = rng.rand(*batch_shape) + 1.0
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
sp_normal = stats.norm(mu, sigma)
x = rng.randint(-5, 5, size=batch_shape).astype(np.float64)
self.assertAllClose(sp_normal.cdf(x), qdist.cdf(x).eval())
self.assertAllClose(sp_normal.sf(x), qdist.survival_function(x).eval())
def testNormalLogCdfAndLogSurvivalFunction(self):
# At integer values, the result should be the same as the standard normal.
batch_shape = (3, 3)
mu = rng.randn(*batch_shape)
sigma = rng.rand(*batch_shape) + 1.0
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
sp_normal = stats.norm(mu, sigma)
x = rng.randint(-10, 10, size=batch_shape).astype(np.float64)
self.assertAllClose(sp_normal.logcdf(x), qdist.log_cdf(x).eval())
self.assertAllClose(
sp_normal.logsf(x), qdist.log_survival_function(x).eval())
def testNormalProbWithCutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=-2.,
high=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(sm_normal.cdf(-2), qdist.prob(-2.).eval(), atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
sm_normal.cdf(-1) - sm_normal.cdf(-2), qdist.prob(-1.).eval(), atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
sm_normal.cdf(0) - sm_normal.cdf(-1), qdist.prob(0.).eval(), atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(1. - sm_normal.cdf(1), qdist.prob(2.).eval(), atol=0)
def testNormalLogProbWithCutoffs(self):
# At integer values, the result should be the same as the standard normal.
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=-2.,
high=2.)
sm_normal = stats.norm(0., 1.)
# These cutoffs create partitions of the real line, and indices:
# (-inf, -2](-2, -1](-1, 0](0, 1](1, inf)
# -2 -1 0 1 2
# Test interval (-inf, -2], <--> index -2.
self.assertAllClose(
np.log(sm_normal.cdf(-2)), qdist.log_prob(-2.).eval(), atol=0)
# Test interval (-2, -1], <--> index -1.
self.assertAllClose(
np.log(sm_normal.cdf(-1) - sm_normal.cdf(-2)),
qdist.log_prob(-1.).eval(),
atol=0)
# Test interval (-1, 0], <--> index 0.
self.assertAllClose(
np.log(sm_normal.cdf(0) - sm_normal.cdf(-1)),
qdist.log_prob(0.).eval(),
atol=0)
# Test interval (1, inf), <--> index 2.
self.assertAllClose(
np.log(1. - sm_normal.cdf(1)), qdist.log_prob(2.).eval(), atol=0)
def testLogProbAndGradGivesFiniteResults(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
mu = variables.Variable(0., name="mu", dtype=dtype)
sigma = variables.Variable(1., name="sigma", dtype=dtype)
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
x = np.arange(-100, 100, 2).astype(dtype)
proba = qdist.log_prob(x)
grads = gradients_impl.gradients(proba, [mu, sigma])
with self.session(graph=g):
variables.global_variables_initializer().run()
self._assert_all_finite(proba.eval())
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def testProbAndGradGivesFiniteResultsForCommonEvents(self):
with self.cached_session():
mu = variables.Variable(0.0, name="mu")
sigma = variables.Variable(1.0, name="sigma")
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=mu, scale=sigma))
x = math_ops.ceil(4 * rng.rand(100).astype(np.float32) - 2)
variables.global_variables_initializer().run()
proba = qdist.prob(x)
self._assert_all_finite(proba.eval())
grads = gradients_impl.gradients(proba, [mu, sigma])
self._assert_all_finite(grads[0].eval())
self._assert_all_finite(grads[1].eval())
def testLowerCutoffMustBeBelowUpperCutoffOrWeRaise(self):
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=1., # not strictly less than high.
high=1.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Default is True.
with self.assertRaisesOpError("must be strictly less"):
qdist.sample().eval()
def testCutoffsMustBeIntegerValuedIfValidateArgsTrue(self):
with self.cached_session():
low = array_ops.placeholder(dtypes.float32)
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(loc=0., scale=1.),
low=low,
high=10.,
validate_args=True)
self.assertTrue(qdist.validate_args) # Default is True.
with self.assertRaisesOpError("has non-integer components"):
qdist.sample().eval(feed_dict={low: 1.5})
def testCutoffsCanBeFloatValuedIfValidateArgsFalse(self):
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=0., scale=1., validate_args=False),
low=1.5,
high=10.11)
      self.assertFalse(qdist.validate_args)  # Default is False.
# Should not raise
qdist.sample().eval()
def testDtypeAndShapeInheritedFromBaseDist(self):
batch_shape = (2, 3)
with self.cached_session():
qdist = distributions.QuantizedDistribution(
distribution=distributions.Normal(
loc=array_ops.zeros(batch_shape),
scale=array_ops.zeros(batch_shape)),
low=1.0,
high=10.0)
self.assertEqual(batch_shape, qdist.batch_shape)
self.assertAllEqual(batch_shape, qdist.batch_shape_tensor().eval())
self.assertEqual((), qdist.event_shape)
self.assertAllEqual((), qdist.event_shape_tensor().eval())
samps = qdist.sample(10, seed=42)
self.assertEqual((10,) + batch_shape, samps.get_shape())
self.assertAllEqual((10,) + batch_shape, samps.eval().shape)
y = rng.randint(0, 5, size=batch_shape).astype(np.float32)
self.assertEqual(batch_shape, qdist.prob(y).get_shape())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/quantized_distribution_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sample Stats Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import sample_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import spectral_ops_test_util
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class _AutoCorrelationTest(object):
@property
def use_static_shape(self):
raise NotImplementedError("Subclass failed to implement `use_static_shape`")
@property
def dtype(self):
raise NotImplementedError("Subclass failed to implement `dtype`.")
def test_constant_sequence_axis_0_max_lags_none_center_false(self):
x_ = np.array([[0., 0., 0.],
[1., 1., 1.]]).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
input=x_,
shape=x_.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session() as sess:
        # Setting normalize=True would mean dividing by zero here, so keep
        # normalize=False.
auto_corr = sample_stats.auto_correlation(
x_ph, axis=1, center=False, normalize=False)
if self.use_static_shape:
self.assertEqual((2, 3), auto_corr.shape)
auto_corr_ = sess.run(auto_corr)
self.assertAllClose(
[[0., 0., 0.],
[1., 1., 1.]], auto_corr_)
def test_constant_sequence_axis_0_max_lags_none_center_true(self):
x_ = np.array([[0., 0., 0.],
[1., 1., 1.]]).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
input=x_,
shape=x_.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session() as sess:
        # Setting normalize=True would mean dividing by zero here, so keep
        # normalize=False.
auto_corr = sample_stats.auto_correlation(
x_ph, axis=1, normalize=False, center=True)
if self.use_static_shape:
self.assertEqual((2, 3), auto_corr.shape)
auto_corr_ = sess.run(auto_corr)
self.assertAllClose(
[[0., 0., 0.],
[0., 0., 0.]], auto_corr_)
def check_results_versus_brute_force(
self, x, axis, max_lags, center, normalize):
"""Compute auto-correlation by brute force, then compare to tf result."""
    # Brute-force auto-corr -- avoiding fft and transpositions.
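    # For each lag m along `axis`, the estimator below is
    #   rxx[m] = mean over n of x[n] * conj(x[n + m]),
    # averaging over the axis_len - m overlapping positions.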
axis_len = x.shape[axis]
if max_lags is None:
max_lags = axis_len - 1
else:
max_lags = min(axis_len - 1, max_lags)
auto_corr_at_lag = []
if center:
x -= x.mean(axis=axis, keepdims=True)
for m in range(max_lags + 1):
auto_corr_at_lag.append((
np.take(x, indices=range(0, axis_len - m), axis=axis) *
np.conj(np.take(x, indices=range(m, axis_len), axis=axis))
).mean(axis=axis, keepdims=True))
rxx = np.concatenate(auto_corr_at_lag, axis=axis)
if normalize:
rxx /= np.take(rxx, [0], axis=axis)
x_ph = array_ops.placeholder_with_default(
x, shape=x.shape if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
auto_corr = sample_stats.auto_correlation(
x_ph, axis=axis, max_lags=max_lags, center=center,
normalize=normalize)
if self.use_static_shape:
output_shape = list(x.shape)
output_shape[axis] = max_lags + 1
self.assertAllEqual(output_shape, auto_corr.shape)
self.assertAllClose(rxx, auto_corr.eval(), rtol=1e-5, atol=1e-5)
def test_axis_n1_center_false_max_lags_none(self):
x = rng.randn(2, 3, 4).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-1, max_lags=None, center=False, normalize=False)
def test_axis_n2_center_false_max_lags_none(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-2, max_lags=None, center=False, normalize=False)
def test_axis_n1_center_false_max_lags_none_normalize_true(self):
x = rng.randn(2, 3, 4).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-1, max_lags=None, center=False, normalize=True)
def test_axis_n2_center_false_max_lags_none_normalize_true(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=-2, max_lags=None, center=False, normalize=True)
def test_axis_0_center_true_max_lags_none(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=0, max_lags=None, center=True, normalize=False)
def test_axis_2_center_true_max_lags_1(self):
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=2, max_lags=1, center=True, normalize=False)
def test_axis_2_center_true_max_lags_100(self):
    # There are fewer than 100 elements in axis 2, so we expect to get back an
    # array the same size as x, despite having asked for 100 lags.
x = rng.randn(3, 4, 5).astype(self.dtype)
if self.dtype in [np.complex64]:
x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
self.check_results_versus_brute_force(
x, axis=2, max_lags=100, center=True, normalize=False)
def test_long_orthonormal_sequence_has_corr_length_0(self):
l = 10000
x = rng.randn(l).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(l,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=l // 2, center=True, normalize=False)
if self.use_static_shape:
self.assertAllEqual((l // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
        # The OSS CPU FFT kernel is not the most accurate, so this tolerance
        # is a bit loose.
self.assertAllClose(1., rxx_[0], rtol=0.05)
# The maximal error in the rest of the sequence is not great.
self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
# The mean error in the rest is ok, actually 0.008 when I tested it.
self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
def test_step_function_sequence(self):
    # x jumps to a new random value every 10 steps, so the correlation length
    # is 10.
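    # The construction below draws 1000 integers, repeats each one 10 times by
    # broadcasting against np.ones((1, 10)), and flattens to length 10000.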
x = (rng.randint(-10, 10, size=(1000, 1))
* np.ones((1, 10))).ravel().astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(1000 * 10,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=1000 * 10 // 2, center=True, normalize=False)
if self.use_static_shape:
self.assertAllEqual((1000 * 10 // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
rxx_ /= rxx_[0]
        # Expect positive correlation for the first 10 lags, then values that
        # are significantly smaller and possibly negative.
self.assertGreater(rxx_[:10].min(), 0)
self.assertGreater(rxx_[9], 5 * rxx_[10:20].mean())
# RXX should be decreasing for the first 10 lags.
diff = np.diff(rxx_)
self.assertLess(diff[:10].max(), 0)
def test_normalization(self):
l = 10000
x = 3 * rng.randn(l).astype(self.dtype)
x_ph = array_ops.placeholder_with_default(
x, shape=(l,) if self.use_static_shape else None)
with spectral_ops_test_util.fft_kernel_label_map():
with self.cached_session():
rxx = sample_stats.auto_correlation(
x_ph, max_lags=l // 2, center=True, normalize=True)
if self.use_static_shape:
self.assertAllEqual((l // 2 + 1,), rxx.shape)
rxx_ = rxx.eval()
# Note that RXX[0] = 1, despite the fact that E[X^2] = 9, and this is
# due to normalize=True.
        # The OSS CPU FFT kernel is not the most accurate, so this tolerance
        # is a bit loose.
self.assertAllClose(1., rxx_[0], rtol=0.05)
# The maximal error in the rest of the sequence is not great.
self.assertAllClose(np.zeros(l // 2), rxx_[1:], atol=0.1)
# The mean error in the rest is ok, actually 0.008 when I tested it.
self.assertLess(np.abs(rxx_[1:]).mean(), 0.02)
class AutoCorrelationTestStaticShapeFloat32(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.float32
@property
def use_static_shape(self):
return True
class AutoCorrelationTestStaticShapeComplex64(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.complex64
@property
def use_static_shape(self):
return True
class AutoCorrelationTestDynamicShapeFloat32(test.TestCase,
_AutoCorrelationTest):
@property
def dtype(self):
return np.float32
@property
def use_static_shape(self):
return False
class PercentileTestWithLowerInterpolation(test.TestCase):
_interpolation = "lower"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_odd_input_axis_0(self):
x = np.array([[-1., 50., -3.5, 2., -1], [0., 0., 3., 2., 4.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
        # Get dim 0 with negative and positive indices.
        pct_neg_index = sample_stats.percentile(
            x, q=q, interpolation=self._interpolation, axis=[-2])
pct_pos_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct_neg_index.get_shape())
self.assertAllEqual((2,), pct_pos_index.get_shape())
self.assertAllClose(expected_percentile, pct_neg_index.eval())
self.assertAllClose(expected_percentile, pct_pos_index.eval())
def test_two_dim_even_axis_0(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_even_input_and_keep_dims_true(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, keepdims=True, axis=0)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=q,
interpolation=self._interpolation,
keep_dims=True,
axis=[0])
self.assertAllEqual((1, 2), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_and_keepdims(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.cached_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_x_static_ndims_but_dynamic_sizes(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.cached_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_four_dimensional_input_and_keepdims_x_static_ndims_dynamic_sz(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.cached_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_with_integer_dtype(self):
x = [1, 5, 3, 2, 4]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertEqual(dtypes.int32, pct.dtype)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
class PercentileTestWithHigherInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = "higher"
class PercentileTestWithNearestInterpolation(test.TestCase):
"""Test separately because np.round and tf.round make different choices."""
_interpolation = "nearest"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.cached_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_invalid_interpolation_raises(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "interpolation"):
sample_stats.percentile(x, q=0.5, interpolation="bad")
def test_vector_q_raises_static(self):
x = [1., 5., 3., 2., 4.]
with self.assertRaisesRegexp(ValueError, "Expected.*ndims"):
sample_stats.percentile(x, q=[0.5])
def test_vector_q_raises_dynamic(self):
x = [1., 5., 3., 2., 4.]
q_ph = array_ops.placeholder(dtypes.float32)
pct = sample_stats.percentile(x, q=q_ph, validate_args=True)
with self.cached_session():
with self.assertRaisesOpError("rank"):
pct.eval(feed_dict={q_ph: [0.5]})
def test_finds_max_of_long_array(self):
    # d - 1 == d in float32 when d = 3e7, so this test only passes if we use
    # double for the percentile indices. If float is used, it fails with an
    # InvalidArgumentError about an index being out of bounds.
x = math_ops.linspace(0., 3e7, num=int(3e7))
with self.cached_session():
minval = sample_stats.percentile(x, q=0, validate_args=True)
self.assertAllEqual(0, minval.eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/sample_stats_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Mixture distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from scipy import stats
from tensorflow.contrib import distributions
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
ds = distributions
def _swap_first_last_axes(array):
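  # Moves the last axis to the front, e.g. cat probs of shape
  # [batch..., components] become [components, batch...], so tests can
  # iterate over mixture components.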
rank = len(array.shape)
transpose = [rank - 1] + list(range(0, rank - 1))
return array.transpose(transpose)
def _mixture_stddev_np(pi_vector, mu_vector, sigma_vector):
"""Computes the standard deviation of a univariate mixture distribution.
Acts upon `np.array`s (not `tf.Tensor`s).
Args:
pi_vector: A `np.array` of mixture weights. Shape `[batch, components]`.
mu_vector: A `np.array` of means. Shape `[batch, components]`
sigma_vector: A `np.array` of stddevs. Shape `[batch, components]`.
Returns:
A `np.array` containing the batch of standard deviations.
"""
pi_vector = np.expand_dims(pi_vector, axis=1)
mean_wa = np.matmul(pi_vector, np.expand_dims(mu_vector, axis=2))
var_wa = np.matmul(pi_vector, np.expand_dims(sigma_vector**2, axis=2))
mid_term = np.matmul(pi_vector, np.expand_dims(mu_vector**2, axis=2))
mixture_variance = (
np.squeeze(var_wa) + np.squeeze(mid_term) - np.squeeze(mean_wa**2))
return np.sqrt(mixture_variance)
@contextlib.contextmanager
def _test_capture_mvndiag_sample_outputs():
"""Use monkey-patching to capture the output of an MVNDiag _call_sample_n."""
data_container = []
true_mvndiag_call_sample_n = (
ds.MultivariateNormalDiag._call_sample_n)
def _capturing_mvndiag_call_sample_n(
self, sample_shape, seed, name, **kwargs):
samples = true_mvndiag_call_sample_n(
self, sample_shape, seed, name, **kwargs)
data_container.append(samples)
return samples
ds.MultivariateNormalDiag._call_sample_n = (
_capturing_mvndiag_call_sample_n)
yield data_container
ds.MultivariateNormalDiag._call_sample_n = (
true_mvndiag_call_sample_n)
@contextlib.contextmanager
def _test_capture_normal_sample_outputs():
"""Use monkey-patching to capture the output of an Normal _call_sample_n."""
data_container = []
true_normal_call_sample_n = ds.Normal._call_sample_n
def _capturing_normal_call_sample_n(self, sample_shape, seed, name, **kwargs):
samples = true_normal_call_sample_n(
self, sample_shape, seed, name, **kwargs)
data_container.append(samples)
return samples
ds.Normal._call_sample_n = _capturing_normal_call_sample_n
yield data_container
ds.Normal._call_sample_n = true_normal_call_sample_n
def make_univariate_mixture(batch_shape, num_components, use_static_graph):
batch_shape = ops.convert_to_tensor(batch_shape, dtypes.int32)
logits = random_ops.random_uniform(
array_ops.concat((batch_shape, [num_components]), axis=0),
-1, 1, dtype=dtypes.float32) - 50.
components = [
ds.Normal(
loc=random_ops.random_normal(batch_shape),
scale=10 * random_ops.random_uniform(batch_shape))
for _ in range(num_components)
]
cat = ds.Categorical(logits, dtype=dtypes.int32)
return ds.Mixture(cat, components, use_static_graph=use_static_graph)
def make_multivariate_mixture(batch_shape, num_components, event_shape,
use_static_graph, batch_shape_tensor=None):
if batch_shape_tensor is None:
batch_shape_tensor = batch_shape
batch_shape_tensor = ops.convert_to_tensor(batch_shape_tensor, dtypes.int32)
logits = random_ops.random_uniform(
array_ops.concat((batch_shape_tensor, [num_components]), 0),
-1, 1, dtype=dtypes.float32) - 50.
logits.set_shape(
tensor_shape.TensorShape(batch_shape).concatenate(num_components))
static_batch_and_event_shape = (
tensor_shape.TensorShape(batch_shape).concatenate(event_shape))
event_shape = ops.convert_to_tensor(event_shape, dtypes.int32)
batch_and_event_shape = array_ops.concat((batch_shape_tensor, event_shape), 0)
def create_component():
loc = random_ops.random_normal(batch_and_event_shape)
scale_diag = 10 * random_ops.random_uniform(batch_and_event_shape)
loc.set_shape(static_batch_and_event_shape)
scale_diag.set_shape(static_batch_and_event_shape)
return ds.MultivariateNormalDiag(
loc=loc, scale_diag=scale_diag)
components = [create_component() for _ in range(num_components)]
cat = ds.Categorical(logits, dtype=dtypes.int32)
return ds.Mixture(cat, components, use_static_graph=use_static_graph)
class MixtureTest(test.TestCase):
use_static_graph = False
def testShapes(self):
with self.cached_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_univariate_mixture(batch_shape, num_components=10,
use_static_graph=self.use_static_graph)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([], dist.event_shape)
self.assertAllEqual([], dist.event_shape_tensor().eval())
for event_shape in ([1], [2]):
dist = make_multivariate_mixture(
batch_shape, num_components=10, event_shape=event_shape,
use_static_graph=self.use_static_graph)
self.assertAllEqual(batch_shape, dist.batch_shape)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual(event_shape, dist.event_shape)
self.assertAllEqual(event_shape, dist.event_shape_tensor().eval())
def testBrokenShapesStatic(self):
with self.assertRaisesWithPredicateMatch(ValueError,
r"cat.num_classes != len"):
ds.Mixture(
ds.Categorical([0.1, 0.5]), # 2 classes
[ds.Normal(loc=1.0, scale=2.0)],
use_static_graph=self.use_static_graph)
with self.assertRaisesWithPredicateMatch(
ValueError, r"\(\) and \(2,\) are not compatible"):
# The value error is raised because the batch shapes of the
# Normals are not equal. One is a scalar, the other is a
# vector of size (2,).
ds.Mixture(
ds.Categorical([-0.5, 0.5]), # scalar batch
[
ds.Normal(
loc=1.0, scale=2.0), # scalar dist
ds.Normal(
loc=[1.0, 1.0], scale=[2.0, 2.0])
],
use_static_graph=self.use_static_graph)
with self.assertRaisesWithPredicateMatch(ValueError, r"Could not infer"):
cat_logits = array_ops.placeholder(shape=[1, None], dtype=dtypes.float32)
ds.Mixture(
ds.Categorical(cat_logits),
[ds.Normal(
loc=[1.0], scale=[2.0])],
use_static_graph=self.use_static_graph)
def testBrokenShapesDynamic(self):
with self.cached_session():
d0_param = array_ops.placeholder(dtype=dtypes.float32)
d1_param = array_ops.placeholder(dtype=dtypes.float32)
d = ds.Mixture(
ds.Categorical([0.1, 0.2]), [
ds.Normal(
loc=d0_param, scale=d0_param), ds.Normal(
loc=d1_param, scale=d1_param)
],
validate_args=True,
use_static_graph=self.use_static_graph)
if self.use_static_graph:
error_string = r"Shapes of all inputs must match"
else:
error_string = r"batch shape must match"
with self.assertRaisesOpError(error_string):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: [1.0]})
with self.assertRaisesOpError(error_string):
d.sample().eval(feed_dict={d0_param: [2.0, 3.0], d1_param: 1.0})
def testBrokenTypes(self):
with self.assertRaisesWithPredicateMatch(TypeError, "Categorical"):
ds.Mixture(None, [], use_static_graph=self.use_static_graph)
cat = ds.Categorical([0.3, 0.2])
# components must be a list of distributions
with self.assertRaisesWithPredicateMatch(
TypeError, "all .* must be Distribution instances"):
ds.Mixture(cat, [None], use_static_graph=self.use_static_graph)
with self.assertRaisesWithPredicateMatch(TypeError, "same dtype"):
ds.Mixture(
cat, [
ds.Normal(loc=[1.0], scale=[2.0]),
ds.Normal(loc=[np.float16(1.0)],
scale=[np.float16(2.0)]),
], use_static_graph=self.use_static_graph)
with self.assertRaisesWithPredicateMatch(ValueError, "non-empty list"):
ds.Mixture(ds.Categorical([0.3, 0.2]), None,
use_static_graph=self.use_static_graph)
# TODO(ebrevdo): once distribution Domains have been added, add a
# test to ensure that the domains of the distributions in a
# mixture are checked for equivalence.
def testMeanUnivariate(self):
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=2,
use_static_graph=self.use_static_graph)
mean = dist.mean()
self.assertEqual(batch_shape, mean.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape, mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testMeanMultivariate(self):
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,),
use_static_graph=self.use_static_graph)
mean = dist.mean()
self.assertEqual(batch_shape + (4,), mean.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_means = [d.mean() for d in dist.components]
mean_value, cat_probs_value, dist_means_value = sess.run(
[mean, cat_probs, dist_means])
self.assertEqual(batch_shape + (4,), mean_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# Add a new innermost dimension for broadcasting to mvn vector shape
cat_probs_value = [np.expand_dims(c_p, -1) for c_p in cat_probs_value]
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testStddevShapeUnivariate(self):
num_components = 2
    # This is the same shape test that is done in 'testMeanUnivariate'.
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=num_components,
use_static_graph=self.use_static_graph)
dev = dist.stddev()
self.assertEqual(batch_shape, dev.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_devs = [d.stddev() for d in dist.components]
dist_means = [d.mean() for d in dist.components]
res = sess.run([dev, cat_probs, dist_devs, dist_means])
dev_value, cat_probs_values, dist_devs_values, dist_means_values = res
# Manual computation of stddev.
batch_shape_res = cat_probs_values.shape[:-1]
event_shape_res = dist_devs_values[0].shape[len(batch_shape_res):]
stacked_mean_res = np.stack(dist_means_values, -1)
stacked_dev_res = np.stack(dist_devs_values, -1)
# Broadcast cat probs over event dimensions.
for _ in range(len(event_shape_res)):
cat_probs_values = np.expand_dims(cat_probs_values, len(batch_shape))
cat_probs_values = cat_probs_values + np.zeros_like(stacked_dev_res) # pylint: disable=g-no-augmented-assignment
# Perform stddev computation on a flattened batch.
flat_batch_manual_dev = _mixture_stddev_np(
np.reshape(cat_probs_values, [-1, num_components]),
np.reshape(stacked_mean_res, [-1, num_components]),
np.reshape(stacked_dev_res, [-1, num_components]))
# Reshape to full shape.
full_shape_res = list(batch_shape_res) + list(event_shape_res)
manual_dev = np.reshape(flat_batch_manual_dev, full_shape_res)
self.assertEqual(batch_shape, dev_value.shape)
self.assertAllClose(manual_dev, dev_value)
def testStddevShapeMultivariate(self):
num_components = 2
    # This is the same shape test that is done in 'testMeanMultivariate'.
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape,
num_components=num_components,
event_shape=(4,),
use_static_graph=self.use_static_graph)
dev = dist.stddev()
self.assertEqual(batch_shape + (4,), dev.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_devs = [d.stddev() for d in dist.components]
dist_means = [d.mean() for d in dist.components]
res = sess.run([dev, cat_probs, dist_devs, dist_means])
dev_value, cat_probs_values, dist_devs_values, dist_means_values = res
# Manual computation of stddev.
batch_shape_res = cat_probs_values.shape[:-1]
event_shape_res = dist_devs_values[0].shape[len(batch_shape_res):]
stacked_mean_res = np.stack(dist_means_values, -1)
stacked_dev_res = np.stack(dist_devs_values, -1)
# Broadcast cat probs over event dimensions.
for _ in range(len(event_shape_res)):
cat_probs_values = np.expand_dims(cat_probs_values, len(batch_shape))
cat_probs_values = cat_probs_values + np.zeros_like(stacked_dev_res) # pylint: disable=g-no-augmented-assignment
# Perform stddev computation on a flattened batch.
flat_batch_manual_dev = _mixture_stddev_np(
np.reshape(cat_probs_values, [-1, num_components]),
np.reshape(stacked_mean_res, [-1, num_components]),
np.reshape(stacked_dev_res, [-1, num_components]))
# Reshape to full shape.
full_shape_res = list(batch_shape_res) + list(event_shape_res)
manual_dev = np.reshape(flat_batch_manual_dev, full_shape_res)
self.assertEqual(tuple(full_shape_res), dev_value.shape)
self.assertAllClose(manual_dev, dev_value)
def testSpecificStddevValue(self):
cat_probs = np.array([0.5, 0.5])
component_means = np.array([-10, 0.1])
component_devs = np.array([0.05, 2.33])
ground_truth_stddev = 5.3120805
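    # Sanity check via the mixture-variance identity:
    #   var = 0.5 * (0.05**2 + (-10.)**2) + 0.5 * (2.33**2 + 0.1**2)
    #         - (0.5 * (-10.) + 0.5 * 0.1)**2
    #       = 50.00125 + 2.71945 - 24.5025 = 28.2182
    # and sqrt(28.2182) ~= 5.31208, matching the value above.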
mixture_dist = ds.Mixture(
cat=ds.Categorical(probs=cat_probs),
components=[
ds.Normal(loc=component_means[0],
scale=component_devs[0]),
ds.Normal(loc=component_means[1],
scale=component_devs[1]),
],
use_static_graph=self.use_static_graph)
mix_dev = mixture_dist.stddev()
with self.cached_session() as sess:
actual_stddev = sess.run(mix_dev)
self.assertAllClose(actual_stddev, ground_truth_stddev)
def testProbScalarUnivariate(self):
with self.cached_session() as sess:
dist = make_univariate_mixture(batch_shape=[], num_components=2,
use_static_graph=self.use_static_graph)
for x in [
np.array(
[1.0, 2.0], dtype=np.float32), np.array(
1.0, dtype=np.float32),
np.random.randn(3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbScalarMultivariate(self):
with self.cached_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[], num_components=2, event_shape=[3],
use_static_graph=self.use_static_graph)
for x in [
np.array(
[[-1.0, 0.0, 1.0], [0.5, 1.0, -0.3]], dtype=np.float32), np.array(
[-1.0, 0.0, 1.0], dtype=np.float32),
np.random.randn(2, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchUnivariate(self):
with self.cached_session() as sess:
dist = make_univariate_mixture(batch_shape=[2, 3], num_components=2,
use_static_graph=self.use_static_graph)
for x in [
np.random.randn(2, 3).astype(np.float32),
np.random.randn(4, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchMultivariate(self):
with self.cached_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[2, 3], num_components=2, event_shape=[4],
use_static_graph=self.use_static_graph)
for x in [
np.random.randn(2, 3, 4).astype(np.float32),
np.random.randn(4, 2, 3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testSampleScalarBatchUnivariate(self):
with self.cached_session() as sess:
num_components = 3
batch_shape = []
dist = make_univariate_mixture(
batch_shape=batch_shape, num_components=num_components,
use_static_graph=self.use_static_graph)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4,), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4,), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch univariate case: batch_size == 1, rank 1
if self.use_static_graph:
which_dist_samples = dist_sample_values[c][which_c]
else:
which_dist_samples = dist_sample_values[c][:size_c]
self.assertAllClose(which_dist_samples, sample_values[which_c])
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
# 5 component mixture.
logits = [-10.0, -5.0, 0.0, 5.0, 10.0]
mus = [-5.0, 0.0, 5.0, 4.0, 20.0]
sigmas = [0.1, 5.0, 3.0, 0.2, 4.0]
with self.cached_session():
n = 100
random_seed.set_random_seed(654321)
components = [
ds.Normal(
loc=mu, scale=sigma) for mu, sigma in zip(mus, sigmas)
]
cat = ds.Categorical(
logits, dtype=dtypes.int32, name="cat1")
dist1 = ds.Mixture(cat, components, name="mixture1",
use_static_graph=self.use_static_graph)
samples1 = dist1.sample(n, seed=123456).eval()
random_seed.set_random_seed(654321)
components2 = [
ds.Normal(
loc=mu, scale=sigma) for mu, sigma in zip(mus, sigmas)
]
cat2 = ds.Categorical(
logits, dtype=dtypes.int32, name="cat2")
dist2 = ds.Mixture(cat2, components2, name="mixture2",
use_static_graph=self.use_static_graph)
samples2 = dist2.sample(n, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testSampleScalarBatchMultivariate(self):
with self.cached_session() as sess:
num_components = 3
dist = make_multivariate_mixture(
batch_shape=[], num_components=num_components, event_shape=[2],
use_static_graph=self.use_static_graph)
n = 4
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4, 2), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch multivariate case: batch_size == 1, rank 2
if self.use_static_graph:
which_dist_samples = dist_sample_values[c][which_c, :]
else:
which_dist_samples = dist_sample_values[c][:size_c, :]
self.assertAllClose(which_dist_samples, sample_values[which_c, :])
def testSampleBatchUnivariate(self):
with self.cached_session() as sess:
num_components = 3
dist = make_univariate_mixture(
batch_shape=[2, 3], num_components=num_components,
use_static_graph=self.use_static_graph)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4, 2, 3), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2, 3), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
# Batch univariate case: batch_size == [2, 3], rank 3
if self.use_static_graph:
which_dist_samples = dist_sample_values[c][which_c_s, which_c_b0,
which_c_b1]
else:
which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
which_c_b1]
self.assertAllClose(which_dist_samples,
sample_values[which_c_s, which_c_b0, which_c_b1])
def _testSampleBatchMultivariate(self, fully_known_batch_shape):
with self.cached_session() as sess:
num_components = 3
if fully_known_batch_shape:
batch_shape = [2, 3]
batch_shape_tensor = [2, 3]
else:
batch_shape = [None, 3]
batch_shape_tensor = array_ops.placeholder(dtype=dtypes.int32)
dist = make_multivariate_mixture(
batch_shape=batch_shape,
num_components=num_components, event_shape=[4],
batch_shape_tensor=batch_shape_tensor,
use_static_graph=self.use_static_graph)
n = 5
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
if fully_known_batch_shape:
self.assertEqual((5, 2, 3, 4), samples.get_shape())
else:
self.assertEqual([5, None, 3, 4], samples.get_shape().as_list())
cat_samples = dist.cat.sample(n, seed=123)
if fully_known_batch_shape:
feed_dict = {}
else:
feed_dict = {batch_shape_tensor: [2, 3]}
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples],
feed_dict=feed_dict)
self.assertEqual((5, 2, 3, 4), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
        # Batch multivariate case: batch_size == [2, 3], rank 4
if self.use_static_graph:
which_dist_samples = dist_sample_values[c][which_c_s, which_c_b0,
which_c_b1, :]
else:
which_dist_samples = dist_sample_values[c][range(size_c), which_c_b0,
which_c_b1, :]
self.assertAllClose(which_dist_samples,
sample_values[which_c_s, which_c_b0, which_c_b1, :])
def testSampleBatchMultivariateFullyKnownBatchShape(self):
self._testSampleBatchMultivariate(fully_known_batch_shape=True)
def testSampleBatchMultivariateNotFullyKnownBatchShape(self):
self._testSampleBatchMultivariate(fully_known_batch_shape=False)
def testEntropyLowerBoundMultivariate(self):
with self.cached_session() as sess:
for batch_shape in ((), (2,), (2, 3)):
dist = make_multivariate_mixture(
batch_shape=batch_shape, num_components=2, event_shape=(4,),
use_static_graph=self.use_static_graph)
entropy_lower_bound = dist.entropy_lower_bound()
self.assertEqual(batch_shape, entropy_lower_bound.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_entropy = [d.entropy() for d in dist.components]
entropy_lower_bound_value, cat_probs_value, dist_entropy_value = (
sess.run([entropy_lower_bound, cat_probs, dist_entropy]))
self.assertEqual(batch_shape, entropy_lower_bound_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# entropy_lower_bound = sum_i pi_i entropy_i
# for i in num_components, batchwise.
true_entropy_lower_bound = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_entropy_value)])
self.assertAllClose(true_entropy_lower_bound, entropy_lower_bound_value)
def testCdfScalarUnivariate(self):
"""Tests CDF against scipy for a mixture of seven gaussians."""
# Construct a mixture of gaussians with seven components.
n_components = 7
# pre-softmax mixture probabilities.
mixture_weight_logits = np.random.uniform(
low=-1, high=1, size=(n_components,)).astype(np.float32)
def _scalar_univariate_softmax(x):
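      # Subtracting the max before exponentiating is the standard numerical
      # stabilization; it does not change the softmax output.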
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
# Construct the ds.Mixture object.
mixture_weights = _scalar_univariate_softmax(mixture_weight_logits)
means = [np.random.uniform(low=-10, high=10, size=()).astype(np.float32)
for _ in range(n_components)]
sigmas = [np.ones(shape=(), dtype=np.float32) for _ in range(n_components)]
cat_tf = ds.Categorical(probs=mixture_weights)
components_tf = [ds.Normal(loc=mu, scale=sigma)
for (mu, sigma) in zip(means, sigmas)]
mixture_tf = ds.Mixture(cat=cat_tf, components=components_tf,
use_static_graph=self.use_static_graph)
x_tensor = array_ops.placeholder(shape=(), dtype=dtypes.float32)
# These are two test cases to verify.
xs_to_check = [
np.array(1.0, dtype=np.float32),
np.array(np.random.randn()).astype(np.float32)
]
# Carry out the test for both d.cdf and exp(d.log_cdf).
x_cdf_tf = mixture_tf.cdf(x_tensor)
x_log_cdf_tf = mixture_tf.log_cdf(x_tensor)
with self.cached_session() as sess:
for x_feed in xs_to_check:
x_cdf_tf_result, x_log_cdf_tf_result = sess.run(
[x_cdf_tf, x_log_cdf_tf], feed_dict={x_tensor: x_feed})
# Compute the cdf with scipy.
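        # The mixture CDF is the weighted sum of component CDFs:
        #   F(x) = sum_i w_i * F_i(x), with w_i the categorical probabilities.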
scipy_component_cdfs = [stats.norm.cdf(x=x_feed, loc=mu, scale=sigma)
for (mu, sigma) in zip(means, sigmas)]
scipy_cdf_result = np.dot(mixture_weights,
np.array(scipy_component_cdfs))
self.assertAllClose(x_cdf_tf_result, scipy_cdf_result)
self.assertAllClose(np.exp(x_log_cdf_tf_result), scipy_cdf_result)
def testCdfBatchUnivariate(self):
"""Tests against scipy for a (batch of) mixture(s) of seven gaussians."""
n_components = 7
batch_size = 5
mixture_weight_logits = np.random.uniform(
low=-1, high=1, size=(batch_size, n_components)).astype(np.float32)
def _batch_univariate_softmax(x):
e_x = np.exp(x)
e_x_sum = np.expand_dims(np.sum(e_x, axis=1), axis=1)
return e_x / np.tile(e_x_sum, reps=[1, x.shape[1]])
psize = (batch_size,)
mixture_weights = _batch_univariate_softmax(mixture_weight_logits)
means = [np.random.uniform(low=-10, high=10, size=psize).astype(np.float32)
for _ in range(n_components)]
sigmas = [np.ones(shape=psize, dtype=np.float32)
for _ in range(n_components)]
cat_tf = ds.Categorical(probs=mixture_weights)
components_tf = [ds.Normal(loc=mu, scale=sigma)
for (mu, sigma) in zip(means, sigmas)]
mixture_tf = ds.Mixture(cat=cat_tf, components=components_tf,
use_static_graph=self.use_static_graph)
x_tensor = array_ops.placeholder(shape=psize, dtype=dtypes.float32)
xs_to_check = [
np.array([1.0, 5.9, -3, 0.0, 0.0], dtype=np.float32),
np.random.randn(batch_size).astype(np.float32)
]
x_cdf_tf = mixture_tf.cdf(x_tensor)
x_log_cdf_tf = mixture_tf.log_cdf(x_tensor)
with self.cached_session() as sess:
for x_feed in xs_to_check:
x_cdf_tf_result, x_log_cdf_tf_result = sess.run(
[x_cdf_tf, x_log_cdf_tf],
feed_dict={x_tensor: x_feed})
# Compute the cdf with scipy.
scipy_component_cdfs = [stats.norm.cdf(x=x_feed, loc=mu, scale=sigma)
for (mu, sigma) in zip(means, sigmas)]
weights_and_cdfs = zip(np.transpose(mixture_weights, axes=[1, 0]),
scipy_component_cdfs)
final_cdf_probs_per_component = [
np.multiply(c_p_value, d_cdf_value)
for (c_p_value, d_cdf_value) in weights_and_cdfs]
scipy_cdf_result = np.sum(final_cdf_probs_per_component, axis=0)
self.assertAllClose(x_cdf_tf_result, scipy_cdf_result)
self.assertAllClose(np.exp(x_log_cdf_tf_result), scipy_cdf_result)
def testSampleBimixGamma(self):
"""Tests a bug in the underlying tf.Gamma op.
    Mixture's use of dynamic partition requires that `random_gamma` correctly
    return an empty `Tensor`.
"""
with self.cached_session():
gm = ds.Mixture(
cat=ds.Categorical(probs=[.3, .7]),
components=[ds.Gamma(1., 2.),
ds.Gamma(2., 1.)],
use_static_graph=self.use_static_graph)
x_ = gm.sample().eval()
self.assertAllEqual([], x_.shape)
class MixtureStaticSampleTest(MixtureTest):
use_static_graph = True
class MixtureBenchmark(test.Benchmark):
use_static_graph = False
def _runSamplingBenchmark(self, name, create_distribution, use_gpu,
num_components, batch_size, num_features,
sample_size):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
np.random.seed(127)
with session.Session(config=config, graph=ops.Graph()) as sess:
random_seed.set_random_seed(0)
with ops.device("/device:GPU:0" if use_gpu else "/cpu:0"):
mixture = create_distribution(
num_components=num_components,
batch_size=batch_size,
num_features=num_features)
sample_op = mixture.sample(sample_size).op
sess.run(variables.global_variables_initializer())
reported = self.run_op_benchmark(
sess,
sample_op,
min_iters=10,
name=("%s_%s_components_%d_batch_%d_features_%d_sample_%d" %
(name, use_gpu, num_components, batch_size, num_features,
sample_size)))
logging.vlog(2, "\t".join(["%s", "%d", "%d", "%d", "%d", "%g"]) % (
use_gpu, num_components, batch_size, num_features, sample_size,
reported["wall_time"]))
def benchmarkSamplingMVNDiag(self):
logging.vlog(
2, "mvn_diag\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")
def create_distribution(batch_size, num_components, num_features):
cat = ds.Categorical(
logits=np.random.randn(batch_size, num_components))
mus = [
variables.Variable(np.random.randn(batch_size, num_features))
for _ in range(num_components)
]
sigmas = [
variables.Variable(np.random.rand(batch_size, num_features))
for _ in range(num_components)
]
components = list(
ds.MultivariateNormalDiag(
loc=mu, scale_diag=sigma) for (mu, sigma) in zip(mus, sigmas))
return ds.Mixture(cat, components, use_static_graph=self.use_static_graph)
for use_gpu in False, True:
if use_gpu and not test.is_gpu_available():
continue
for num_components in 1, 8, 16:
for batch_size in 1, 32:
for num_features in 1, 64, 512:
for sample_size in 1, 32, 128:
self._runSamplingBenchmark(
"mvn_diag",
create_distribution=create_distribution,
use_gpu=use_gpu,
num_components=num_components,
batch_size=batch_size,
num_features=num_features,
sample_size=sample_size)
def benchmarkSamplingMVNFull(self):
logging.vlog(
2, "mvn_full\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")
def psd(x):
"""Construct batch-wise PSD matrices."""
return np.stack([np.dot(np.transpose(z), z) for z in x])
def create_distribution(batch_size, num_components, num_features):
cat = ds.Categorical(
logits=np.random.randn(batch_size, num_components))
mus = [
variables.Variable(np.random.randn(batch_size, num_features))
for _ in range(num_components)
]
sigmas = [
variables.Variable(
psd(np.random.rand(batch_size, num_features, num_features)))
for _ in range(num_components)
]
components = list(
ds.MultivariateNormalTriL(
loc=mu, scale_tril=linalg_ops.cholesky(sigma))
for (mu, sigma) in zip(mus, sigmas))
return ds.Mixture(cat, components, use_static_graph=self.use_static_graph)
for use_gpu in False, True:
if use_gpu and not test.is_gpu_available():
continue
for num_components in 1, 8, 16:
for batch_size in 1, 32:
for num_features in 1, 64, 512:
for sample_size in 1, 32, 128:
self._runSamplingBenchmark(
"mvn_full",
create_distribution=create_distribution,
use_gpu=use_gpu,
num_components=num_components,
batch_size=batch_size,
num_features=num_features,
sample_size=sample_size)
class MixtureStaticSampleBenchmark(MixtureBenchmark):
use_static_graph = True
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/mixture_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SinhArcsinh."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(123)
class SinhArcsinhTest(test.TestCase):
def test_default_is_same_as_normal(self):
b = 10
scale = rng.rand(b) + 0.5
loc = rng.randn(b)
with self.cached_session() as sess:
norm = ds.Normal(
loc=loc,
scale=scale,
validate_args=True)
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
validate_args=True)
x = rng.randn(5, b)
norm_pdf, sasnorm_pdf = sess.run([norm.prob(x), sasnorm.prob(x)])
self.assertAllClose(norm_pdf, sasnorm_pdf)
norm_samps, sasnorm_samps = sess.run(
[norm.sample(10000, seed=0),
sasnorm.sample(10000, seed=0)])
self.assertAllClose(loc, sasnorm_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
norm_samps.mean(axis=0), sasnorm_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
norm_samps.std(axis=0), sasnorm_samps.std(axis=0), atol=0.1)
def test_broadcast_params_dynamic(self):
with self.cached_session() as sess:
loc = array_ops.placeholder(dtypes.float64)
scale = array_ops.placeholder(dtypes.float64)
skewness = array_ops.placeholder(dtypes.float64)
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
skewness=skewness,
validate_args=True)
samp = sess.run(sasnorm.sample(),
feed_dict={loc: rng.rand(5),
scale: np.float64(rng.rand()), # Scalar
skewness: rng.rand(5)})
self.assertAllEqual((5,), samp.shape)
def test_passing_in_laplace_plus_defaults_is_same_as_laplace(self):
b = 10
scale = rng.rand(b) + 0.5
loc = rng.randn(b)
with self.cached_session() as sess:
lap = ds.Laplace(
loc=loc,
scale=scale,
validate_args=True)
saslap = ds.SinhArcsinh(
loc=loc,
scale=scale,
distribution=ds.Laplace(np.float64(0), np.float64(1)),
validate_args=True)
x = rng.randn(5, b)
lap_pdf, saslap_pdf = sess.run([lap.prob(x), saslap.prob(x)])
self.assertAllClose(lap_pdf, saslap_pdf)
lap_samps, saslap_samps = sess.run(
[lap.sample(10000, seed=0),
saslap.sample(10000, seed=0)])
self.assertAllClose(loc, saslap_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
lap_samps.mean(axis=0), saslap_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
lap_samps.std(axis=0), saslap_samps.std(axis=0), atol=0.1)
def test_tailweight_small_gives_fewer_outliers_than_normal(self):
batch_size = 10
scale = rng.rand(batch_size) + 0.5
loc = 0.1 * rng.randn(batch_size)
with self.cached_session() as sess:
norm = ds.Normal(
loc=loc,
scale=scale,
validate_args=True)
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
tailweight=0.1,
validate_args=True)
# sasnorm.pdf(x) is smaller on outliers (+-10 are outliers)
x = np.float64([[-10] * batch_size, [10] * batch_size]) # Shape [2, 10]
norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)])
np.testing.assert_array_less(sasnorm_lp, norm_lp)
      # The 0.1% and 99.9% quantiles are outliers, and should be more extreme
      # in the normal. The 97.725% quantiles should be approximately equal.
norm_samps, sasnorm_samps = sess.run(
[norm.sample(int(5e5), seed=1),
sasnorm.sample(int(5e5), seed=1)])
np.testing.assert_array_less(
np.percentile(norm_samps, 0.1, axis=0),
np.percentile(sasnorm_samps, 0.1, axis=0))
np.testing.assert_array_less(
np.percentile(sasnorm_samps, 99.9, axis=0),
np.percentile(norm_samps, 99.9, axis=0))
# 100. * sp.stats.norm.cdf(2.)
q = 100 * 0.97724986805182079
self.assertAllClose(
np.percentile(sasnorm_samps, q, axis=0),
np.percentile(norm_samps, q, axis=0),
rtol=0.03)
self.assertAllClose(
np.percentile(sasnorm_samps, 100 - q, axis=0),
np.percentile(norm_samps, 100 - q, axis=0),
rtol=0.03)
def test_tailweight_large_gives_more_outliers_than_normal(self):
batch_size = 10
scale = rng.rand(batch_size) + 0.5
loc = np.float64(0.)
with self.cached_session() as sess:
norm = ds.Normal(
loc=loc,
scale=scale,
validate_args=True)
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
tailweight=3.,
validate_args=True)
# norm.pdf(x) is smaller on outliers (+-10 are outliers)
x = np.float64([[-10] * batch_size, [10] * batch_size]) # Shape [2, 10]
norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)])
np.testing.assert_array_less(norm_lp, sasnorm_lp)
      # The 0.1% and 99.9% quantiles are outliers, and should be more extreme
      # in the sasnorm. The 97.725% quantiles should be approximately equal.
norm_samps, sasnorm_samps = sess.run(
[norm.sample(int(5e5), seed=2),
sasnorm.sample(int(5e5), seed=2)])
np.testing.assert_array_less(
np.percentile(sasnorm_samps, 0.1, axis=0),
np.percentile(norm_samps, 0.1, axis=0))
np.testing.assert_array_less(
np.percentile(norm_samps, 99.9, axis=0),
np.percentile(sasnorm_samps, 99.9, axis=0))
# 100. * sp.stats.norm.cdf(2.)
q = 100 * 0.97724986805182079
self.assertAllClose(
np.percentile(sasnorm_samps, q, axis=0),
np.percentile(norm_samps, q, axis=0),
rtol=0.03)
self.assertAllClose(
np.percentile(sasnorm_samps, 100 - q, axis=0),
np.percentile(norm_samps, 100 - q, axis=0),
rtol=0.03)
def test_positive_skewness_moves_mean_to_the_right(self):
batch_size = 10
scale = rng.rand(batch_size) + 0.5
loc = rng.randn(batch_size)
with self.cached_session() as sess:
sasnorm = ds.SinhArcsinh(
loc=loc,
scale=scale,
skewness=3.0,
validate_args=True)
sasnorm_samps = sess.run(sasnorm.sample(10000, seed=4))
np.testing.assert_array_less(loc, sasnorm_samps.mean(axis=0))
def test_pdf_reflected_for_negative_skewness(self):
with self.cached_session() as sess:
sas_pos_skew = ds.SinhArcsinh(
loc=0.,
scale=1.,
skewness=2.,
validate_args=True)
sas_neg_skew = ds.SinhArcsinh(
loc=0.,
scale=1.,
skewness=-2.,
validate_args=True)
x = np.linspace(-2, 2, num=5).astype(np.float32)
self.assertAllClose(
*sess.run([sas_pos_skew.prob(x), sas_neg_skew.prob(x[::-1])]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/sinh_arcsinh_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VectorSinhArcsinhDiag."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.python.platform import test
ds = distributions
rng = np.random.RandomState(123)
class VectorSinhArcsinhDiagTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
def test_default_is_same_as_normal(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.0)
loc = rng.randn(d)
with self.cached_session() as sess:
norm = ds.MultivariateNormalDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
sasnorm = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
x = rng.randn(5, d)
norm_pdf, sasnorm_pdf = sess.run([norm.prob(x), sasnorm.prob(x)])
self.assertAllClose(norm_pdf, sasnorm_pdf)
norm_samps, sasnorm_samps = sess.run(
[norm.sample(10000, seed=0),
sasnorm.sample(10000, seed=0)])
self.assertAllClose(loc, sasnorm_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
norm_samps.mean(axis=0), sasnorm_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
norm_samps.std(axis=0), sasnorm_samps.std(axis=0), atol=0.1)
def test_passing_in_laplace_plus_defaults_is_same_as_laplace(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.2)
loc = rng.randn(d)
with self.cached_session() as sess:
vlap = ds.VectorLaplaceDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
sasvlap = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
distribution=ds.Laplace(np.float64(0.), np.float64(1.)),
validate_args=True)
x = rng.randn(5, d)
vlap_pdf, sasvlap_pdf = sess.run([vlap.prob(x), sasvlap.prob(x)])
self.assertAllClose(vlap_pdf, sasvlap_pdf)
vlap_samps, sasvlap_samps = sess.run(
[vlap.sample(10000, seed=0),
sasvlap.sample(10000, seed=0)])
self.assertAllClose(loc, sasvlap_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
vlap_samps.mean(axis=0), sasvlap_samps.mean(axis=0), atol=0.1)
self.assertAllClose(
vlap_samps.std(axis=0), sasvlap_samps.std(axis=0), atol=0.1)
def test_tailweight_small_gives_fewer_outliers_than_normal(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(0.9)
loc = rng.randn(d)
with self.cached_session() as sess:
norm = ds.MultivariateNormalDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
sasnorm = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
tailweight=0.1,
validate_args=True)
# sasnorm.pdf(x) is smaller on outliers (+-10 are outliers)
x = np.float64([[-10] * d, [10] * d]) # Shape [2, 10]
norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)])
np.testing.assert_array_less(sasnorm_lp, norm_lp)
      # The 0.1% and 99.9% quantiles are outliers, and should be more extreme
      # in the normal. The 97.725% quantiles should be approximately equal.
norm_samps, sasnorm_samps = sess.run(
[norm.sample(int(5e5), seed=1),
sasnorm.sample(int(5e5), seed=1)])
np.testing.assert_array_less(
np.percentile(norm_samps, 0.1, axis=0),
np.percentile(sasnorm_samps, 0.1, axis=0))
np.testing.assert_array_less(
np.percentile(sasnorm_samps, 99.9, axis=0),
np.percentile(norm_samps, 99.9, axis=0))
# 100. * sp.stats.norm.cdf(2.)
q = 100 * 0.97724986805182079
self.assertAllClose(
np.percentile(sasnorm_samps, q, axis=0),
np.percentile(norm_samps, q, axis=0),
rtol=0.03)
self.assertAllClose(
np.percentile(sasnorm_samps, 100 - q, axis=0),
np.percentile(norm_samps, 100 - q, axis=0),
rtol=0.03)
def test_tailweight_large_gives_more_outliers_than_normal(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.0)
loc = rng.randn(d)
with self.cached_session() as sess:
norm = ds.MultivariateNormalDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=True)
sasnorm = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
tailweight=3.,
validate_args=True)
# norm.pdf(x) is smaller on outliers (+-10 are outliers)
x = np.float64([[-10] * d, [10] * d]) # Shape [2, 10]
norm_lp, sasnorm_lp = sess.run([norm.log_prob(x), sasnorm.log_prob(x)])
np.testing.assert_array_less(norm_lp, sasnorm_lp)
      # The 0.1% and 99.9% quantiles are outliers, and should be more extreme
      # in the sasnorm. The 97.725% quantiles should be approximately equal.
norm_samps, sasnorm_samps = sess.run(
[norm.sample(int(5e5), seed=2),
sasnorm.sample(int(5e5), seed=2)])
np.testing.assert_array_less(
np.percentile(sasnorm_samps, 0.1, axis=0),
np.percentile(norm_samps, 0.1, axis=0))
np.testing.assert_array_less(
np.percentile(norm_samps, 99.9, axis=0),
np.percentile(sasnorm_samps, 99.9, axis=0))
# 100. * sp.stats.norm.cdf(2.)
q = 100 * 0.97724986805182079
self.assertAllClose(
np.percentile(sasnorm_samps, q, axis=0),
np.percentile(norm_samps, q, axis=0),
rtol=0.03)
self.assertAllClose(
np.percentile(sasnorm_samps, 100 - q, axis=0),
np.percentile(norm_samps, 100 - q, axis=0),
rtol=0.03)
def test_positive_skewness_moves_mean_to_the_right(self):
d = 10
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.0)
loc = rng.randn(d)
with self.cached_session() as sess:
sasnorm = ds.VectorSinhArcsinhDiag(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
skewness=3.0,
validate_args=True)
sasnorm_samps = sess.run(sasnorm.sample(10000, seed=4))
np.testing.assert_array_less(loc, sasnorm_samps.mean(axis=0))
def test_consistency_random_parameters_with_batch_dim(self):
b, d = 5, 2
scale_diag = rng.rand(b, d)
scale_identity_multiplier = np.float64(1.1)
with self.cached_session() as sess:
sasnorm = ds.VectorSinhArcsinhDiag(
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
skewness=rng.randn(d) * 0.5,
tailweight=rng.rand(b, d) + 0.7,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, sasnorm, radius=1.0, center=0., rtol=0.1)
self.run_test_sample_consistent_log_prob(
sess.run,
sasnorm,
radius=1.0,
center=-0.15,
rtol=0.1)
self.run_test_sample_consistent_log_prob(
sess.run,
sasnorm,
radius=1.0,
center=0.15,
rtol=0.1)
def test_consistency_random_parameters_no_batch_dims(self):
d = 3
scale_diag = rng.rand(d)
scale_identity_multiplier = np.float64(1.1)
with self.cached_session() as sess:
sasnorm = ds.VectorSinhArcsinhDiag(
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
skewness=rng.randn(d) * 0.5,
tailweight=rng.rand(d) + 0.7,
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess.run, sasnorm, radius=1.0, center=0., rtol=0.1)
self.run_test_sample_consistent_log_prob(
sess.run,
sasnorm,
radius=1.0,
center=-0.15,
rtol=0.1)
self.run_test_sample_consistent_log_prob(
sess.run,
sasnorm,
radius=1.0,
center=0.15,
rtol=0.1)
def test_pdf_reflected_for_negative_skewness(self):
with self.cached_session() as sess:
sas_pos_skew = ds.VectorSinhArcsinhDiag(
loc=[0.],
scale_identity_multiplier=1.,
skewness=2.,
validate_args=True)
sas_neg_skew = ds.VectorSinhArcsinhDiag(
loc=[0.],
scale_identity_multiplier=1.,
skewness=-2.,
validate_args=True)
x = np.linspace(-2, 2, num=5).astype(np.float32).reshape(5, 1)
self.assertAllClose(
*sess.run([sas_pos_skew.prob(x), sas_neg_skew.prob(x[::-1])]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/vector_sinh_arcsinh_diag_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
tfd = distributions
class DistributionTest(test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
tfd.Normal,
tfd.Bernoulli,
tfd.Beta,
tfd.Chi2,
tfd.Exponential,
tfd.Gamma,
tfd.InverseGamma,
tfd.Laplace,
tfd.StudentT,
tfd.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.cached_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, random_ops.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape,
array_ops.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
array_ops.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.cached_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = tfd.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.cached_session():
normal = tfd.Normal(loc=1., scale=2., validate_args=True)
unused_normal_copy = normal.copy(validate_args=False)
base_params = normal.parameters.copy()
copy_params = normal.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop("validate_args"), copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.cached_session():
mu = 1.
sigma = 2.
normal = tfd.Normal(mu, sigma, validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
normal = tfd.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([mu], [sigma], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
mvn = tfd.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertTrue(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertFalse(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
# Test case 3.
x = array_ops.placeholder(dtype=dtypes.int32)
is_scalar = normal._is_scalar_helper(x.get_shape(),
lambda: array_ops.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
def _GetFakeDistribution(self):
class FakeDistribution(tfd.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
return FakeDistribution
def testSampleShapeHints(self):
fake_distribution = self._GetFakeDistribution()
with self.cached_session():
# Make a new session since we're playing with static shapes. [And below.]
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
      # We use as_list since TensorShape comparison does not work correctly for
      # unknown values, i.e., Dimension(None).
self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
with self.cached_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
def testNameScopeWorksCorrectly(self):
x = tfd.Normal(loc=0., scale=1., name="x")
x_duplicate = tfd.Normal(loc=0., scale=1., name="x")
with ops.name_scope("y") as name:
y = tfd.Bernoulli(logits=0., name=name)
x_sample = x.sample(name="custom_sample")
x_sample_duplicate = x.sample(name="custom_sample")
x_log_prob = x.log_prob(0., name="custom_log_prob")
x_duplicate_sample = x_duplicate.sample(name="custom_sample")
self.assertEqual(x.name, "x/")
self.assertEqual(x_duplicate.name, "x_1/")
self.assertEqual(y.name, "y/")
self.assertTrue(x_sample.name.startswith("x/custom_sample"))
self.assertTrue(x_sample_duplicate.name.startswith("x/custom_sample_1"))
self.assertTrue(x_log_prob.name.startswith("x/custom_log_prob"))
self.assertTrue(x_duplicate_sample.name.startswith(
"x_1/custom_sample"))
def testStrWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("tfp.distributions.Normal("
"\"Normal/\", "
"batch_shape=(), "
"event_shape=(), "
"dtype=float16)"), # Got the dtype right.
str(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("tfp.distributions.Chi2("
"\"silly/\", " # What a silly name that is!
"batch_shape=(2,), "
"event_shape=(), "
"dtype=float32)"),
str(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("tfp.distributions.Exponential(\"Exponential/\", "
# No batch shape.
"event_shape=(), "
"dtype=float32)"),
str(exp))
def testStrWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("tfp.distributions.MultivariateNormalDiag("
"\"MVN/\", "
"batch_shape=(2,), "
"event_shape=(2,), "
"dtype=float64)"),
str(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
if mvn_dynamic.batch_shape._v2_behavior:
self.assertEqual(
("tfp.distributions.MultivariateNormalDiag("
"\"MVN2/\", "
"batch_shape=(None,), " # Partially known.
"event_shape=(3,), "
"dtype=float32)"),
str(mvn_dynamic))
else:
self.assertEqual(
("tfp.distributions.MultivariateNormalDiag("
"\"MVN2/\", "
"batch_shape=(?,), " # Partially known.
"event_shape=(3,), "
"dtype=float32)"),
str(mvn_dynamic))
def testReprWorksCorrectlyScalar(self):
normal = tfd.Normal(loc=np.float16(0), scale=np.float16(1))
self.assertEqual(
("<tfp.distributions.Normal"
" 'Normal/'"
" batch_shape=()"
" event_shape=()"
" dtype=float16>"), # Got the dtype right.
repr(normal))
chi2 = tfd.Chi2(df=np.float32([1., 2.]), name="silly")
self.assertEqual(
("<tfp.distributions.Chi2"
" 'silly/'" # What a silly name that is!
" batch_shape=(2,)"
" event_shape=()"
" dtype=float32>"),
repr(chi2))
exp = tfd.Exponential(rate=array_ops.placeholder(dtype=dtypes.float32))
self.assertEqual(
("<tfp.distributions.Exponential"
" 'Exponential/'"
" batch_shape=<unknown>"
" event_shape=()"
" dtype=float32>"),
repr(exp))
def testReprWorksCorrectlyMultivariate(self):
mvn_static = tfd.MultivariateNormalDiag(
loc=np.zeros([2, 2]), name="MVN")
self.assertEqual(
("<tfp.distributions.MultivariateNormalDiag"
" 'MVN/'"
" batch_shape=(2,)"
" event_shape=(2,)"
" dtype=float64>"),
repr(mvn_static))
mvn_dynamic = tfd.MultivariateNormalDiag(
loc=array_ops.placeholder(shape=[None, 3], dtype=dtypes.float32),
name="MVN2")
if mvn_dynamic.batch_shape._v2_behavior:
self.assertEqual(
("<tfp.distributions.MultivariateNormalDiag"
" 'MVN2/'"
" batch_shape=(None,)" # Partially known.
" event_shape=(3,)"
" dtype=float32>"),
repr(mvn_dynamic))
else:
self.assertEqual(
("<tfp.distributions.MultivariateNormalDiag"
" 'MVN2/'"
" batch_shape=(?,)" # Partially known.
" event_shape=(3,)"
" dtype=float32>"),
repr(mvn_dynamic))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops import binomial
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class BinomialTest(test.TestCase):
def testSimpleShapes(self):
with self.cached_session():
p = np.float32(np.random.beta(1, 1))
binom = binomial.Binomial(total_count=1., probs=p)
self.assertAllEqual([], binom.event_shape_tensor().eval())
self.assertAllEqual([], binom.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([]), binom.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), binom.batch_shape)
def testComplexShapes(self):
with self.cached_session():
p = np.random.beta(1, 1, size=(3, 2)).astype(np.float32)
n = [[3., 2], [4, 5], [6, 7]]
binom = binomial.Binomial(total_count=n, probs=p)
self.assertAllEqual([], binom.event_shape_tensor().eval())
self.assertAllEqual([3, 2], binom.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([]), binom.event_shape)
self.assertEqual(
tensor_shape.TensorShape([3, 2]), binom.batch_shape)
def testNProperty(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.cached_session():
binom = binomial.Binomial(total_count=n, probs=p)
self.assertEqual((2, 1), binom.total_count.get_shape())
self.assertAllClose(n, binom.total_count.eval())
def testPProperty(self):
p = [[0.1, 0.2, 0.7]]
with self.cached_session():
binom = binomial.Binomial(total_count=3., probs=p)
self.assertEqual((1, 3), binom.probs.get_shape())
self.assertEqual((1, 3), binom.logits.get_shape())
self.assertAllClose(p, binom.probs.eval())
def testLogitsProperty(self):
logits = [[0., 9., -0.5]]
with self.cached_session():
binom = binomial.Binomial(total_count=3., logits=logits)
self.assertEqual((1, 3), binom.probs.get_shape())
self.assertEqual((1, 3), binom.logits.get_shape())
self.assertAllClose(logits, binom.logits.eval())
def testPmfAndCdfNandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
binom = binomial.Binomial(total_count=n, probs=p, validate_args=True)
binom.prob([2., 3, 2]).eval()
binom.prob([3., 1, 2]).eval()
binom.cdf([2., 3, 2]).eval()
binom.cdf([3., 1, 2]).eval()
with self.assertRaisesOpError("Condition x >= 0.*"):
binom.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError("Condition x <= y.*"):
binom.prob([7., 3, 0]).eval()
with self.assertRaisesOpError("Condition x >= 0.*"):
binom.cdf([-1., 4, 2]).eval()
with self.assertRaisesOpError("Condition x <= y.*"):
binom.cdf([7., 3, 0]).eval()
def testPmfAndCdfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
# No errors with integer n.
binom = binomial.Binomial(total_count=n, probs=p, validate_args=True)
binom.prob([2., 3, 2]).eval()
binom.prob([3., 1, 2]).eval()
binom.cdf([2., 3, 2]).eval()
binom.cdf([3., 1, 2]).eval()
placeholder = array_ops.placeholder(dtypes.float32)
# Both equality and integer checking fail.
with self.assertRaisesOpError(
"cannot contain fractional components."):
binom.prob(placeholder).eval(feed_dict={placeholder: [1.0, 2.5, 1.5]})
with self.assertRaisesOpError(
"cannot contain fractional components."):
binom.cdf(placeholder).eval(feed_dict={placeholder: [1.0, 2.5, 1.5]})
binom = binomial.Binomial(total_count=n, probs=p, validate_args=False)
binom.prob([1., 2., 3.]).eval()
binom.cdf([1., 2., 3.]).eval()
# Non-integer arguments work.
binom.prob([1.0, 2.5, 1.5]).eval()
binom.cdf([1.0, 2.5, 1.5]).eval()
def testPmfAndCdfBothZeroBatches(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = 0.5
counts = 1.
binom = binomial.Binomial(total_count=1., probs=p)
pmf = binom.prob(counts)
cdf = binom.cdf(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertAllClose(stats.binom.cdf(counts, n=1, p=p), cdf.eval())
self.assertEqual((), pmf.get_shape())
self.assertEqual((), cdf.get_shape())
def testPmfAndCdfBothZeroBatchesNontrivialN(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = 0.1
counts = 3.
binom = binomial.Binomial(total_count=5., probs=p)
pmf = binom.prob(counts)
cdf = binom.cdf(counts)
self.assertAllClose(stats.binom.pmf(counts, n=5., p=p), pmf.eval())
self.assertAllClose(stats.binom.cdf(counts, n=5., p=p), cdf.eval())
self.assertEqual((), pmf.get_shape())
self.assertEqual((), cdf.get_shape())
def testPmfAndCdfPStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9]]
counts = [[1., 2.]]
binom = binomial.Binomial(total_count=3., probs=p)
pmf = binom.prob(counts)
cdf = binom.cdf(counts)
self.assertAllClose(stats.binom.pmf(counts, n=3., p=p), pmf.eval())
self.assertAllClose(stats.binom.cdf(counts, n=3., p=p), cdf.eval())
self.assertEqual((1, 2), pmf.get_shape())
self.assertEqual((1, 2), cdf.get_shape())
def testPmfAndCdfPStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [0.1, 0.4]
counts = [[1.], [0.]]
binom = binomial.Binomial(total_count=1., probs=p)
pmf = binom.prob(counts)
cdf = binom.cdf(counts)
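      # With total_count=1, P(X=1) = p and P(X=0) = 1 - p; p broadcasts
      # against the column of counts to give a (2, 2) result.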
self.assertAllClose([[0.1, 0.4], [0.9, 0.6]], pmf.eval())
self.assertAllClose([[1.0, 1.0], [0.9, 0.6]], cdf.eval())
self.assertEqual((2, 2), pmf.get_shape())
self.assertEqual((2, 2), cdf.get_shape())
def testBinomialMean(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
binom = binomial.Binomial(total_count=n, probs=p)
expected_means = stats.binom.mean(n, p)
self.assertEqual((3,), binom.mean().get_shape())
self.assertAllClose(expected_means, binom.mean().eval())
def testBinomialVariance(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
binom = binomial.Binomial(total_count=n, probs=p)
expected_variances = stats.binom.var(n, p)
self.assertEqual((3,), binom.variance().get_shape())
self.assertAllClose(expected_variances, binom.variance().eval())
def testBinomialMode(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
binom = binomial.Binomial(total_count=n, probs=p)
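      # The binomial mode is floor((n + 1) * p):
      # floor(0.6) = 0, floor(1.2) = 1, floor(4.2) = 4.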
expected_modes = [0., 1, 4]
self.assertEqual((3,), binom.mode().get_shape())
self.assertAllClose(expected_modes, binom.mode().eval())
def testBinomialMultipleMode(self):
with self.cached_session():
n = 9.
p = [0.1, 0.2, 0.7]
binom = binomial.Binomial(total_count=n, probs=p)
# For the case where (n + 1) * p is an integer, the modes are:
# (n + 1) * p and (n + 1) * p - 1. In this case, we get back
# the larger of the two modes.
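      # Here (n + 1) * p = [1., 2., 7.], so we get back [1., 2., 7.].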
expected_modes = [1., 2, 7]
self.assertEqual((3,), binom.mode().get_shape())
self.assertAllClose(expected_modes, binom.mode().eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/binomial_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import deterministic as deterministic_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class DeterministicTest(test.TestCase):
def testShape(self):
with self.cached_session():
loc = rng.rand(2, 3, 4)
deterministic = deterministic_lib.Deterministic(loc)
self.assertAllEqual(deterministic.batch_shape_tensor().eval(), (2, 3, 4))
self.assertAllEqual(deterministic.batch_shape, (2, 3, 4))
self.assertAllEqual(deterministic.event_shape_tensor().eval(), [])
self.assertEqual(deterministic.event_shape, tensor_shape.TensorShape([]))
def testInvalidTolRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
deterministic = deterministic_lib.Deterministic(
loc, atol=-1, validate_args=True)
with self.cached_session():
with self.assertRaisesOpError("Condition x >= 0"):
deterministic.prob(0.).eval()
def testProbWithNoBatchDimsIntegerType(self):
deterministic = deterministic_lib.Deterministic(0)
with self.cached_session():
self.assertAllClose(1, deterministic.prob(0).eval())
self.assertAllClose(0, deterministic.prob(2).eval())
self.assertAllClose([1, 0], deterministic.prob([0, 2]).eval())
def testProbWithNoBatchDims(self):
deterministic = deterministic_lib.Deterministic(0.)
with self.cached_session():
self.assertAllClose(1., deterministic.prob(0.).eval())
self.assertAllClose(0., deterministic.prob(2.).eval())
self.assertAllClose([1., 0.], deterministic.prob([0., 2.]).eval())
def testProbWithDefaultTol(self):
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
deterministic = deterministic_lib.Deterministic(loc)
expected_prob = [[1., 0.], [0., 1.]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATol(self):
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
deterministic = deterministic_lib.Deterministic(loc, atol=0.05)
expected_prob = [[1., 0.], [1., 1.]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATolIntegerType(self):
loc = [[0, 1], [2, 3]]
x = [[0, 2], [4, 2]]
deterministic = deterministic_lib.Deterministic(loc, atol=1)
expected_prob = [[1, 1], [0, 1]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTol(self):
loc = [[0., 1.], [100., 100.]]
x = [[0., 1.1], [100.1, 103.]]
deterministic = deterministic_lib.Deterministic(loc, rtol=0.01)
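    # The allowed deviation is atol + rtol * |loc|, i.e. [0., 0.01] for the
    # first row and [1., 1.] for the second.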
expected_prob = [[1., 0.], [1., 0.]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 2), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTolIntegerType(self):
loc = [[10, 10, 10], [10, 10, 10]]
x = [[10, 20, 30], [10, 20, 30]]
# Batch 0 will have rtol = 0
# Batch 1 will have rtol = 1 (100% slack allowed)
deterministic = deterministic_lib.Deterministic(loc, rtol=[[0], [1]])
expected_prob = [[1, 0, 0], [1, 1, 0]]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((2, 3), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testCdfWithDefaultTol(self):
loc = [[0., 0.], [0., 0.]]
x = [[-1., -0.1], [-0.01, 1.000001]]
deterministic = deterministic_lib.Deterministic(loc)
expected_cdf = [[0., 0.], [0., 1.]]
with self.cached_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testCdfWithNonzeroATol(self):
loc = [[0., 0.], [0., 0.]]
x = [[-1., -0.1], [-0.01, 1.000001]]
deterministic = deterministic_lib.Deterministic(loc, atol=0.05)
expected_cdf = [[0., 0.], [1., 1.]]
with self.cached_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testCdfWithNonzeroRTol(self):
loc = [[1., 1.], [100., 100.]]
x = [[0.9, 1.], [99.9, 97]]
deterministic = deterministic_lib.Deterministic(loc, rtol=0.01)
expected_cdf = [[0., 1.], [1., 0.]]
with self.cached_session():
cdf = deterministic.cdf(x)
self.assertAllEqual((2, 2), cdf.get_shape())
self.assertAllEqual(expected_cdf, cdf.eval())
def testSampleNoBatchDims(self):
deterministic = deterministic_lib.Deterministic(0.)
for sample_shape in [(), (4,)]:
with self.cached_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape, sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape).astype(np.float32), sample.eval())
def testSampleWithBatchDims(self):
deterministic = deterministic_lib.Deterministic([0., 0.])
for sample_shape in [(), (4,)]:
with self.cached_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (2,), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (2,)).astype(np.float32), sample.eval())
def testSampleDynamicWithBatchDims(self):
loc = array_ops.placeholder(np.float32)
sample_shape = array_ops.placeholder(np.int32)
deterministic = deterministic_lib.Deterministic(loc)
for sample_shape_ in [(), (4,)]:
with self.cached_session():
sample_ = deterministic.sample(sample_shape).eval(
feed_dict={loc: [0., 0.],
sample_shape: sample_shape_})
self.assertAllClose(
np.zeros(sample_shape_ + (2,)).astype(np.float32), sample_)
def testEntropy(self):
loc = np.array([-0.1, -3.2, 7.])
deterministic = deterministic_lib.Deterministic(loc=loc)
with self.cached_session() as sess:
entropy_ = sess.run(deterministic.entropy())
self.assertAllEqual(np.zeros(3), entropy_)
class VectorDeterministicTest(test.TestCase):
def testShape(self):
with self.cached_session():
loc = rng.rand(2, 3, 4)
deterministic = deterministic_lib.VectorDeterministic(loc)
self.assertAllEqual(deterministic.batch_shape_tensor().eval(), (2, 3))
self.assertAllEqual(deterministic.batch_shape, (2, 3))
self.assertAllEqual(deterministic.event_shape_tensor().eval(), [4])
self.assertEqual(deterministic.event_shape, tensor_shape.TensorShape([4]))
def testInvalidTolRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
deterministic = deterministic_lib.VectorDeterministic(
loc, atol=-1, validate_args=True)
with self.cached_session():
with self.assertRaisesOpError("Condition x >= 0"):
deterministic.prob(loc).eval()
def testInvalidXRaises(self):
loc = rng.rand(2, 3, 4).astype(np.float32)
deterministic = deterministic_lib.VectorDeterministic(
loc, atol=-1, validate_args=True)
with self.cached_session():
with self.assertRaisesRegexp(ValueError, "must have rank at least 1"):
deterministic.prob(0.).eval()
def testProbVectorDeterministicWithNoBatchDims(self):
# 0 batch of deterministics on R^1.
deterministic = deterministic_lib.VectorDeterministic([0.])
with self.cached_session():
self.assertAllClose(1., deterministic.prob([0.]).eval())
self.assertAllClose(0., deterministic.prob([2.]).eval())
self.assertAllClose([1., 0.], deterministic.prob([[0.], [2.]]).eval())
def testProbWithDefaultTol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
x = [[0., 1.], [1.9, 3.], [3.99, 5.]]
deterministic = deterministic_lib.VectorDeterministic(loc)
expected_prob = [1., 0., 0.]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroATol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
x = [[0., 1.], [1.9, 3.], [3.99, 5.]]
deterministic = deterministic_lib.VectorDeterministic(loc, atol=0.05)
expected_prob = [1., 0., 1.]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbWithNonzeroRTol(self):
# 3 batch of deterministics on R^2.
loc = [[0., 1.], [1., 1.], [100., 100.]]
x = [[0., 1.], [0.9, 1.], [99.9, 100.1]]
deterministic = deterministic_lib.VectorDeterministic(loc, rtol=0.01)
expected_prob = [1., 0., 1.]
with self.cached_session():
prob = deterministic.prob(x)
self.assertAllEqual((3,), prob.get_shape())
self.assertAllEqual(expected_prob, prob.eval())
def testProbVectorDeterministicWithNoBatchDimsOnRZero(self):
# 0 batch of deterministics on R^0.
deterministic = deterministic_lib.VectorDeterministic(
[], validate_args=True)
with self.cached_session():
self.assertAllClose(1., deterministic.prob([]).eval())
def testProbVectorDeterministicWithNoBatchDimsOnRZeroRaisesIfXNotInSameRk(
self):
# 0 batch of deterministics on R^0.
deterministic = deterministic_lib.VectorDeterministic(
[], validate_args=True)
with self.cached_session():
with self.assertRaisesOpError("not defined in the same space"):
deterministic.prob([1.]).eval()
def testSampleNoBatchDims(self):
deterministic = deterministic_lib.VectorDeterministic([0.])
for sample_shape in [(), (4,)]:
with self.cached_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (1,), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (1,)).astype(np.float32), sample.eval())
def testSampleWithBatchDims(self):
deterministic = deterministic_lib.VectorDeterministic([[0.], [0.]])
for sample_shape in [(), (4,)]:
with self.cached_session():
sample = deterministic.sample(sample_shape)
self.assertAllEqual(sample_shape + (2, 1), sample.get_shape())
self.assertAllClose(
np.zeros(sample_shape + (2, 1)).astype(np.float32), sample.eval())
def testSampleDynamicWithBatchDims(self):
loc = array_ops.placeholder(np.float32)
sample_shape = array_ops.placeholder(np.int32)
deterministic = deterministic_lib.VectorDeterministic(loc)
for sample_shape_ in [(), (4,)]:
with self.cached_session():
sample_ = deterministic.sample(sample_shape).eval(
feed_dict={loc: [[0.], [0.]],
sample_shape: sample_shape_})
self.assertAllClose(
np.zeros(sample_shape_ + (2, 1)).astype(np.float32), sample_)
def testEntropy(self):
loc = np.array([[8.3, 1.2, 3.3], [-0.1, -3.2, 7.]])
deterministic = deterministic_lib.VectorDeterministic(loc=loc)
with self.cached_session() as sess:
entropy_ = sess.run(deterministic.entropy())
self.assertAllEqual(np.zeros(2), entropy_)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/deterministic_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for OneHotCategorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import onehot_categorical
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
def make_onehot_categorical(batch_shape, num_classes, dtype=dtypes.int32):
logits = random_ops.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtypes.float32) - 50.
return onehot_categorical.OneHotCategorical(logits, dtype=dtype)
class OneHotCategoricalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testP(self):
p = [0.2, 0.8]
dist = onehot_categorical.OneHotCategorical(probs=p)
with self.cached_session():
self.assertAllClose(p, dist.probs.eval())
self.assertAllEqual([2], dist.logits.get_shape())
def testLogits(self):
p = np.array([0.2, 0.8], dtype=np.float32)
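    # Subtracting a constant from every logit leaves the normalized
    # probabilities unchanged.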
logits = np.log(p) - 50.
dist = onehot_categorical.OneHotCategorical(logits=logits)
with self.cached_session():
self.assertAllEqual([2], dist.probs.get_shape())
self.assertAllEqual([2], dist.logits.get_shape())
self.assertAllClose(dist.probs.eval(), p)
self.assertAllClose(dist.logits.eval(), logits)
def testShapes(self):
with self.cached_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape.as_list())
self.assertAllEqual([10], dist.event_shape_tensor().eval())
# event_shape is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10,
tensor_util.constant_value(dist.event_shape_tensor()))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(
batch_shape, constant_op.constant(10, dtype=dtypes.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape.as_list())
self.assertEqual(10, dist.event_shape_tensor().eval())
def testDtype(self):
dist = make_onehot_categorical([], 5, dtype=dtypes.int32)
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_onehot_categorical([], 5, dtype=dtypes.int64)
self.assertEqual(dist.dtype, dtypes.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(dist.logits.dtype, dist.prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
self.assertEqual(dist.logits.dtype, dist.log_prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
def testUnknownShape(self):
with self.cached_session():
logits = array_ops.placeholder(dtype=dtypes.float32)
dist = onehot_categorical.OneHotCategorical(logits)
sample = dist.sample()
# Will sample class 1.
sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
self.assertAllEqual([0, 1], sample_value)
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = sample.eval(
feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
self.assertAllEqual([[0, 1], [1, 0]], sample_value_batch)
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = onehot_categorical.OneHotCategorical(logits)
with self.cached_session():
self.assertAllClose(
dist.entropy().eval(),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = onehot_categorical.OneHotCategorical(logits)
with self.cached_session():
self.assertAllClose(dist.entropy().eval(), [
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
])
def testPmf(self):
    # Check that sample probabilities correspond to their class probabilities.
with self.cached_session():
logits = self._rng.random_sample(size=(8, 2, 10))
prob = np.exp(logits)/np.sum(np.exp(logits), axis=-1, keepdims=True)
dist = onehot_categorical.OneHotCategorical(logits=logits)
np_sample = dist.sample().eval()
np_prob = dist.prob(np_sample).eval()
expected_prob = prob[np_sample.astype(np.bool)]
self.assertAllClose(expected_prob, np_prob.flatten())
def testSample(self):
with self.cached_session():
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.)
n = 100
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
self.assertAllEqual([n, 1, 2, 2], sample_values.shape)
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
def testSampleWithSampleShape(self):
with self.cached_session():
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = prob.eval()
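      # E[prob(sample)] for a categorical is sum_k p_k**2 (the collision
      # probability): 0.2**2 + 0.8**2 and 0.4**2 + 0.6**2 here.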
self.assertAllClose([0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()],
atol=1e-2)
self.assertAllClose([0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()],
atol=1e-2)
def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
with self.cached_session() as sess:
for categories in [2, 10]:
for batch_size in [1, 2]:
p_logits = self._rng.random_sample((batch_size, categories))
q_logits = self._rng.random_sample((batch_size, categories))
p = onehot_categorical.OneHotCategorical(logits=p_logits)
q = onehot_categorical.OneHotCategorical(logits=q_logits)
prob_p = np_softmax(p_logits)
prob_q = np_softmax(q_logits)
kl_expected = np.sum(
prob_p * (np.log(prob_p) - np.log(prob_q)), axis=-1)
kl_actual = kullback_leibler.kl_divergence(p, q)
kl_same = kullback_leibler.kl_divergence(p, p)
x = p.sample(int(2e4), seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
# Compute empirical KL(p||q).
kl_sample = math_ops.reduce_mean(p.log_prob(x) - q.log_prob(x), 0)
[kl_sample_, kl_actual_, kl_same_] = sess.run([kl_sample, kl_actual,
kl_same])
self.assertEqual(kl_actual.get_shape(), (batch_size,))
self.assertAllClose(kl_same_, np.zeros_like(kl_expected))
self.assertAllClose(kl_actual_, kl_expected, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.)
def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess:
logits = self._rng.rand(4, 3, 2).astype(np.float32)
dist = onehot_categorical.OneHotCategorical(logits=logits)
n = int(3e3)
x = dist.sample(n, seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess:
logits = self._rng.rand(3).astype(np.float32)
dist = onehot_categorical.OneHotCategorical(logits=logits)
n = int(1e4)
x = dist.sample(n, seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
sample_mean = math_ops.reduce_mean(x, 0) # elementwise mean
x_centered = x - sample_mean
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([3], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.1)
self.assertAllEqual([3, 3], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/onehot_categorical_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for computing moving-average statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import moving_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class MovingReduceMeanVarianceTest(test.TestCase):
def test_assign_moving_mean_variance(self):
shape = [1, 2]
true_mean = np.array([[0., 3.]])
true_stddev = np.array([[1.1, 0.5]])
with self.cached_session() as sess:
# Start "x" out with this mean.
mean_var = variables.VariableV1(array_ops.zeros_like(true_mean))
variance_var = variables.VariableV1(array_ops.ones_like(true_stddev))
x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
x = true_stddev * x + true_mean
ema, emv = moving_stats.assign_moving_mean_variance(
mean_var, variance_var, x, decay=0.99)
self.assertEqual(ema.dtype.base_dtype, dtypes.float64)
self.assertEqual(emv.dtype.base_dtype, dtypes.float64)
      # Run 2000 updates; moving averages should be near the true values.
variables.global_variables_initializer().run()
for _ in range(2000):
sess.run([ema, emv])
[mean_var_, variance_var_, ema_, emv_] = sess.run([
mean_var, variance_var, ema, emv])
# Test that variables are passed-through.
self.assertAllEqual(mean_var_, ema_)
self.assertAllEqual(variance_var_, emv_)
# Test that values are as expected.
self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
self.assertAllClose(true_stddev**2., emv_, rtol=0.06, atol=0.)
# Change the mean, var then update some more. Moving averages should
# re-converge.
sess.run([
mean_var.assign(np.array([[-1., 2.]])),
variance_var.assign(np.array([[2., 1.]])),
])
for _ in range(2000):
sess.run([ema, emv])
[mean_var_, variance_var_, ema_, emv_] = sess.run([
mean_var, variance_var, ema, emv])
# Test that variables are passed-through.
self.assertAllEqual(mean_var_, ema_)
self.assertAllEqual(variance_var_, emv_)
# Test that values are as expected.
self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
self.assertAllClose(true_stddev**2., emv_, rtol=0.1, atol=0.)
def test_moving_mean_variance(self):
shape = [1, 2]
true_mean = np.array([[0., 3.]])
true_stddev = np.array([[1.1, 0.5]])
with self.cached_session() as sess:
# Start "x" out with this mean.
x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
x = true_stddev * x + true_mean
ema, emv = moving_stats.moving_mean_variance(
x, decay=0.99)
self.assertEqual(ema.dtype.base_dtype, dtypes.float64)
self.assertEqual(emv.dtype.base_dtype, dtypes.float64)
      # Run 2000 updates; moving averages should be near the true values.
variables.global_variables_initializer().run()
for _ in range(2000):
sess.run([ema, emv])
[ema_, emv_] = sess.run([ema, emv])
self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
self.assertAllClose(true_stddev**2., emv_, rtol=0.06, atol=0.)
class MovingLogExponentialMovingMeanExpTest(test.TestCase):
def test_assign_log_moving_mean_exp(self):
shape = [1, 2]
true_mean = np.array([[0., 3.]])
true_stddev = np.array([[1.1, 0.5]])
decay = 0.99
with self.cached_session() as sess:
# Start "x" out with this mean.
x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
x = true_stddev * x + true_mean
log_mean_exp_var = variables.VariableV1(array_ops.zeros_like(true_mean))
variables.global_variables_initializer().run()
log_mean_exp = moving_stats.assign_log_moving_mean_exp(
log_mean_exp_var, x, decay=decay)
expected_ = np.zeros_like(true_mean)
for _ in range(2000):
x_, log_mean_exp_ = sess.run([x, log_mean_exp])
expected_ = np.log(decay * np.exp(expected_) + (1 - decay) * np.exp(x_))
self.assertAllClose(expected_, log_mean_exp_, rtol=1e-6, atol=1e-9)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/moving_stats_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for VectorDiffeomixture."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops import vector_diffeomixture as vdm_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.linalg import linear_operator_diag as linop_diag_lib
from tensorflow.python.ops.linalg import linear_operator_identity as linop_identity_lib
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class VectorDiffeomixtureTest(
test_util.VectorDistributionTestHelpers, test.TestCase):
"""Tests the VectorDiffeomixture distribution."""
def testSampleProbConsistentBroadcastMixNoBatch(self):
with self.cached_session() as sess:
dims = 4
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [1.]],
temperature=[1.],
distribution=normal_lib.Normal(0., 1.),
loc=[
None,
np.float32([2.]*dims),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=0., rtol=0.015)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=4., center=2., rtol=0.015)
def testSampleProbConsistentBroadcastMixNonStandardBase(self):
with self.cached_session() as sess:
dims = 4
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [1.]],
temperature=[1.],
distribution=normal_lib.Normal(1., 1.5),
loc=[
None,
np.float32([2.]*dims),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=1., rtol=0.015)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=4., center=3., rtol=0.01)
def testSampleProbConsistentBroadcastMixBatch(self):
with self.cached_session() as sess:
dims = 4
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [1.]],
temperature=[1.],
distribution=normal_lib.Normal(0., 1.),
loc=[
None,
np.float32([2.]*dims),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=[np.float32(1.1)],
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.stack([
np.linspace(2.5, 3.5, dims, dtype=np.float32),
np.linspace(2.75, 3.25, dims, dtype=np.float32),
]),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=0., rtol=0.01)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=4., center=2., rtol=0.01)
def testSampleProbConsistentBroadcastMixTwoBatchDims(self):
dims = 4
loc_1 = rng.randn(2, 3, dims).astype(np.float32)
with self.cached_session() as sess:
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=(rng.rand(2, 3, 1) - 0.5).astype(np.float32),
temperature=[1.],
distribution=normal_lib.Normal(0., 1.),
loc=[
None,
loc_1,
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=[np.float32(1.1)],
is_positive_definite=True),
] * 2,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=0., rtol=0.01)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=3., center=loc_1, rtol=0.02)
def testMeanCovarianceNoBatch(self):
with self.cached_session() as sess:
dims = 3
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [4.]],
temperature=[1 / 10.],
distribution=normal_lib.Normal(0., 1.),
loc=[
np.float32([-2.]),
None,
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.5),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, rtol=0.02, cov_rtol=0.08)
def testTemperatureControlsHowMuchThisLooksLikeDiscreteMixture(self):
# As temperature decreases, this should approach a mixture of normals, with
# components at -2, 2.
with self.cached_session() as sess:
dims = 1
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[0.],
temperature=[[2.], [1.], [0.2]],
distribution=normal_lib.Normal(0., 1.),
loc=[
np.float32([-2.]),
np.float32([2.]),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(0.5),
is_positive_definite=True),
] * 2, # Use the same scale for each component.
quadrature_size=8,
validate_args=True)
samps = vdm.sample(10000)
self.assertAllEqual((10000, 3, 1), samps.shape)
samps_ = sess.run(samps).reshape(10000, 3) # Make scalar event shape.
# One characteristic of a discrete mixture (as opposed to a "smear") is
# that more weight is put near the component centers at -2, 2, and thus
# less weight is put near the origin.
prob_of_being_near_origin = (np.abs(samps_) < 1).mean(axis=0)
self.assertGreater(
prob_of_being_near_origin[0], prob_of_being_near_origin[1])
self.assertGreater(
prob_of_being_near_origin[1], prob_of_being_near_origin[2])
# Run this test as well, just because we can.
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, rtol=0.02, cov_rtol=0.08)
def testConcentrationLocControlsHowMuchWeightIsOnEachComponent(self):
with self.cached_session() as sess:
dims = 1
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[-1.], [0.], [1.]],
temperature=[0.5],
distribution=normal_lib.Normal(0., 1.),
loc=[
np.float32([-2.]),
np.float32([2.]),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(0.5),
is_positive_definite=True),
] * 2, # Use the same scale for each component.
quadrature_size=8,
validate_args=True)
samps = vdm.sample(10000)
self.assertAllEqual((10000, 3, 1), samps.shape)
samps_ = sess.run(samps).reshape(10000, 3) # Make scalar event shape.
# One characteristic of putting more weight on a component is that the
# mean is closer to that component's mean.
# Get the mean for each batch member, the names signify the value of
# concentration for that batch member.
mean_neg1, mean_0, mean_1 = samps_.mean(axis=0)
# Since concentration is the concentration for component 0,
# concentration = -1 ==> more weight on component 1, which has mean = 2
# concentration = 0 ==> equal weight
# concentration = 1 ==> more weight on component 0, which has mean = -2
self.assertLess(-2, mean_1)
self.assertLess(mean_1, mean_0)
self.assertLess(mean_0, mean_neg1)
self.assertLess(mean_neg1, 2)
# Run this test as well, just because we can.
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, rtol=0.02, cov_rtol=0.08)
def testMeanCovarianceNoBatchUncenteredNonStandardBase(self):
with self.cached_session() as sess:
dims = 3
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [4.]],
temperature=[0.1],
distribution=normal_lib.Normal(-1., 1.5),
loc=[
np.float32([-2.]),
np.float32([0.]),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.5),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, num_samples=int(1e6), rtol=0.01, cov_atol=0.025)
def testMeanCovarianceBatch(self):
with self.cached_session() as sess:
dims = 3
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[[0.], [4.]],
temperature=[0.1],
distribution=normal_lib.Normal(0., 1.),
loc=[
np.float32([[-2.]]),
None,
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=[np.float32(1.5)],
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.stack([
np.linspace(2.5, 3.5, dims, dtype=np.float32),
np.linspace(0.5, 1.5, dims, dtype=np.float32),
]),
is_positive_definite=True),
],
quadrature_size=8,
validate_args=True)
self.run_test_sample_consistent_mean_covariance(
sess.run, vdm, rtol=0.02, cov_rtol=0.07)
def testSampleProbConsistentQuadrature(self):
with self.cached_session() as sess:
dims = 4
vdm = vdm_lib.VectorDiffeomixture(
mix_loc=[0.],
temperature=[0.1],
distribution=normal_lib.Normal(0., 1.),
loc=[
None,
np.float32([2.]*dims),
],
scale=[
linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
linop_diag_lib.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
quadrature_size=3,
validate_args=True)
# Ball centered at component0's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=2., center=0., rtol=0.015)
# Larger ball centered at component1's mean.
self.run_test_sample_consistent_log_prob(
sess.run, vdm, radius=4., center=2., rtol=0.005)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/vector_diffeomixture_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class SquareBijectorTest(test.TestCase):
"""Tests the correctness of the Y = X ** 2 transformation."""
def testBijectorScalar(self):
with self.cached_session():
bijector = bijectors.Square(validate_args=True)
self.assertEqual("square", bijector.name)
x = [[[1., 5],
[2, 1]],
[[np.sqrt(2.), 3],
[np.sqrt(8.), 1]]]
y = np.square(x)
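      # The inverse is x = sqrt(y), so |dx/dy| = 1 / (2 * sqrt(y)) = 1 / (2 x)
      # and the inverse log det Jacobian is -log(2) - log(x).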
ildj = -np.log(2.) - np.log(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval(), atol=0., rtol=1e-7)
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=0).eval(),
atol=0.,
rtol=1e-7)
def testScalarCongruency(self):
with self.cached_session():
bijector = bijectors.Square(validate_args=True)
assert_scalar_congruency(bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/square_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ConditionalBijector Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.bijectors.conditional_bijector import ConditionalBijector
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class _TestBijector(ConditionalBijector):
def __init__(self):
super(_TestBijector, self).__init__(
forward_min_event_ndims=0,
graph_parents=[],
is_constant_jacobian=True,
validate_args=False,
dtype=dtypes.float32,
name="test_bijector")
def _forward(self, _, arg1, arg2):
raise ValueError("forward", arg1, arg2)
def _inverse(self, _, arg1, arg2):
raise ValueError("inverse", arg1, arg2)
def _inverse_log_det_jacobian(self, _, arg1, arg2):
raise ValueError("inverse_log_det_jacobian", arg1, arg2)
def _forward_log_det_jacobian(self, _, arg1, arg2):
raise ValueError("forward_log_det_jacobian", arg1, arg2)
class ConditionalBijectorTest(test.TestCase):
def testConditionalBijector(self):
b = _TestBijector()
for name in ["forward", "inverse"]:
method = getattr(b, name)
with self.assertRaisesRegexp(ValueError, name + ".*b1.*b2"):
method(1., arg1="b1", arg2="b2")
for name in ["inverse_log_det_jacobian", "forward_log_det_jacobian"]:
method = getattr(b, name)
with self.assertRaisesRegexp(ValueError, name + ".*b1.*b2"):
method(1., event_ndims=0, arg1="b1", arg2="b2")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/conditional_bijector_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AbsoluteValue Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-importing-member
from tensorflow.contrib.distributions.python.ops.bijectors.absolute_value import AbsoluteValue
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# pylint: enable=g-importing-member
class AbsoluteValueTest(test.TestCase):
"""Tests correctness of the absolute value bijector."""
def testBijectorVersusNumpyRewriteOfBasicFunctionsEventNdims0(self):
with self.cached_session() as sess:
bijector = AbsoluteValue(validate_args=True)
self.assertEqual("absolute_value", bijector.name)
x = array_ops.constant([[0., 1., -1], [0., -5., 3.]]) # Shape [2, 3]
y = math_ops.abs(x)
y_ = y.eval()
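      # abs is two-to-one, so inverse returns both preimages (-y, y); each
      # branch has |dx/dy| = 1, hence an inverse log det Jacobian of 0.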
self.assertAllClose(y_, bijector.forward(x).eval())
self.assertAllClose((-y_, y_), sess.run(bijector.inverse(y)))
self.assertAllClose((0., 0.),
sess.run(bijector.inverse_log_det_jacobian(
y, event_ndims=0)))
# Run things twice to make sure there are no issues in caching the tuples
# returned by .inverse*
self.assertAllClose(y_, bijector.forward(x).eval())
self.assertAllClose((-y_, y_), sess.run(bijector.inverse(y)))
self.assertAllClose((0., 0.),
sess.run(bijector.inverse_log_det_jacobian(
y, event_ndims=0)))
def testNegativeYRaisesForInverseIfValidateArgs(self):
with self.cached_session() as sess:
bijector = AbsoluteValue(validate_args=True)
with self.assertRaisesOpError("y was negative"):
sess.run(bijector.inverse(-1.))
def testNegativeYRaisesForILDJIfValidateArgs(self):
with self.cached_session() as sess:
bijector = AbsoluteValue(validate_args=True)
with self.assertRaisesOpError("y was negative"):
sess.run(bijector.inverse_log_det_jacobian(-1., event_ndims=0))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/absolute_value_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ScaleTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ScaleTriLBijectorTest(test.TestCase):
"""Tests the correctness of the ScaleTriL bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testComputesCorrectValues(self):
shift = 1.61803398875
x = np.float32(np.array([-1, .5, 2]))
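    # x fills a 2x2 lower-triangular matrix as [[2., 0.], [.5, -1.]]; the
    # diag_bijector (Exp) is applied to the diagonal and diag_shift is added.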
y = np.float32(np.array([[np.exp(2) + shift, 0.],
[.5, np.exp(-1) + shift]]))
b = bijectors.ScaleTriL(diag_bijector=bijectors.Exp(),
diag_shift=shift)
y_ = self.evaluate(b.forward(x))
self.assertAllClose(y, y_, rtol=1e-4)
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_, rtol=1e-4)
@test_util.run_in_graph_and_eager_modes
def testInvertible(self):
# Generate random inputs from an unconstrained space, with
# event size 6 to specify 3x3 triangular matrices.
batch_shape = [2, 1]
x = np.float32(self._rng.randn(*(batch_shape + [6])))
b = bijectors.ScaleTriL(diag_bijector=bijectors.Softplus(),
diag_shift=3.14159)
y = self.evaluate(b.forward(x))
self.assertAllEqual(y.shape, batch_shape + [3, 3])
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_, rtol=1e-4)
fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllClose(fldj, -ildj, rtol=1e-4)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/scale_tril_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for FillTriangular bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FillTriangularBijectorTest(test.TestCase):
"""Tests the correctness of the FillTriangular bijector."""
@test_util.run_in_graph_and_eager_modes
def testBijector(self):
x = np.float32(np.array([1., 2., 3.]))
y = np.float32(np.array([[3., 0.],
[2., 1.]]))
b = bijectors.FillTriangular()
y_ = self.evaluate(b.forward(x))
self.assertAllClose(y, y_)
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
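    # FillTriangular merely rearranges entries, so the map is volume
    # preserving and both log det Jacobians are 0.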
fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=1))
self.assertAllClose(fldj, 0.)
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllClose(ildj, 0.)
@test_util.run_in_graph_and_eager_modes
def testShape(self):
x_shape = tensor_shape.TensorShape([5, 4, 6])
y_shape = tensor_shape.TensorShape([5, 4, 3, 3])
b = bijectors.FillTriangular(validate_args=True)
x = array_ops.ones(shape=x_shape, dtype=dtypes.float32)
y_ = b.forward(x)
self.assertAllEqual(y_.shape.as_list(), y_shape.as_list())
x_ = b.inverse(y_)
self.assertAllEqual(x_.shape.as_list(), x_shape.as_list())
y_shape_ = b.forward_event_shape(x_shape)
self.assertAllEqual(y_shape_.as_list(), y_shape.as_list())
x_shape_ = b.inverse_event_shape(y_shape)
self.assertAllEqual(x_shape_.as_list(), x_shape.as_list())
y_shape_tensor = self.evaluate(
b.forward_event_shape_tensor(x_shape.as_list()))
self.assertAllEqual(y_shape_tensor, y_shape.as_list())
x_shape_tensor = self.evaluate(
b.inverse_event_shape_tensor(y_shape.as_list()))
self.assertAllEqual(x_shape_tensor, x_shape.as_list())
@test_util.run_in_graph_and_eager_modes
def testShapeError(self):
b = bijectors.FillTriangular(validate_args=True)
x_shape_bad = tensor_shape.TensorShape([5, 4, 7])
with self.assertRaisesRegexp(ValueError, "is not a triangular number"):
b.forward_event_shape(x_shape_bad)
with self.assertRaisesOpError("is not a triangular number"):
self.evaluate(b.forward_event_shape_tensor(x_shape_bad.as_list()))
y_shape_bad = tensor_shape.TensorShape([5, 4, 3, 2])
with self.assertRaisesRegexp(ValueError, "Matrix must be square"):
b.inverse_event_shape(y_shape_bad)
with self.assertRaisesOpError("Matrix must be square"):
self.evaluate(b.inverse_event_shape_tensor(y_shape_bad.as_list()))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/fill_triangular_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.ordered import Ordered
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class OrderedBijectorTest(test.TestCase):
"""Tests correctness of the ordered transformation."""
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_in_graph_and_eager_modes
def testBijectorVector(self):
ordered = Ordered()
self.assertEqual("ordered", ordered.name)
x = np.asarray([[2., 3, 4], [4., 8, 13]])
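    # Ordered.forward keeps the first element and maps the rest to
    # log-differences: y[..., k] = log(x[..., k] - x[..., k - 1]) for k > 0.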
y = [[2., 0, 0], [4., np.log(4.), np.log(5.)]]
self.assertAllClose(y, self.evaluate(ordered.forward(x)))
self.assertAllClose(x, self.evaluate(ordered.inverse(y)))
self.assertAllClose(
np.sum(np.asarray(y)[..., 1:], axis=-1),
self.evaluate(ordered.inverse_log_det_jacobian(y, event_ndims=1)),
atol=0.,
rtol=1e-7)
self.assertAllClose(
self.evaluate(-ordered.inverse_log_det_jacobian(y, event_ndims=1)),
self.evaluate(ordered.forward_log_det_jacobian(x, event_ndims=1)),
atol=0.,
rtol=1e-7)
def testBijectorUnknownShape(self):
with self.cached_session():
ordered = Ordered()
self.assertEqual("ordered", ordered.name)
x = array_ops.placeholder(shape=[2, None], dtype=dtypes.float32)
real_x = np.asarray([[2., 3, 4], [4., 8, 13]])
y = array_ops.placeholder(shape=[2, None], dtype=dtypes.float32)
real_y = [[2., 0, 0], [4., np.log(4.), np.log(5.)]]
self.assertAllClose(real_y, ordered.forward(x).eval(
feed_dict={x: real_x}))
self.assertAllClose(real_x, ordered.inverse(y).eval(
feed_dict={y: real_y}))
self.assertAllClose(
np.sum(np.asarray(real_y)[..., 1:], axis=-1),
ordered.inverse_log_det_jacobian(y, event_ndims=1).eval(
feed_dict={y: real_y}),
atol=0.,
rtol=1e-7)
self.assertAllClose(
-ordered.inverse_log_det_jacobian(y, event_ndims=1).eval(
feed_dict={y: real_y}),
ordered.forward_log_det_jacobian(x, event_ndims=1).eval(
feed_dict={x: real_x}),
atol=0.,
rtol=1e-7)
@test_util.run_in_graph_and_eager_modes
def testShapeGetters(self):
x = tensor_shape.TensorShape([4])
y = tensor_shape.TensorShape([4])
bijector = Ordered(validate_args=True)
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(y.as_list(),
self.evaluate(bijector.forward_event_shape_tensor(
x.as_list())))
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(x.as_list(),
self.evaluate(bijector.inverse_event_shape_tensor(
y.as_list())))
def testBijectiveAndFinite(self):
with self.cached_session():
ordered = Ordered()
x = np.sort(self._rng.randn(3, 10), axis=-1).astype(np.float32)
y = (self._rng.randn(3, 10)).astype(np.float32)
assert_bijective_and_finite(ordered, x, y, event_ndims=1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/ordered_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Scalar Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class AffineScalarBijectorTest(test.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
with self.cached_session():
mu = -1.
# scale corresponds to 1.
bijector = AffineScalar(shift=mu)
self.assertEqual("affine_scalar", bijector.name)
def testNoBatchScalar(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = AffineScalar(shift=mu, scale=2.)
x = [1., 2, 3] # Three scalar samples (no batches).
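        # With shift = -1 and scale = 2: forward(x) = 2 * x - 1,
        # inverse(x) = (x + 1) / 2, and the ILDJ is -log(2) for every sample.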
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testOneBatchScalarViaIdentityIn64BitUserProvidesShiftOnly(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = np.float64([1.])
# One batch, scalar.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
        x = np.float64([1.])  # One sample from one batch.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.], run(bijector.inverse, x))
self.assertAllClose(
0.,
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testOneBatchScalarViaIdentityIn64BitUserProvidesScaleOnly(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
multiplier = np.float64([2.])
# One batch, scalar.
# Corresponds to scale = 2, shift = 0.
bijector = AffineScalar(scale=multiplier)
        x = np.float64([1.])  # One sample from one batch.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.5], run(bijector.inverse, x))
self.assertAllClose(
[np.log(0.5)],
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testTwoBatchScalarIdentityViaIdentity(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value).astype(np.float32)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
0.,
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testTwoBatchScalarIdentityViaScale(self):
with self.cached_session() as sess:
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value).astype(np.float32)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x, **kwargs), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
        # Corresponds to scale = [2., 1].
bijector = AffineScalar(shift=mu, scale=[2., 1])
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
[-np.log(2), 0.],
run(bijector.inverse_log_det_jacobian, x, event_ndims=0))
def testScalarCongruency(self):
with self.cached_session():
bijector = AffineScalar(shift=3.6, scale=0.42)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AffineLinearOperator Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.platform import test
class AffineLinearOperatorTest(test.TestCase):
def testIdentity(self):
with self.cached_session():
affine = AffineLinearOperator(
validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
y = x
ildj = 0.
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(ildj, affine.inverse_log_det_jacobian(
y, event_ndims=2).eval())
self.assertAllClose(
-affine.inverse_log_det_jacobian(y, event_ndims=2).eval(),
affine.forward_log_det_jacobian(x, event_ndims=2).eval())
def testDiag(self):
with self.cached_session():
shift = np.array([-1, 0, 1], dtype=np.float32)
diag = np.array([[1, 2, 3],
[2, 5, 6]], dtype=np.float32)
scale = linalg.LinearOperatorDiag(diag, is_non_singular=True)
affine = AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)
x = np.array([[1, 0, -1], [2, 3, 4]], dtype=np.float32)
y = diag * x + shift
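      # For a diagonal scale, log|det J| = sum(log|diag|) over the event
      # dimension, so the ILDJ is its negation (one value per batch member).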
ildj = -np.sum(np.log(np.abs(diag)), axis=-1)
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(
ildj, affine.inverse_log_det_jacobian(y, event_ndims=1).eval())
self.assertAllClose(
-affine.inverse_log_det_jacobian(y, event_ndims=1).eval(),
affine.forward_log_det_jacobian(x, event_ndims=1).eval())
def testTriL(self):
with self.cached_session():
shift = np.array([-1, 0, 1], dtype=np.float32)
tril = np.array([[[3, 0, 0],
[2, -1, 0],
[3, 2, 1]],
[[2, 0, 0],
[3, -2, 0],
[4, 3, 2]]],
dtype=np.float32)
scale = linalg.LinearOperatorLowerTriangular(tril, is_non_singular=True)
affine = AffineLinearOperator(
shift=shift, scale=scale, validate_args=True)
x = np.array([[[1, 0, -1],
[2, 3, 4]],
[[4, 1, -7],
[6, 9, 8]]],
dtype=np.float32)
# If we made the bijector do x*A+b then this would be simplified to:
# y = np.matmul(x, tril) + shift.
y = np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
ildj = -np.sum(np.log(np.abs(np.diagonal(
tril, axis1=-2, axis2=-1))))
self.assertEqual(affine.name, "affine_linear_operator")
self.assertAllClose(y, affine.forward(x).eval())
self.assertAllClose(x, affine.inverse(y).eval())
self.assertAllClose(
ildj, affine.inverse_log_det_jacobian(
y, event_ndims=2).eval())
self.assertAllClose(
-affine.inverse_log_det_jacobian(y, event_ndims=2).eval(),
affine.forward_log_det_jacobian(x, event_ndims=2).eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_linear_operator_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exp Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class ExpBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = exp(X) transformation."""
def testBijector(self):
with self.cached_session():
bijector = Exp()
self.assertEqual("exp", bijector.name)
x = [[[1.], [2.]]]
y = np.exp(x)
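      # The ILDJ of exp is -log(y) = -x; with event_ndims=1 it is reduced over
      # the last (here size-1) dimension, which matches squeezing log(y).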
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
-np.squeeze(np.log(y), axis=-1),
bijector.inverse_log_det_jacobian(
y, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(
np.exp(x), event_ndims=1).eval(),
bijector.forward_log_det_jacobian(
x, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
bijector = Exp()
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = Exp()
x = np.linspace(-10, 10, num=10).astype(np.float32)
y = np.logspace(-10, 10, num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, event_ndims=0)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/exp_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MaskedAutoregressiveFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import real_nvp_default_template
from tensorflow.contrib.distributions.python.ops.bijectors.real_nvp import RealNVP
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
class RealNVPTest(test_util.VectorDistributionTestHelpers, test.TestCase):
@property
def _real_nvp_kwargs(self):
return {
"shift_and_log_scale_fn": real_nvp_default_template(
hidden_layers=[3], shift_only=False),
"is_constant_jacobian": False,
}
def testBijector(self):
x_ = np.arange(3 * 4 * 2).astype(np.float32).reshape(3, 4 * 2)
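    # Each event is a length-8 vector; with num_masked=4 the first four
    # components pass through unchanged and parameterize the shift and
    # log-scale applied to the remaining four.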
with self.cached_session() as sess:
nvp = RealNVP(
num_masked=4,
validate_args=True,
**self._real_nvp_kwargs)
x = constant_op.constant(x_)
forward_x = nvp.forward(x)
# Use identity to invalidate cache.
inverse_y = nvp.inverse(array_ops.identity(forward_x))
forward_inverse_y = nvp.forward(inverse_y)
fldj = nvp.forward_log_det_jacobian(x, event_ndims=1)
# Use identity to invalidate cache.
ildj = nvp.inverse_log_det_jacobian(
array_ops.identity(forward_x), event_ndims=1)
variables.global_variables_initializer().run()
[
forward_x_,
inverse_y_,
forward_inverse_y_,
ildj_,
fldj_,
] = sess.run([
forward_x,
inverse_y,
forward_inverse_y,
ildj,
fldj,
])
self.assertEqual("real_nvp", nvp.name)
self.assertAllClose(forward_x_, forward_inverse_y_, rtol=1e-1, atol=0.)
self.assertAllClose(x_, inverse_y_, rtol=1e-1, atol=0.)
self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)
def testMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
nvp = RealNVP(
num_masked=3,
validate_args=True,
**self._real_nvp_kwargs)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=nvp,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
nvp = Invert(RealNVP(
num_masked=3,
validate_args=True,
**self._real_nvp_kwargs))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=nvp,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
class NICETest(RealNVPTest):
@property
def _real_nvp_kwargs(self):
return {
"shift_and_log_scale_fn": real_nvp_default_template(
hidden_layers=[2], shift_only=True),
"is_constant_jacobian": True,
}
class RealNVPConstantShiftScaleTest(RealNVPTest):
@property
def _real_nvp_kwargs(self):
def constant_shift_log_scale_fn(x0, output_units):
del x0, output_units
shift = constant_op.constant([0.1])
log_scale = constant_op.constant([0.5])
return shift, log_scale
return {
"shift_and_log_scale_fn": constant_shift_log_scale_fn,
"is_constant_jacobian": True,
}
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/real_nvp_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class CholeskyOuterProductBijectorTest(test.TestCase):
"""Tests the correctness of the Y = X @ X.T transformation."""
def testBijectorMatrix(self):
with self.cached_session():
bijector = bijectors.CholeskyOuterProduct(validate_args=True)
self.assertEqual("cholesky_outer_product", bijector.name)
x = [[[1., 0], [2, 1]], [[np.sqrt(2.), 0], [np.sqrt(8.), 1]]]
y = np.matmul(x, np.transpose(x, axes=(0, 2, 1)))
# Fairly easy to compute differentials since we have 2x2.
dx_dy = [[[2. * 1, 0, 0],
[2, 1, 0],
[0, 2 * 2, 2 * 1]],
[[2 * np.sqrt(2.), 0, 0],
[np.sqrt(8.), np.sqrt(2.), 0],
[0, 2 * np.sqrt(8.), 2 * 1]]]
ildj = -np.sum(
np.log(np.asarray(dx_dy).diagonal(
offset=0, axis1=1, axis2=2)),
axis=1)
self.assertAllEqual((2, 2, 2), bijector.forward(x).get_shape())
self.assertAllEqual((2, 2, 2), bijector.inverse(y).get_shape())
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=2).eval(), atol=0., rtol=1e-7)
self.assertAllClose(
-bijector.inverse_log_det_jacobian(
y, event_ndims=2).eval(),
bijector.forward_log_det_jacobian(
x, event_ndims=2).eval(),
atol=0.,
rtol=1e-7)
def testNoBatchStaticJacobian(self):
x = np.eye(2)
bijector = bijectors.CholeskyOuterProduct()
# The Jacobian matrix is 2 * tf.eye(2), which has jacobian determinant 4.
self.assertAllClose(
np.log(4),
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=2)))
def testNoBatchDynamicJacobian(self):
x = np.eye(2)
bijector = bijectors.CholeskyOuterProduct()
x_pl = array_ops.placeholder(dtypes.float32)
with self.cached_session():
log_det_jacobian = bijector.forward_log_det_jacobian(x_pl, event_ndims=2)
# The Jacobian matrix is 2 * tf.eye(2), which has jacobian determinant 4.
self.assertAllClose(
np.log(4),
log_det_jacobian.eval({x_pl: x}))
def testNoBatchStatic(self):
x = np.array([[1., 0], [2, 1]]) # np.linalg.cholesky(y)
y = np.array([[1., 2], [2, 5]]) # np.matmul(x, x.T)
with self.cached_session() as sess:
y_actual = bijectors.CholeskyOuterProduct().forward(x=x)
x_actual = bijectors.CholeskyOuterProduct().inverse(y=y)
[y_actual_, x_actual_] = sess.run([y_actual, x_actual])
self.assertAllEqual([2, 2], y_actual.get_shape())
self.assertAllEqual([2, 2], x_actual.get_shape())
self.assertAllClose(y, y_actual_)
self.assertAllClose(x, x_actual_)
def testNoBatchDeferred(self):
x = np.array([[1., 0], [2, 1]]) # np.linalg.cholesky(y)
y = np.array([[1., 2], [2, 5]]) # np.matmul(x, x.T)
with self.cached_session() as sess:
x_pl = array_ops.placeholder(dtypes.float32)
y_pl = array_ops.placeholder(dtypes.float32)
y_actual = bijectors.CholeskyOuterProduct().forward(x=x_pl)
x_actual = bijectors.CholeskyOuterProduct().inverse(y=y_pl)
[y_actual_, x_actual_] = sess.run([y_actual, x_actual],
feed_dict={x_pl: x, y_pl: y})
self.assertEqual(None, y_actual.get_shape())
self.assertEqual(None, x_actual.get_shape())
self.assertAllClose(y, y_actual_)
self.assertAllClose(x, x_actual_)
def testBatchStatic(self):
x = np.array([[[1., 0],
[2, 1]],
[[3., 0],
[1, 2]]]) # np.linalg.cholesky(y)
y = np.array([[[1., 2],
[2, 5]],
[[9., 3],
[3, 5]]]) # np.matmul(x, x.T)
with self.cached_session() as sess:
y_actual = bijectors.CholeskyOuterProduct().forward(x=x)
x_actual = bijectors.CholeskyOuterProduct().inverse(y=y)
[y_actual_, x_actual_] = sess.run([y_actual, x_actual])
self.assertEqual([2, 2, 2], y_actual.get_shape())
self.assertEqual([2, 2, 2], x_actual.get_shape())
self.assertAllClose(y, y_actual_)
self.assertAllClose(x, x_actual_)
def testBatchDeferred(self):
x = np.array([[[1., 0],
[2, 1]],
[[3., 0],
[1, 2]]]) # np.linalg.cholesky(y)
y = np.array([[[1., 2],
[2, 5]],
[[9., 3],
[3, 5]]]) # np.matmul(x, x.T)
with self.cached_session() as sess:
x_pl = array_ops.placeholder(dtypes.float32)
y_pl = array_ops.placeholder(dtypes.float32)
y_actual = bijectors.CholeskyOuterProduct().forward(x=x_pl)
x_actual = bijectors.CholeskyOuterProduct().inverse(y=y_pl)
[y_actual_, x_actual_] = sess.run([y_actual, x_actual],
feed_dict={x_pl: x, y_pl: y})
self.assertEqual(None, y_actual.get_shape())
self.assertEqual(None, x_actual.get_shape())
self.assertAllClose(y, y_actual_)
self.assertAllClose(x, x_actual_)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/cholesky_outer_product_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops.bijectors.gumbel import Gumbel
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class GumbelBijectorTest(test.TestCase):
"""Tests correctness of the Gumbel bijector."""
def testBijector(self):
with self.cached_session():
loc = 0.3
scale = 5.
bijector = Gumbel(loc=loc, scale=scale, validate_args=True)
self.assertEqual("gumbel", bijector.name)
x = np.array([[[-3.], [0.], [0.5], [4.2], [12.]]], dtype=np.float32)
# Gumbel distribution
gumbel_dist = stats.gumbel_r(loc=loc, scale=scale)
y = gumbel_dist.cdf(x).astype(np.float32)
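      # forward is the Gumbel CDF, so the forward log det Jacobian is the
      # Gumbel log-pdf evaluated at x.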
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
np.squeeze(gumbel_dist.logpdf(x), axis=-1),
bijector.forward_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=1).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=1).eval(),
rtol=1e-4,
atol=0.)
def testScalarCongruency(self):
with self.cached_session():
assert_scalar_congruency(
Gumbel(loc=0.3, scale=20.), lower_x=1., upper_x=100., rtol=0.02)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = Gumbel(loc=0., scale=3.0, validate_args=True)
x = np.linspace(-10., 10., num=10).astype(np.float32)
y = np.linspace(0.01, 0.99, num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, event_ndims=0, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/gumbel_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Permute bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.permute import Permute
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class PermuteBijectorTest(test.TestCase):
"""Tests correctness of the Permute bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
expected_permutation = np.int32([2, 0, 1])
expected_x = np.random.randn(4, 2, 3)
expected_y = expected_x[..., expected_permutation]
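    # Permuting the rightmost dimension has |det J| = 1, so both log det
    # Jacobians should be 0.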
with self.cached_session() as sess:
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
[
permutation_,
x_,
y_,
fldj,
ildj,
] = sess.run([
bijector.permutation,
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x, event_ndims=1),
bijector.inverse_log_det_jacobian(expected_y, event_ndims=1),
], feed_dict={permutation_ph: expected_permutation})
self.assertEqual("permute", bijector.name)
self.assertAllEqual(expected_permutation, permutation_)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(0., fldj, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj, rtol=1e-6, atol=0)
def testRaisesOpError(self):
with self.cached_session() as sess:
with self.assertRaisesOpError("Permutation over `d` must contain"):
permutation_ph = array_ops.placeholder(dtype=dtypes.int32)
bijector = Permute(
permutation=permutation_ph,
validate_args=True)
sess.run(bijector.inverse([1.]),
feed_dict={permutation_ph: [1, 2]})
def testBijectiveAndFinite(self):
permutation = np.int32([2, 0, 1])
x = np.random.randn(4, 2, 3)
y = x[..., permutation]
with self.cached_session():
bijector = Permute(permutation=permutation, validate_args=True)
assert_bijective_and_finite(
bijector, x, y, event_ndims=1, rtol=1e-6, atol=0)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/permute_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MaskedAutoregressiveFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import _gen_mask
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import masked_autoregressive_default_template
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import MaskedAutoregressiveFlow
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
class GenMaskTest(test.TestCase):
def test346Exclusive(self):
expected_mask = np.array(
[[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0]])
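    # With num_blocks=3 the 4 inputs are grouped into ordered blocks and the
    # 6 outputs into blocks of two rows; an "exclusive" mask lets an output
    # block depend only on strictly earlier input blocks.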
mask = _gen_mask(num_blocks=3, n_in=4, n_out=6, mask_type="exclusive")
self.assertAllEqual(expected_mask, mask)
def test346Inclusive(self):
expected_mask = np.array(
[[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[1, 1, 1, 0]])
mask = _gen_mask(num_blocks=3, n_in=4, n_out=6, mask_type="inclusive")
self.assertAllEqual(expected_mask, mask)
class MaskedAutoregressiveFlowTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
@property
def _autoregressive_flow_kwargs(self):
return {
"shift_and_log_scale_fn": masked_autoregressive_default_template(
hidden_layers=[2], shift_only=False),
"is_constant_jacobian": False,
}
def testBijector(self):
x_ = np.arange(3 * 4 * 2).astype(np.float32).reshape(3, 4, 2)
with self.cached_session() as sess:
ma = MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs)
x = constant_op.constant(x_)
forward_x = ma.forward(x)
# Use identity to invalidate cache.
      inverse_y = ma.inverse(array_ops.identity(forward_x))
      forward_inverse_y = ma.forward(inverse_y)
      fldj = ma.forward_log_det_jacobian(x, event_ndims=1)
      # Use identity to invalidate cache.
      ildj = ma.inverse_log_det_jacobian(
          array_ops.identity(forward_x), event_ndims=1)
      variables.global_variables_initializer().run()
      [
          forward_x_,
          inverse_y_,
          forward_inverse_y_,
          ildj_,
          fldj_,
      ] = sess.run([
          forward_x,
          inverse_y,
          forward_inverse_y,
          ildj,
          fldj,
      ])
      self.assertEqual("masked_autoregressive_flow", ma.name)
      # Round-tripping through inverse then forward should reproduce forward_x.
      self.assertAllClose(forward_x_, forward_inverse_y_, rtol=1e-4, atol=0.)
self.assertAllClose(x_, inverse_y_, rtol=1e-5, atol=0.)
self.assertAllClose(ildj_, -fldj_, rtol=1e-6, atol=0.)
def testMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
ma = MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=ma,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
dims = 4
with self.cached_session() as sess:
ma = Invert(MaskedAutoregressiveFlow(
validate_args=True,
**self._autoregressive_flow_kwargs))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=ma,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=1.,
center=0.,
rtol=0.02)
class MaskedAutoregressiveFlowShiftOnlyTest(MaskedAutoregressiveFlowTest):
@property
def _autoregressive_flow_kwargs(self):
return {
"shift_and_log_scale_fn": masked_autoregressive_default_template(
hidden_layers=[2], shift_only=True),
"is_constant_jacobian": True,
}
class MaskedAutoregressiveFlowUnrollLoopTest(MaskedAutoregressiveFlowTest):
@property
def _autoregressive_flow_kwargs(self):
return {
"shift_and_log_scale_fn": masked_autoregressive_default_template(
hidden_layers=[2], shift_only=False),
"is_constant_jacobian": False,
"unroll_loop": True,
}
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/masked_autoregressive_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import gamma as gamma_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class InvertBijectorTest(test.TestCase):
"""Tests the correctness of the Y = Invert(bij) transformation."""
def testBijector(self):
with self.cached_session():
for fwd in [
bijectors.Identity(),
bijectors.Exp(),
bijectors.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
bijectors.Softplus(),
bijectors.SoftmaxCentered(),
]:
rev = bijectors.Invert(fwd)
self.assertEqual("_".join(["invert", fwd.name]), rev.name)
x = [[[1., 2.],
[2., 3.]]]
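        # Invert(fwd) swaps forward/inverse and the two log det Jacobians.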
self.assertAllClose(fwd.inverse(x).eval(), rev.forward(x).eval())
self.assertAllClose(fwd.forward(x).eval(), rev.inverse(x).eval())
self.assertAllClose(
fwd.forward_log_det_jacobian(x, event_ndims=1).eval(),
rev.inverse_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
fwd.inverse_log_det_jacobian(x, event_ndims=1).eval(),
rev.forward_log_det_jacobian(x, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
bijector = bijectors.Invert(bijectors.Exp())
assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def testShapeGetters(self):
with self.cached_session():
bijector = bijectors.Invert(bijectors.SoftmaxCentered(validate_args=True))
x = tensor_shape.TensorShape([2])
y = tensor_shape.TensorShape([1])
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(
y.as_list(),
bijector.forward_event_shape_tensor(x.as_list()).eval())
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(
x.as_list(),
bijector.inverse_event_shape_tensor(y.as_list()).eval())
def testDocstringExample(self):
with self.cached_session():
exp_gamma_distribution = (
transformed_distribution_lib.TransformedDistribution(
distribution=gamma_lib.Gamma(concentration=1., rate=2.),
bijector=bijectors.Invert(bijectors.Exp())))
self.assertAllEqual(
[], array_ops.shape(exp_gamma_distribution.sample()).eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/invert_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chain Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
from tensorflow.contrib.distributions.python.ops.bijectors.chain import Chain
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class ShapeChanging(bijector.Bijector):
"""Only used for op_ndims manipulation."""
def __init__(self, forward_min_event_ndims=0, inverse_min_event_ndims=3):
super(ShapeChanging, self).__init__(
forward_min_event_ndims=forward_min_event_ndims,
inverse_min_event_ndims=inverse_min_event_ndims,
validate_args=False, name="shape_changer")
class ChainBijectorTest(test.TestCase):
"""Tests the correctness of the Y = Chain(bij1, bij2, bij3) transformation."""
def testBijector(self):
with self.cached_session():
chain = Chain((Exp(), Softplus()))
self.assertEqual("chain_of_exp_of_softplus", chain.name)
x = np.asarray([[[1., 2.],
[2., 3.]]])
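      # Chain applies bijectors right to left on forward, so
      # forward(x) = Exp(Softplus(x)) = exp(log(1 + exp(x))) = 1 + exp(x).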
self.assertAllClose(1. + np.exp(x), chain.forward(x).eval())
self.assertAllClose(np.log(x - 1.), chain.inverse(x).eval())
self.assertAllClose(
-np.sum(np.log(x - 1.), axis=2),
chain.inverse_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
np.sum(x, axis=2),
chain.forward_log_det_jacobian(x, event_ndims=1).eval())
def testBijectorIdentity(self):
with self.cached_session():
chain = Chain()
self.assertEqual("identity", chain.name)
x = np.asarray([[[1., 2.],
[2., 3.]]])
self.assertAllClose(x, chain.forward(x).eval())
self.assertAllClose(x, chain.inverse(x).eval())
self.assertAllClose(
0., chain.inverse_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
0., chain.forward_log_det_jacobian(x, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
chain = Chain((Exp(), Softplus()))
assert_scalar_congruency(
chain, lower_x=1e-3, upper_x=1.5, rtol=0.05)
def testShapeGetters(self):
with self.cached_session():
chain = Chain([
SoftmaxCentered(validate_args=True),
SoftmaxCentered(validate_args=True),
])
x = tensor_shape.TensorShape([1])
y = tensor_shape.TensorShape([2 + 1])
self.assertAllEqual(y, chain.forward_event_shape(x))
self.assertAllEqual(
y.as_list(),
chain.forward_event_shape_tensor(x.as_list()).eval())
self.assertAllEqual(x, chain.inverse_event_shape(y))
self.assertAllEqual(
x.as_list(),
chain.inverse_event_shape_tensor(y.as_list()).eval())
def testMinEventNdimsChain(self):
chain = Chain([Exp(), Exp(), Exp()])
self.assertEqual(0, chain.forward_min_event_ndims)
self.assertEqual(0, chain.inverse_min_event_ndims)
chain = Chain([Affine(), Affine(), Affine()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
chain = Chain([Exp(), Affine()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
chain = Chain([Affine(), Exp()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
chain = Chain([Affine(), Exp(), Softplus(), Affine()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
def testMinEventNdimsShapeChangingAddDims(self):
chain = Chain([ShapeChanging()])
self.assertEqual(0, chain.forward_min_event_ndims)
self.assertEqual(3, chain.inverse_min_event_ndims)
chain = Chain([ShapeChanging(), Affine()])
self.assertEqual(1, chain.forward_min_event_ndims)
self.assertEqual(4, chain.inverse_min_event_ndims)
chain = Chain([Affine(), ShapeChanging()])
self.assertEqual(0, chain.forward_min_event_ndims)
self.assertEqual(3, chain.inverse_min_event_ndims)
chain = Chain([ShapeChanging(), ShapeChanging()])
self.assertEqual(0, chain.forward_min_event_ndims)
self.assertEqual(6, chain.inverse_min_event_ndims)
def testMinEventNdimsShapeChangingRemoveDims(self):
chain = Chain([ShapeChanging(3, 0)])
self.assertEqual(3, chain.forward_min_event_ndims)
self.assertEqual(0, chain.inverse_min_event_ndims)
chain = Chain([ShapeChanging(3, 0), Affine()])
self.assertEqual(3, chain.forward_min_event_ndims)
self.assertEqual(0, chain.inverse_min_event_ndims)
chain = Chain([Affine(), ShapeChanging(3, 0)])
self.assertEqual(4, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
chain = Chain([ShapeChanging(3, 0), ShapeChanging(3, 0)])
self.assertEqual(6, chain.forward_min_event_ndims)
self.assertEqual(0, chain.inverse_min_event_ndims)
def testMinEventNdimsShapeChangingAddRemoveDims(self):
chain = Chain([
ShapeChanging(2, 1),
ShapeChanging(3, 0),
ShapeChanging(1, 2)])
self.assertEqual(4, chain.forward_min_event_ndims)
self.assertEqual(1, chain.inverse_min_event_ndims)
def testChainExpAffine(self):
scale_diag = np.array([1., 2., 3.], dtype=np.float32)
chain = Chain([Exp(), Affine(scale_diag=scale_diag)])
x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
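    # Affine acts first: scale_diag * x = [0, 2 * log(2), 3 * log(3)];
    # Exp then yields [1, 4, 27].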
y = [1., 4., 27.]
self.assertAllClose(y, self.evaluate(chain.forward(x)))
self.assertAllClose(x, self.evaluate(chain.inverse(y)))
self.assertAllClose(
np.log(6, dtype=np.float32) + np.sum(scale_diag * x),
self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
-np.log(6, dtype=np.float32) - np.sum(scale_diag * x),
self.evaluate(chain.inverse_log_det_jacobian(y, event_ndims=1)))
def testChainAffineExp(self):
scale_diag = np.array([1., 2., 3.], dtype=np.float32)
chain = Chain([Affine(scale_diag=scale_diag), Exp()])
x = [0., np.log(2., dtype=np.float32), np.log(3., dtype=np.float32)]
y = [1., 4., 9.]
self.assertAllClose(y, self.evaluate(chain.forward(x)))
self.assertAllClose(x, self.evaluate(chain.inverse(y)))
self.assertAllClose(
np.log(6, dtype=np.float32) + np.sum(x),
self.evaluate(chain.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
-np.log(6, dtype=np.float32) - np.sum(x),
self.evaluate(chain.inverse_log_det_jacobian(y, event_ndims=1)))
def testChainIldjWithPlaceholder(self):
chain = Chain((Exp(), Exp()))
samples = array_ops.placeholder(
dtype=np.float32, shape=[None, 10], name="samples")
ildj = chain.inverse_log_det_jacobian(samples, event_ndims=0)
self.assertTrue(ildj is not None)
with self.cached_session():
ildj.eval({samples: np.zeros([2, 10], np.float32)})
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/chain_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sigmoid Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class SigmoidBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation."""
def testBijector(self):
with self.cached_session():
self.assertEqual("sigmoid", Sigmoid().name)
x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)
y = special.expit(x)
ildj = -np.log(y) - np.log1p(-y)
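      # dy/dx = y * (1 - y) for the sigmoid, so the elementwise ildj is
      # -log(y) - log(1 - y), which is what the line above computes.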
bijector = Sigmoid()
self.assertAllClose(y, bijector.forward(x).eval(), atol=0., rtol=1e-2)
self.assertAllClose(x, bijector.inverse(y).eval(), atol=0., rtol=1e-4)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval(), atol=0., rtol=1e-6)
self.assertAllClose(-ildj, bijector.forward_log_det_jacobian(
x, event_ndims=0).eval(), atol=0., rtol=1e-4)
def testScalarCongruency(self):
with self.cached_session():
assert_scalar_congruency(Sigmoid(), lower_x=-7., upper_x=7.)
def testBijectiveAndFinite(self):
with self.cached_session():
x = np.linspace(-7., 7., 100).astype(np.float32)
eps = 1e-3
y = np.linspace(eps, 1. - eps, 100).astype(np.float32)
assert_bijective_and_finite(
Sigmoid(), x, y, event_ndims=0, atol=0., rtol=1e-4)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/sigmoid_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchNorm Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.batch_normalization import BatchNormalization
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class BatchNormTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
def _reduction_axes(self, input_shape, event_dims):
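    """Returns all axes of `input_shape` that are not listed in `event_dims`."""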
if isinstance(event_dims, int):
event_dims = [event_dims]
ndims = len(input_shape)
# Convert event_dims to non-negative indexing.
event_dims = list(event_dims)
for idx, x in enumerate(event_dims):
if x < 0:
event_dims[idx] = ndims + x
return tuple(i for i in range(ndims) if i not in event_dims)
def testForwardInverse(self):
"""Tests forward and backward passes with different event shapes.
input_shape: Tuple of shapes for input tensor.
event_dims: Tuple of dimension indices that will be normalized.
training: Boolean of whether bijector runs in training or inference mode.
"""
params = [
((5*2, 4), [-1], False),
((5, 2, 4), [-1], False),
((5, 2, 4), [1, 2], False),
((5, 2, 4), [0, 1], False),
((5*2, 4), [-1], True),
((5, 2, 4), [-1], True),
((5, 2, 4), [1, 2], True),
((5, 2, 4), [0, 1], True)
]
for input_shape, event_dims, training in params:
x_ = np.arange(5 * 4 * 2).astype(np.float32).reshape(input_shape)
with self.cached_session() as sess:
x = constant_op.constant(x_)
        # When training, momentum=0 makes the layer memorize the exact
        # statistics of the last minibatch it normalized (instead of a
        # moving-average update).
layer = normalization.BatchNormalization(
axis=event_dims, momentum=0., epsilon=0.)
batch_norm = BatchNormalization(
batchnorm_layer=layer, training=training)
# Minibatch statistics are saved only after norm_x has been computed.
norm_x = batch_norm.inverse(x)
with ops.control_dependencies(batch_norm.batchnorm.updates):
moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
denorm_x = batch_norm.forward(array_ops.identity(norm_x))
fldj = batch_norm.forward_log_det_jacobian(
x, event_ndims=len(event_dims))
# Use identity to invalidate cache.
ildj = batch_norm.inverse_log_det_jacobian(
array_ops.identity(denorm_x), event_ndims=len(event_dims))
variables.global_variables_initializer().run()
# Update variables.
norm_x_ = sess.run(norm_x)
[
norm_x_,
moving_mean_,
moving_var_,
denorm_x_,
ildj_,
fldj_,
] = sess.run([
norm_x,
moving_mean,
moving_var,
denorm_x,
ildj,
fldj,
])
self.assertEqual("batch_normalization", batch_norm.name)
reduction_axes = self._reduction_axes(input_shape, event_dims)
keepdims = len(event_dims) > 1
expected_batch_mean = np.mean(
x_, axis=reduction_axes, keepdims=keepdims)
expected_batch_var = np.var(x_, axis=reduction_axes, keepdims=keepdims)
if training:
# When training=True, values become normalized across batch dim and
# original values are recovered after de-normalizing.
zeros = np.zeros_like(norm_x_)
self.assertAllClose(np.mean(zeros, axis=reduction_axes),
np.mean(norm_x_, axis=reduction_axes))
self.assertAllClose(expected_batch_mean, moving_mean_)
self.assertAllClose(expected_batch_var, moving_var_)
self.assertAllClose(x_, denorm_x_, atol=1e-5)
# Since moving statistics are set to batch statistics after
# normalization, ildj and -fldj should match.
self.assertAllClose(ildj_, -fldj_)
# ildj is computed with minibatch statistics.
expected_ildj = np.sum(np.log(1.) - .5 * np.log(
expected_batch_var + batch_norm.batchnorm.epsilon))
self.assertAllClose(expected_ildj, ildj_)
else:
# When training=False, moving_mean, moving_var remain at their
# initialized values (0., 1.), resulting in no scale/shift (a small
# shift occurs if epsilon > 0.)
self.assertAllClose(x_, norm_x_)
self.assertAllClose(x_, denorm_x_, atol=1e-5)
# ildj is computed with saved statistics.
expected_ildj = np.sum(
np.log(1.) - .5 * np.log(1. + batch_norm.batchnorm.epsilon))
self.assertAllClose(expected_ildj, ildj_)
def testMaximumLikelihoodTraining(self):
# Test Maximum Likelihood training with default bijector.
with self.cached_session() as sess:
base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
batch_norm = BatchNormalization(training=True)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=base_dist,
bijector=batch_norm)
target_dist = distributions.MultivariateNormalDiag(loc=[1., 2.])
target_samples = target_dist.sample(100)
dist_samples = dist.sample(3000)
loss = -math_ops.reduce_mean(dist.log_prob(target_samples))
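      # The loss is the negative log-likelihood of samples from the target
      # distribution under the flow; minimizing it should pull the moving
      # statistics toward the target's mean and unit variance, which the
      # assertions at the end of this test check.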
with ops.control_dependencies(batch_norm.batchnorm.updates):
train_op = adam.AdamOptimizer(1e-2).minimize(loss)
moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
variables.global_variables_initializer().run()
for _ in range(3000):
sess.run(train_op)
[
dist_samples_,
moving_mean_,
moving_var_
] = sess.run([
dist_samples,
moving_mean,
moving_var
])
self.assertAllClose([1., 2.], np.mean(dist_samples_, axis=0), atol=5e-2)
self.assertAllClose([1., 2.], moving_mean_, atol=5e-2)
self.assertAllClose([1., 1.], moving_var_, atol=5e-2)
def testLogProb(self):
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
dist = transformed_distribution_lib.TransformedDistribution(
distribution=base_dist,
bijector=batch_norm,
validate_args=True)
samples = dist.sample(int(1e5))
      # No volume distortion since training=False and the bijector is
      # initialized to the identity transformation.
base_log_prob = base_dist.log_prob(samples)
dist_log_prob = dist.log_prob(samples)
variables.global_variables_initializer().run()
base_log_prob_, dist_log_prob_ = sess.run([base_log_prob, dist_log_prob])
self.assertAllClose(base_log_prob_, dist_log_prob_)
def testMutuallyConsistent(self):
# BatchNorm bijector is only mutually consistent when training=False.
dims = 4
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=batch_norm,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=2.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
# BatchNorm bijector is only mutually consistent when training=False.
dims = 4
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = Invert(
BatchNormalization(batchnorm_layer=layer, training=False))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=batch_norm,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=2.,
center=0.,
rtol=0.02)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/batch_normalization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.softsign import Softsign
from tensorflow.python.framework import test_util
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class SoftsignBijectorTest(test.TestCase):
"""Tests the correctness of the Y = g(X) = X / (1 + |X|) transformation."""
def _softsign(self, x):
return x / (1. + np.abs(x))
def _softsign_ildj_before_reduction(self, y):
"""Inverse log det jacobian, before being reduced."""
return -2. * np.log1p(-np.abs(y))
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_in_graph_and_eager_modes
def testBijectorBounds(self):
bijector = Softsign(validate_args=True)
with self.assertRaisesOpError("greater than -1"):
self.evaluate(bijector.inverse(-3.))
with self.assertRaisesOpError("greater than -1"):
self.evaluate(bijector.inverse_log_det_jacobian(-3., event_ndims=0))
with self.assertRaisesOpError("less than 1"):
self.evaluate(bijector.inverse(3.))
with self.assertRaisesOpError("less than 1"):
self.evaluate(bijector.inverse_log_det_jacobian(3., event_ndims=0))
@test_util.run_in_graph_and_eager_modes
def testBijectorForwardInverse(self):
bijector = Softsign(validate_args=True)
self.assertEqual("softsign", bijector.name)
x = 2. * self._rng.randn(2, 10)
y = self._softsign(x)
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
@test_util.run_in_graph_and_eager_modes
def testBijectorLogDetJacobianEventDimsZero(self):
bijector = Softsign(validate_args=True)
y = self._rng.rand(2, 10)
    # No reduction needed if event_ndims = 0.
ildj = self._softsign_ildj_before_reduction(y)
self.assertAllClose(ildj, self.evaluate(
bijector.inverse_log_det_jacobian(y, event_ndims=0)))
@test_util.run_in_graph_and_eager_modes
def testBijectorForwardInverseEventDimsOne(self):
bijector = Softsign(validate_args=True)
self.assertEqual("softsign", bijector.name)
x = 2. * self._rng.randn(2, 10)
y = self._softsign(x)
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
@test_util.run_in_graph_and_eager_modes
def testBijectorLogDetJacobianEventDimsOne(self):
bijector = Softsign(validate_args=True)
y = self._rng.rand(2, 10)
ildj_before = self._softsign_ildj_before_reduction(y)
ildj = np.sum(ildj_before, axis=1)
self.assertAllClose(
ildj, self.evaluate(
bijector.inverse_log_det_jacobian(y, event_ndims=1)))
def testScalarCongruency(self):
with self.cached_session():
bijector = Softsign(validate_args=True)
assert_scalar_congruency(bijector, lower_x=-20., upper_x=20.)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = Softsign(validate_args=True)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.linspace(-0.99, 0.99, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/softsign_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TransformDiagonal bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class TransformDiagonalBijectorTest(test.TestCase):
"""Tests correctness of the TransformDiagonal bijector."""
def setUp(self):
self._rng = np.random.RandomState(42)
@test_util.run_in_graph_and_eager_modes
def testBijector(self):
x = np.float32(np.random.randn(3, 4, 4))
y = x.copy()
for i in range(x.shape[0]):
np.fill_diagonal(y[i, :, :], np.exp(np.diag(x[i, :, :])))
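    # TransformDiagonal(Exp()) exponentiates only the diagonal of each matrix,
    # so y is x with exp applied along the diagonal, and the log-det-Jacobians
    # below should equal Exp's, evaluated on the diagonal vectors alone.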
exp = bijectors.Exp()
b = bijectors.TransformDiagonal(diag_bijector=exp)
y_ = self.evaluate(b.forward(x))
self.assertAllClose(y, y_)
x_ = self.evaluate(b.inverse(y))
self.assertAllClose(x, x_)
fldj = self.evaluate(b.forward_log_det_jacobian(x, event_ndims=2))
ildj = self.evaluate(b.inverse_log_det_jacobian(y, event_ndims=2))
self.assertAllEqual(
fldj,
self.evaluate(exp.forward_log_det_jacobian(
np.array([np.diag(x_mat) for x_mat in x]),
event_ndims=1)))
self.assertAllEqual(
ildj,
self.evaluate(exp.inverse_log_det_jacobian(
np.array([np.diag(y_mat) for y_mat in y]),
event_ndims=1)))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/transform_diagonal_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MatrixInverseTriL bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MatrixInverseTriLBijectorTest(test.TestCase):
"""Tests the correctness of the Y = inv(tril) transformation."""
  # The inverse of 0 is undefined; since the entries above the main diagonal
  # must be zero, we zero them out after running np.linalg.inv.
  # See: https://github.com/numpy/numpy/issues/11445
def _inv(self, x):
y = np.linalg.inv(x)
    # np.triu_indices only works on 2-D arrays, so we iterate over every 2-D
    # sub-array of the (possibly batched) input.
for idx in np.ndindex(y.shape[0:-2]):
y[idx][np.triu_indices(y[idx].shape[-1], 1)] = 0
return y
def testComputesCorrectValues(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
self.assertEqual("matrix_inverse_tril", inv.name)
x_ = np.array([[0.7, 0., 0.],
[0.1, -1., 0.],
[0.3, 0.25, 0.5]], dtype=np.float32)
x_inv_ = np.linalg.inv(x_)
expected_fldj_ = -6. * np.sum(np.log(np.abs(np.diag(x_))))
y = inv.forward(x_)
x_back = inv.inverse(x_inv_)
fldj = inv.forward_log_det_jacobian(x_, event_ndims=2)
ildj = inv.inverse_log_det_jacobian(x_inv_, event_ndims=2)
y_, x_back_, fldj_, ildj_ = self.evaluate([y, x_back, fldj, ildj])
self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)
self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
def testOneByOneMatrix(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[5.]], dtype=np.float32)
x_inv_ = np.array([[0.2]], dtype=np.float32)
expected_fldj_ = np.log(0.04)
y = inv.forward(x_)
x_back = inv.inverse(x_inv_)
fldj = inv.forward_log_det_jacobian(x_, event_ndims=2)
ildj = inv.inverse_log_det_jacobian(x_inv_, event_ndims=2)
y_, x_back_, fldj_, ildj_ = self.evaluate([y, x_back, fldj, ildj])
self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)
self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
def testZeroByZeroMatrix(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.eye(0, dtype=np.float32)
x_inv_ = np.eye(0, dtype=np.float32)
expected_fldj_ = 0.
y = inv.forward(x_)
x_back = inv.inverse(x_inv_)
fldj = inv.forward_log_det_jacobian(x_, event_ndims=2)
ildj = inv.inverse_log_det_jacobian(x_inv_, event_ndims=2)
y_, x_back_, fldj_, ildj_ = self.evaluate([y, x_back, fldj, ildj])
self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)
self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)
self.assertNear(expected_fldj_, fldj_, err=1e-3)
self.assertNear(-expected_fldj_, ildj_, err=1e-3)
def testBatch(self):
# Test batch computation with input shape (2, 1, 2, 2), i.e. batch shape
# (2, 1).
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[[[1., 0.],
[2., 3.]]],
[[[4., 0.],
[5., -6.]]]], dtype=np.float32)
x_inv_ = self._inv(x_)
expected_fldj_ = -4. * np.sum(
np.log(np.abs(np.diagonal(x_, axis1=-2, axis2=-1))), axis=-1)
y = inv.forward(x_)
x_back = inv.inverse(x_inv_)
fldj = inv.forward_log_det_jacobian(x_, event_ndims=2)
ildj = inv.inverse_log_det_jacobian(x_inv_, event_ndims=2)
y_, x_back_, fldj_, ildj_ = self.evaluate([y, x_back, fldj, ildj])
self.assertAllClose(x_inv_, y_, atol=0., rtol=1e-5)
self.assertAllClose(x_, x_back_, atol=0., rtol=1e-5)
self.assertAllClose(expected_fldj_, fldj_, atol=0., rtol=1e-3)
self.assertAllClose(-expected_fldj_, ildj_, atol=0., rtol=1e-3)
def testErrorOnInputRankTooLow(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([0.1], dtype=np.float32)
rank_error_msg = "must have rank at least 2"
with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
self.evaluate(inv.forward(x_))
with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
self.evaluate(inv.inverse(x_))
with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
with self.assertRaisesWithPredicateMatch(ValueError, rank_error_msg):
self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
# TODO(b/80481923): Figure out why these assertions fail, and fix them.
## def testErrorOnInputNonSquare(self):
## inv = bijectors.MatrixInverseTriL(validate_args=True)
## x_ = np.array([[1., 2., 3.],
## [4., 5., 6.]], dtype=np.float32)
## square_error_msg = "must be a square matrix"
## with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
## square_error_msg):
## self.evaluate(inv.forward(x_))
## with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
## square_error_msg):
## self.evaluate(inv.inverse(x_))
## with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
## square_error_msg):
## self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
## with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
## square_error_msg):
## self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
def testErrorOnInputNotLowerTriangular(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[1., 2.],
[3., 4.]], dtype=np.float32)
triangular_error_msg = "must be lower triangular"
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
triangular_error_msg):
self.evaluate(inv.forward(x_))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
triangular_error_msg):
self.evaluate(inv.inverse(x_))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
triangular_error_msg):
self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
triangular_error_msg):
self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
def testErrorOnInputSingular(self):
inv = bijectors.MatrixInverseTriL(validate_args=True)
x_ = np.array([[1., 0.],
[0., 0.]], dtype=np.float32)
nonsingular_error_msg = "must have all diagonal entries nonzero"
with self.assertRaisesOpError(nonsingular_error_msg):
self.evaluate(inv.forward(x_))
with self.assertRaisesOpError(nonsingular_error_msg):
self.evaluate(inv.inverse(x_))
with self.assertRaisesOpError(nonsingular_error_msg):
self.evaluate(inv.forward_log_det_jacobian(x_, event_ndims=2))
with self.assertRaisesOpError(nonsingular_error_msg):
self.evaluate(inv.inverse_log_det_jacobian(x_, event_ndims=2))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/matrix_inverse_tril_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reshape Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.reshape import Reshape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class _ReshapeBijectorTest(object):
"""Base class for testing the reshape transformation.
Methods defined in this class call a method self.build_shapes() that
is implemented by subclasses defined below, returning respectively
ReshapeBijectorTestStatic: static shapes,
ReshapeBijectorTestDynamic: shape placeholders of known ndims, and
ReshapeBijectorTestDynamicNdims: shape placeholders of unspecified ndims,
so that each test in this base class is automatically run over all
three cases. The subclasses also implement assertRaisesError to test
for either Python exceptions (in the case of static shapes) or
TensorFlow op errors (dynamic shapes).
"""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
"""Do a basic sanity check of forward, inverse, jacobian."""
expected_x = np.random.randn(4, 3, 2)
expected_y = np.reshape(expected_x, [4, 6])
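    # Reshape only rearranges elements, so it is volume preserving: both the
    # forward and inverse log det Jacobians checked below should be exactly 0.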
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([3, 2], [6,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
fldj_,
ildj_) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x, event_ndims=2),
bijector.inverse_log_det_jacobian(expected_y, event_ndims=2),
), feed_dict=feed_dict)
self.assertEqual("reshape", bijector.name)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(0., fldj_, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj_, rtol=1e-6, atol=0)
def testEventShapeTensor(self):
"""Test event_shape_tensor methods when even ndims may be dynamic."""
shape_in_static = [2, 3]
shape_out_static = [6,]
shape_in, shape_out, feed_dict = self.build_shapes(shape_in_static,
shape_out_static)
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in, validate_args=True)
# using the _tensor methods, we should always get a fully-specified
# result since these are evaluated at graph runtime.
with self.cached_session() as sess:
(shape_out_,
shape_in_) = sess.run((
bijector.forward_event_shape_tensor(shape_in),
bijector.inverse_event_shape_tensor(shape_out),
), feed_dict=feed_dict)
self.assertAllEqual(shape_out_static, shape_out_)
self.assertAllEqual(shape_in_static, shape_in_)
def testScalarReshape(self):
"""Test reshaping to and from a scalar shape ()."""
expected_x = np.random.randn(4, 3, 1)
expected_y = np.reshape(expected_x, [4, 3])
expected_x_scalar = np.random.randn(1,)
expected_y_scalar = expected_x_scalar[0]
shape_in, shape_out, feed_dict = self.build_shapes([], [1,])
with self.cached_session() as sess:
bijector = Reshape(
event_shape_out=shape_in,
event_shape_in=shape_out, validate_args=True)
(x_,
y_,
x_scalar_,
y_scalar_
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.inverse(expected_y_scalar),
bijector.forward(expected_x_scalar),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(expected_y_scalar, y_scalar_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x_scalar, x_scalar_, rtol=1e-6, atol=0)
def testMultipleUnspecifiedDimensionsOpError(self):
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [4, -1, -1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(
"elements must have at most one `-1`."):
sess.run(bijector.forward_event_shape_tensor(shape_in),
feed_dict=feed_dict)
# pylint: disable=invalid-name
def _testInvalidDimensionsOpError(self, expected_error_message):
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 2, -2,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward_event_shape_tensor(shape_in),
feed_dict=feed_dict)
# pylint: enable=invalid-name
def testValidButNonMatchingInputOpError(self):
x = np.random.randn(4, 3, 2)
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 6, 1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
      # Here we pass in a tensor (x) whose shape is compatible with the
      # output shape, so tf.reshape will not raise an error, but which does
      # not match the declared input event shape.
with self.assertRaisesError(
"Input `event_shape` does not match `event_shape_in`."):
sess.run(bijector.forward(x),
feed_dict=feed_dict)
def testValidButNonMatchingInputPartiallySpecifiedOpError(self):
x = np.random.randn(4, 3, 2)
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, -1], [1, 6, 1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(
"Input `event_shape` does not match `event_shape_in`."):
sess.run(bijector.forward(x),
feed_dict=feed_dict)
# pylint: disable=invalid-name
def _testInputOutputMismatchOpError(self, expected_error_message):
x1 = np.random.randn(4, 2, 3)
x2 = np.random.randn(4, 1, 1, 5)
with self.cached_session() as sess:
shape_in, shape_out, fd_mismatched = self.build_shapes([2, 3],
[1, 1, 5])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward(x1), feed_dict=fd_mismatched)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.inverse(x2), feed_dict=fd_mismatched)
# pylint: enable=invalid-name
def testOneShapePartiallySpecified(self):
expected_x = np.random.randn(4, 6)
expected_y = np.reshape(expected_x, [4, 2, 3])
with self.cached_session() as sess:
# one of input/output shapes is partially specified
shape_in, shape_out, feed_dict = self.build_shapes([-1,], [2, 3])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testBothShapesPartiallySpecified(self):
expected_x = np.random.randn(4, 2, 3)
expected_y = np.reshape(expected_x, [4, 3, 2])
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([-1, 3], [-1, 2])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testDefaultVectorShape(self):
expected_x = np.random.randn(4, 4)
expected_y = np.reshape(expected_x, [4, 2, 2])
with self.cached_session() as sess:
_, shape_out, feed_dict = self.build_shapes([-1,], [-1, 2])
bijector = Reshape(shape_out,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def build_shapes(self, *args, **kwargs):
raise NotImplementedError("Subclass failed to implement `build_shapes`.")
class ReshapeBijectorTestStatic(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_static = shape_in
shape_out_static = shape_out
feed_dict = {}
return shape_in_static, shape_out_static, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesRegexp(Exception, msg)
def testEventShape(self):
shape_in_static = tensor_shape.TensorShape([2, 3])
shape_out_static = tensor_shape.TensorShape([6,])
bijector = Reshape(
event_shape_out=shape_out_static,
event_shape_in=shape_in_static, validate_args=True)
# test that forward_ and inverse_event_shape do sensible things
# when shapes are statically known.
self.assertEqual(
bijector.forward_event_shape(shape_in_static),
shape_out_static)
self.assertEqual(
bijector.inverse_event_shape(shape_out_static),
shape_in_static)
def testBijectiveAndFinite(self):
x = np.random.randn(4, 2, 3)
y = np.reshape(x, [4, 1, 2, 3])
with self.cached_session():
bijector = Reshape(
event_shape_in=[2, 3],
event_shape_out=[1, 2, 3],
validate_args=True)
assert_bijective_and_finite(
bijector, x, y, event_ndims=2, rtol=1e-6, atol=0)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"Invalid value in tensor used for shape: -2")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Cannot reshape a tensor with")
class ReshapeBijectorTestDynamic(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_ph = array_ops.placeholder(shape=(len(shape_in),),
dtype=dtypes.int32)
shape_out_ph = array_ops.placeholder(shape=(len(shape_out),),
dtype=dtypes.int32)
feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}
return shape_in_ph, shape_out_ph, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"elements must be either positive integers or `-1`.")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
class ReshapeBijectorTestDynamicNdims(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)
shape_out_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)
feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}
return shape_in_ph, shape_out_ph, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"elements must be either positive integers or `-1`.")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/reshape_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SinhArcsinh Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# pylint: disable=g-importing-member
from tensorflow.contrib.distributions.python.ops.bijectors.sinh_arcsinh import SinhArcsinh
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
# pylint: enable=g-importing-member
class SinhArcsinhBijectorTest(test.TestCase):
"""Tests correctness of the power transformation."""
def testBijectorVersusNumpyRewriteOfBasicFunctions(self):
with self.cached_session():
skewness = 0.2
tailweight = 2.0
bijector = SinhArcsinh(
skewness=skewness,
tailweight=tailweight,
validate_args=True)
self.assertEqual("SinhArcsinh", bijector.name)
x = np.array([[[-2.01], [2.], [1e-4]]]).astype(np.float32)
y = np.sinh((np.arcsinh(x) + skewness) * tailweight)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
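      # Inverse: x = sinh(arcsinh(y) / tailweight - skewness), so
      # dx/dy = cosh(arcsinh(y) / tailweight - skewness)
      #     / (tailweight * sqrt(1 + y**2)),
      # whose log, summed over the event, is the ildj asserted below.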
self.assertAllClose(
np.sum(
np.log(np.cosh(np.arcsinh(y) / tailweight - skewness)) -
np.log(tailweight) - np.log(np.sqrt(y**2 + 1)),
axis=-1),
bijector.inverse_log_det_jacobian(y, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=1).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=1).eval(),
rtol=1e-4,
atol=0.)
def testLargerTailWeightPutsMoreWeightInTails(self):
with self.cached_session():
# Will broadcast together to shape [3, 2].
x = [-1., 1.]
tailweight = [[0.5], [1.0], [2.0]]
bijector = SinhArcsinh(tailweight=tailweight, validate_args=True)
y = bijector.forward(x).eval()
# x = -1, 1 should be mapped to points symmetric about 0
self.assertAllClose(y[:, 0], -1. * y[:, 1])
# forward(1) should increase as tailweight increases, since higher
# tailweight should map 1 to a larger number.
forward_1 = y[:, 1] # The positive values of y.
self.assertLess(forward_1[0], forward_1[1])
self.assertLess(forward_1[1], forward_1[2])
def testSkew(self):
with self.cached_session():
# Will broadcast together to shape [3, 2].
x = [-1., 1.]
skewness = [[-1.], [0.], [1.]]
bijector = SinhArcsinh(skewness=skewness, validate_args=True)
y = bijector.forward(x).eval()
# For skew < 0, |forward(-1)| > |forward(1)|
self.assertGreater(np.abs(y[0, 0]), np.abs(y[0, 1]))
# For skew = 0, |forward(-1)| = |forward(1)|
self.assertAllClose(np.abs(y[1, 0]), np.abs(y[1, 1]))
# For skew > 0, |forward(-1)| < |forward(1)|
self.assertLess(np.abs(y[2, 0]), np.abs(y[2, 1]))
def testScalarCongruencySkewness1Tailweight0p5(self):
with self.cached_session():
bijector = SinhArcsinh(skewness=1.0, tailweight=0.5, validate_args=True)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.0, rtol=0.05)
def testScalarCongruencySkewnessNeg1Tailweight1p5(self):
with self.cached_session():
bijector = SinhArcsinh(skewness=-1.0, tailweight=1.5, validate_args=True)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.0, rtol=0.05)
def testBijectiveAndFiniteSkewnessNeg1Tailweight0p5(self):
with self.cached_session():
bijector = SinhArcsinh(skewness=-1., tailweight=0.5, validate_args=True)
x = np.concatenate((-np.logspace(-2, 10, 1000), [0], np.logspace(
-2, 10, 1000))).astype(np.float32)
assert_bijective_and_finite(bijector, x, x, event_ndims=0, rtol=1e-3)
def testBijectiveAndFiniteSkewness1Tailweight3(self):
with self.cached_session():
bijector = SinhArcsinh(skewness=1., tailweight=3., validate_args=True)
x = np.concatenate((-np.logspace(-2, 5, 1000), [0], np.logspace(
-2, 5, 1000))).astype(np.float32)
assert_bijective_and_finite(
bijector, x, x, event_ndims=0, rtol=1e-3)
def testBijectorEndpoints(self):
with self.cached_session():
for dtype in (np.float32, np.float64):
bijector = SinhArcsinh(
skewness=dtype(0.), tailweight=dtype(1.), validate_args=True)
bounds = np.array(
[np.finfo(dtype).min, np.finfo(dtype).max], dtype=dtype)
# Note that the above bijector is the identity bijector. Hence, the
# log_det_jacobian will be 0. Because of this we use atol.
assert_bijective_and_finite(
bijector, bounds, bounds, event_ndims=0, atol=2e-6)
def testBijectorOverRange(self):
with self.cached_session():
for dtype in (np.float32, np.float64):
skewness = np.array([1.2, 5.], dtype=dtype)
tailweight = np.array([2., 10.], dtype=dtype)
# The inverse will be defined up to where sinh is valid, which is
# arcsinh(np.finfo(dtype).max).
log_boundary = np.log(
np.sinh(np.arcsinh(np.finfo(dtype).max) / tailweight - skewness))
x = np.array([
np.logspace(-2, log_boundary[0], base=np.e, num=1000),
np.logspace(-2, log_boundary[1], base=np.e, num=1000)
], dtype=dtype)
# Ensure broadcasting works.
x = np.swapaxes(x, 0, 1)
y = np.sinh((np.arcsinh(x) + skewness) * tailweight)
bijector = SinhArcsinh(
skewness=skewness, tailweight=tailweight, validate_args=True)
self.assertAllClose(y, bijector.forward(x).eval(), rtol=1e-4, atol=0.)
self.assertAllClose(x, bijector.inverse(y).eval(), rtol=1e-4, atol=0.)
        # On IBM PPC systems, longdouble (np.float128) is the same as double,
        # except that it can carry more precision. A double, being 8 bytes,
        # cannot hold the square of the float64 max (also 8 bytes), so the
        # test below would fail with an overflow yielding inf. This check
        # avoids that by skipping the squaring and the corresponding assert.
if np.amax(y) <= np.sqrt(np.finfo(np.float128).max) and \
np.fabs(np.amin(y)) <= np.sqrt(np.fabs(np.finfo(np.float128).min)):
# Do the numpy calculation in float128 to avoid inf/nan.
y_float128 = np.float128(y)
self.assertAllClose(
np.log(np.cosh(
np.arcsinh(y_float128) / tailweight - skewness) / np.sqrt(
y_float128**2 + 1)) -
np.log(tailweight),
bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
rtol=1e-4,
atol=0.)
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=0).eval(),
rtol=1e-4,
atol=0.)
def testZeroTailweightRaises(self):
with self.cached_session():
with self.assertRaisesOpError("not positive"):
SinhArcsinh(tailweight=0., validate_args=True).forward(1.0).eval()
def testDefaultDtypeIsFloat32(self):
with self.cached_session():
bijector = SinhArcsinh()
self.assertEqual(bijector.tailweight.dtype, np.float32)
self.assertEqual(bijector.skewness.dtype, np.float32)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/sinh_arcsinh_bijector_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.power_transform import PowerTransform
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class PowerTransformBijectorTest(test.TestCase):
"""Tests correctness of the power transformation."""
def testBijector(self):
with self.cached_session():
c = 0.2
bijector = PowerTransform(power=c, validate_args=True)
self.assertEqual("power_transform", bijector.name)
x = np.array([[[-1.], [2.], [-5. + 1e-4]]])
y = (1. + x * c)**(1. / c)
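      # Inverse: x = (y**c - 1) / c, so dx/dy = y**(c - 1) and the ildj
      # (summed over the event) is (c - 1) * sum(log(y)), as asserted below.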
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
(c - 1.) * np.sum(np.log(y), axis=-1),
bijector.inverse_log_det_jacobian(y, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=1).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=1).eval(),
rtol=1e-4,
atol=0.)
def testScalarCongruency(self):
with self.cached_session():
bijector = PowerTransform(power=0.2, validate_args=True)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = PowerTransform(power=0.2, validate_args=True)
x = np.linspace(-4.999, 10, num=10).astype(np.float32)
y = np.logspace(0.001, 10, num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, event_ndims=0, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class AffineBijectorTest(test.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
with self.cached_session():
mu = -1.
# scale corresponds to 1.
bijector = Affine(shift=mu)
self.assertEqual("affine", bijector.name)
def testNoBatchMultivariateIdentity(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[1., 0], [0, 1.]]
bijector = Affine(shift=mu)
x = [1., 1]
        # matmul(sigma, x) + shift
        # = [1, 1] + [1, -1] = [2, 0]
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
# x is a 2-batch of 2-vectors.
# The first vector is [1, 1], the second is [-1, -1].
# Each undergoes matmul(sigma, x) + shift.
x = [[1., 1], [-1., -1]]
self.assertAllClose([[2., 0], [0., -2]], run(bijector.forward, x))
self.assertAllClose([[0., 2], [-2., 0]], run(bijector.inverse, x))
self.assertAllClose(
0., run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateDiag(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Multivariate
# Corresponds to scale = [[2., 0], [0, 1.]]
bijector = Affine(shift=mu, scale_diag=[2., 1])
x = [1., 1]
        # matmul(sigma, x) + shift
        # = [2, 1] + [1, -1] = [3, 0]
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
# Reset bijector.
bijector = Affine(shift=mu, scale_diag=[2., 1])
# x is a 2-batch of 2-vectors.
# The first vector is [1, 1], the second is [-1, -1].
# Each undergoes matmul(sigma, x) + shift.
x = [[1., 1],
[-1., -1]]
self.assertAllClose([[3., 0],
[-1., -2]],
run(bijector.forward, x))
self.assertAllClose([[0., 2],
[-1., 0]],
run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateFullDynamic(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, name="x")
mu = array_ops.placeholder(dtypes.float32, name="mu")
scale_diag = array_ops.placeholder(dtypes.float32, name="scale_diag")
x_value = np.array([[1., 1]], dtype=np.float32)
mu_value = np.array([1., -1], dtype=np.float32)
scale_diag_value = np.array([2., 2], dtype=np.float32)
feed_dict = {
x: x_value,
mu: mu_value,
scale_diag: scale_diag_value,
}
bijector = Affine(shift=mu, scale_diag=scale_diag)
self.assertAllClose([[3., 1]], sess.run(bijector.forward(x), feed_dict))
self.assertAllClose([[0., 1]], sess.run(bijector.inverse(x), feed_dict))
self.assertAllClose(
-np.log(4),
sess.run(bijector.inverse_log_det_jacobian(x, event_ndims=1),
feed_dict))
def testBatchMultivariateIdentity(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale = 2.
bijector = Affine(shift=mu, scale_identity_multiplier=scale)
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(4),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testBatchMultivariateDiag(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = [[1., -1]]
# Corresponds to 1 2x2 matrix, with twos on the diagonal.
scale_diag = [[2., 2]]
bijector = Affine(shift=mu, scale_diag=scale_diag)
x = [[[1., 1]]]
self.assertAllClose([[[3., 1]]], run(bijector.forward, x))
self.assertAllClose([[[0., 1]]], run(bijector.inverse, x))
self.assertAllClose(
[-np.log(4)],
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testBatchMultivariateFullDynamic(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, name="x")
mu = array_ops.placeholder(dtypes.float32, name="mu")
scale_diag = array_ops.placeholder(dtypes.float32, name="scale_diag")
x_value = np.array([[[1., 1]]], dtype=np.float32)
mu_value = np.array([[1., -1]], dtype=np.float32)
scale_diag_value = np.array([[2., 2]], dtype=np.float32)
feed_dict = {
x: x_value,
mu: mu_value,
scale_diag: scale_diag_value,
}
bijector = Affine(shift=mu, scale_diag=scale_diag)
self.assertAllClose([[[3., 1]]], sess.run(bijector.forward(x), feed_dict))
self.assertAllClose([[[0., 1]]], sess.run(bijector.inverse(x), feed_dict))
self.assertAllClose(
[-np.log(4)],
sess.run(bijector.inverse_log_det_jacobian(
x, event_ndims=1), feed_dict))
def testIdentityWithDiagUpdate(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_diag=[1., 1., 1.])
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose(
-np.log(2.**3),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityWithTriL(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 2]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.,
scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[1., 5]], run(bijector.forward, x))
self.assertAllClose([[1., 0.5]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(4.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testDiagWithTriL(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[2., 0], [2, 3]]
bijector = Affine(
shift=mu, scale_diag=[1., 2.], scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[1., 7]], run(bijector.forward, x))
self.assertAllClose([[1., 1 / 3.]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(6.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityAndDiagWithTriL(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# scale = [[3., 0], [2, 4]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=1.0,
scale_diag=[1., 2.],
scale_tril=[[1., 0], [2., 1]])
x = [[1., 2]] # One multivariate sample.
self.assertAllClose([[2., 9]], run(bijector.forward, x))
self.assertAllClose([[2 / 3., 5 / 12.]], run(bijector.inverse, x))
self.assertAllClose(
-np.log(12.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
def testIdentityWithVDVTUpdate(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 2, 0], [0, 0, 3]]
bijector = Affine(
shift=mu,
scale_identity_multiplier=2.,
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
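        # V @ diag(d) @ V.T with V = [[2, 0], [0, 0], [0, 1]] and d = [2, 1]
        # adds [[8, 0, 0], [0, 0, 0], [0, 0, 1]] to 2 * I, giving the diagonal
        # scale [10, 2, 3] used by bijector_ref below.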
bijector_ref = Affine(shift=mu, scale_diag=[10., 2, 3])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 3, 8], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 1.5, 4 / 3.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(60.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testDiagWithVDVTUpdate(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [0, 3, 0], [0, 0, 5]]
bijector = Affine(
shift=mu,
scale_diag=[2., 3, 4],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(shift=mu, scale_diag=[10., 3, 5])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 5, 14], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 1., 0.8], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(150.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testTriLWithVDVTUpdate(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[10, 0, 0], [1, 3, 0], [2, 3, 5]]
bijector = Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=[2., 1],
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(
shift=mu, scale_tril=[[10., 0, 0], [1, 3, 0], [2, 3, 5]])
x = [1., 2, 3] # Vector.
self.assertAllClose([9., 6, 22], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([0.2, 14 / 15., 4 / 25.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(150.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testTriLWithVDVTUpdateNoDiagonal(self):
with self.cached_session() as sess:
placeholder = array_ops.placeholder(dtypes.float32, name="x")
def static_run(fun, x, **kwargs):
return fun(x, **kwargs).eval()
def dynamic_run(fun, x_value, **kwargs):
x_value = np.array(x_value)
return sess.run(
fun(placeholder, **kwargs), feed_dict={placeholder: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = [[6, 0, 0], [1, 3, 0], [2, 3, 5]]
bijector = Affine(
shift=mu,
scale_tril=[[2., 0, 0], [1, 3, 0], [2, 3, 4]],
scale_perturb_diag=None,
scale_perturb_factor=[[2., 0], [0., 0], [0, 1]])
bijector_ref = Affine(
shift=mu, scale_tril=[[6., 0, 0], [1, 3, 0], [2, 3, 5]])
x = [1., 2, 3] # Vector.
self.assertAllClose([5., 6, 22], run(bijector.forward, x))
self.assertAllClose(
run(bijector_ref.forward, x), run(bijector.forward, x))
self.assertAllClose([1 / 3., 8 / 9., 4 / 30.], run(bijector.inverse, x))
self.assertAllClose(
run(bijector_ref.inverse, x), run(bijector.inverse, x))
self.assertAllClose(
-np.log(90.),
run(bijector.inverse_log_det_jacobian, x, event_ndims=1))
self.assertAllClose(
run(bijector.inverse_log_det_jacobian, x, event_ndims=1),
run(bijector_ref.inverse_log_det_jacobian, x, event_ndims=1))
def testNoBatchMultivariateRaisesWhenSingular(self):
with self.cached_session():
mu = [1., -1]
bijector = Affine(
shift=mu,
# Has zero on the diagonal.
scale_diag=[0., 1],
validate_args=True)
with self.assertRaisesOpError("diagonal part must be non-zero"):
bijector.forward([1., 1.]).eval()
def _makeScale(self,
x,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None):
"""Create a scale matrix. Return None if it can not be created."""
c = scale_identity_multiplier
d1 = scale_diag
tril = scale_tril
v = scale_perturb_factor
d2 = scale_perturb_diag
# Ambiguous low rank update.
if v is None and d2 is not None:
return None
if c is None and d1 is None and tril is None:
# Special case when no scale args are passed in. This means use an
# identity matrix.
c = 1.
matrix = np.float32(0.)
if c is not None:
# Infer the dimension from x.
matrix += c * self._matrix_diag(np.ones_like(x))
if d1 is not None:
matrix += self._matrix_diag(np.array(d1, dtype=np.float32))
if tril is not None:
matrix += np.array(tril, dtype=np.float32)
if v is not None:
v = np.array(v, dtype=np.float32)
if v.ndim < 2:
vt = v.T
else:
vt = np.swapaxes(v, axis1=v.ndim - 2, axis2=v.ndim - 1)
if d2 is not None:
d2 = self._matrix_diag(np.array(d2, dtype=np.float32))
right = np.matmul(d2, vt)
else:
right = vt
matrix += np.matmul(v, right)
return matrix
def _matrix_diag(self, d):
"""Batch version of np.diag."""
orig_shape = d.shape
d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))
diag_list = []
for i in range(d.shape[0]):
diag_list.append(np.diag(d[i, ...]))
return np.reshape(diag_list, orig_shape + (d.shape[-1],))
def _testLegalInputs(self, shift=None, scale_params=None, x=None):
def _powerset(x):
s = list(x)
return itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1))
for args in _powerset(scale_params.items()):
with self.cached_session():
args = dict(args)
scale_args = dict({"x": x}, **args)
scale = self._makeScale(**scale_args)
# We haven't specified enough information for the scale.
if scale is None:
with self.assertRaisesRegexp(ValueError, ("must be specified.")):
bijector = Affine(shift=shift, **args)
else:
bijector = Affine(shift=shift, **args)
np_x = x
# For the case a vector is passed in, we need to make the shape
# match the matrix for matmul to work.
if x.ndim == scale.ndim - 1:
np_x = np.expand_dims(x, axis=-1)
forward = np.matmul(scale, np_x) + shift
if x.ndim == scale.ndim - 1:
forward = np.squeeze(forward, axis=-1)
self.assertAllClose(forward, bijector.forward(x).eval())
backward = np.linalg.solve(scale, np_x - shift)
if x.ndim == scale.ndim - 1:
backward = np.squeeze(backward, axis=-1)
self.assertAllClose(backward, bijector.inverse(x).eval())
scale *= np.ones(shape=x.shape[:-1], dtype=scale.dtype)
ildj = -np.log(np.abs(np.linalg.det(scale)))
# TODO(jvdillon): We need to make it so the scale_identity_multiplier
# case does not deviate in expected shape. Fixing this will get rid of
# these special cases.
if (ildj.ndim > 0 and (len(scale_args) == 1 or (
len(scale_args) == 2 and
scale_args.get("scale_identity_multiplier", None) is not None))):
ildj = np.squeeze(ildj[0])
elif ildj.ndim < scale.ndim - 2:
ildj = np.reshape(ildj, scale.shape[0:-2])
self.assertAllClose(
ildj, bijector.inverse_log_det_jacobian(x, event_ndims=1).eval())
def testLegalInputs(self):
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [2., 3.],
"scale_tril": [[1., 0.],
[-3., 3.]],
"scale_perturb_factor": [[1., 0],
[1.5, 3.]],
"scale_perturb_diag": [3., 1.]
},
x=np.array(
[1., 2], dtype=np.float32))
def testLegalInputsWithBatch(self):
# Shape of scale is [2, 1, 2, 2]
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_identity_multiplier": 2.,
"scale_diag": [[[2., 3.]], [[1., 2]]],
"scale_tril": [[[[1., 0.], [-3., 3.]]], [[[0.5, 0.], [1., 1.]]]],
"scale_perturb_factor": [[[[1., 0], [1.5, 3.]]],
[[[1., 0], [1., 1.]]]],
"scale_perturb_diag": [[[3., 1.]], [[0.5, 1.]]]
},
x=np.array(
[[[1., 2]], [[3., 4]]], dtype=np.float32))
def testNegativeDetTrilPlusVDVT(self):
# scale = [[3.7, 2.7],
# [-0.3, -1.3]]
# inv(scale) = [[0.325, 0.675],
# [-0.075, -0.925]]
# eig(scale) = [3.5324, -1.1324]
self._testLegalInputs(
shift=np.float32(-1),
scale_params={
"scale_tril": [[1., 0], [-3, -4]],
"scale_perturb_factor": [[0.1, 0], [0.5, 0.3]],
"scale_perturb_diag": [3., 1]
},
x=np.array(
[1., 2], dtype=np.float32))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
rng = np.random.RandomState(42)
class SoftmaxCenteredBijectorTest(test.TestCase):
"""Tests correctness of the Y = g(X) = exp(X) / sum(exp(X)) transformation."""
def testBijectorVector(self):
with self.cached_session():
softmax = SoftmaxCentered()
self.assertEqual("softmax_centered", softmax.name)
x = np.log([[2., 3, 4], [4., 8, 12]])
y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]
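      # SoftmaxCentered appends an implicit zero logit before normalizing, so
      # x = log([2, 3, 4]) maps to [2, 3, 4, 1] / 10 = [0.2, 0.3, 0.4, 0.1] and
      # x = log([4, 8, 12]) maps to [4, 8, 12, 1] / 25.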
self.assertAllClose(y, softmax.forward(x).eval())
self.assertAllClose(x, softmax.inverse(y).eval())
self.assertAllClose(
-np.sum(np.log(y), axis=1),
softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(),
atol=0.,
rtol=1e-7)
self.assertAllClose(
-softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(),
softmax.forward_log_det_jacobian(x, event_ndims=1).eval(),
atol=0.,
rtol=1e-7)
def testBijectorUnknownShape(self):
with self.cached_session():
softmax = SoftmaxCentered()
self.assertEqual("softmax_centered", softmax.name)
x = array_ops.placeholder(shape=[2, None], dtype=dtypes.float32)
real_x = np.log([[2., 3, 4], [4., 8, 12]])
y = array_ops.placeholder(shape=[2, None], dtype=dtypes.float32)
real_y = [[0.2, 0.3, 0.4, 0.1], [0.16, 0.32, 0.48, 0.04]]
self.assertAllClose(real_y, softmax.forward(x).eval(
feed_dict={x: real_x}))
self.assertAllClose(real_x, softmax.inverse(y).eval(
feed_dict={y: real_y}))
self.assertAllClose(
-np.sum(np.log(real_y), axis=1),
softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(
feed_dict={y: real_y}),
atol=0.,
rtol=1e-7)
self.assertAllClose(
-softmax.inverse_log_det_jacobian(y, event_ndims=1).eval(
feed_dict={y: real_y}),
softmax.forward_log_det_jacobian(x, event_ndims=1).eval(
feed_dict={x: real_x}),
atol=0.,
rtol=1e-7)
def testShapeGetters(self):
with self.cached_session():
x = tensor_shape.TensorShape([4])
y = tensor_shape.TensorShape([5])
bijector = SoftmaxCentered(validate_args=True)
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(y.as_list(),
bijector.forward_event_shape_tensor(
x.as_list()).eval())
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(x.as_list(),
bijector.inverse_event_shape_tensor(
y.as_list()).eval())
def testBijectiveAndFinite(self):
with self.cached_session():
softmax = SoftmaxCentered()
x = np.linspace(-50, 50, num=10).reshape(5, 2).astype(np.float32)
# Make y values on the simplex with a wide range.
y_0 = np.ones(5).astype(np.float32)
y_1 = (1e-5 * rng.rand(5)).astype(np.float32)
y_2 = (1e1 * rng.rand(5)).astype(np.float32)
y = np.array([y_0, y_1, y_2])
y /= y.sum(axis=0)
y = y.T # y.shape = [5, 3]
assert_bijective_and_finite(softmax, x, y, event_ndims=1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/softmax_centered_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Kumaraswamy Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.kumaraswamy import Kumaraswamy
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class KumaraswamyBijectorTest(test.TestCase):
"""Tests correctness of the Kumaraswamy bijector."""
def testBijector(self):
with self.cached_session():
a = 2.
b = 0.3
bijector = Kumaraswamy(
concentration1=a, concentration0=b, validate_args=True)
self.assertEqual("kumaraswamy", bijector.name)
x = np.array([[[0.1], [0.2], [0.3], [0.4], [0.5]]], dtype=np.float32)
# Kumaraswamy cdf. This is the same as inverse(x).
y = 1. - (1. - x ** a) ** b
self.assertAllClose(y, bijector.inverse(x).eval())
self.assertAllClose(x, bijector.forward(y).eval())
kumaraswamy_log_pdf = (np.log(a) + np.log(b) + (a - 1) * np.log(x) +
(b - 1) * np.log1p(-x ** a))
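      # Because inverse(x) is the Kumaraswamy cdf, inverse_log_det_jacobian at
      # x is log|d cdf(x)/dx|, i.e. the Kumaraswamy log-pdf computed above.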
self.assertAllClose(
np.squeeze(kumaraswamy_log_pdf, axis=-1),
bijector.inverse_log_det_jacobian(x, event_ndims=1).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(x, event_ndims=1).eval(),
bijector.forward_log_det_jacobian(y, event_ndims=1).eval(),
rtol=1e-4,
atol=0.)
def testScalarCongruency(self):
with self.cached_session():
assert_scalar_congruency(
Kumaraswamy(concentration1=0.5, concentration0=1.1),
lower_x=0., upper_x=1., n=int(10e3), rtol=0.02)
def testBijectiveAndFinite(self):
with self.cached_session():
concentration1 = 1.2
concentration0 = 2.
bijector = Kumaraswamy(
concentration1=concentration1,
concentration0=concentration0, validate_args=True)
# Omitting the endpoints 0 and 1, since idlj will be infinity at these
# endpoints.
y = np.linspace(.01, 0.99, num=10).astype(np.float32)
x = 1 - (1 - y ** concentration1) ** concentration0
assert_bijective_and_finite(bijector, x, y, event_ndims=0, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/kumaraswamy_bijector_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.softplus import Softplus
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
rng = np.random.RandomState(42)
class SoftplusBijectorTest(test.TestCase):
"""Tests the correctness of the Y = g(X) = Log[1 + exp(X)] transformation."""
def _softplus(self, x):
return np.log(1 + np.exp(x))
def _softplus_inverse(self, y):
return np.log(np.exp(y) - 1)
def _softplus_ildj_before_reduction(self, y):
"""Inverse log det jacobian, before being reduced."""
return -np.log(1 - np.exp(-y))
def testHingeSoftnessZeroRaises(self):
with self.cached_session():
bijector = Softplus(hinge_softness=0., validate_args=True)
with self.assertRaisesOpError("must be non-zero"):
bijector.forward([1., 1.]).eval()
def testBijectorForwardInverseEventDimsZero(self):
with self.cached_session():
bijector = Softplus()
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorForwardInverseWithHingeSoftnessEventDimsZero(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.5)
x = 2 * rng.randn(2, 10)
y = 1.5 * self._softplus(x / 1.5)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorLogDetJacobianEventDimsZero(self):
with self.cached_session():
bijector = Softplus()
y = 2 * rng.rand(2, 10)
# No reduction needed if event_dims = 0.
ildj = self._softplus_ildj_before_reduction(y)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=0).eval())
def testBijectorForwardInverseEventDimsOne(self):
with self.cached_session():
bijector = Softplus()
self.assertEqual("softplus", bijector.name)
x = 2 * rng.randn(2, 10)
y = self._softplus(x)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
def testBijectorLogDetJacobianEventDimsOne(self):
with self.cached_session():
bijector = Softplus()
y = 2 * rng.rand(2, 10)
ildj_before = self._softplus_ildj_before_reduction(y)
ildj = np.sum(ildj_before, axis=1)
self.assertAllClose(ildj, bijector.inverse_log_det_jacobian(
y, event_ndims=1).eval())
def testScalarCongruency(self):
with self.cached_session():
bijector = Softplus()
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testScalarCongruencyWithPositiveHingeSoftness(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.3)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testScalarCongruencyWithNegativeHingeSoftness(self):
with self.cached_session():
bijector = Softplus(hinge_softness=-1.3)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=2.)
def testBijectiveAndFinite32bit(self):
with self.cached_session():
bijector = Softplus()
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFiniteWithPositiveHingeSoftness32Bit(self):
with self.cached_session():
bijector = Softplus(hinge_softness=1.23)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFiniteWithNegativeHingeSoftness32Bit(self):
with self.cached_session():
bijector = Softplus(hinge_softness=-0.7)
x = np.linspace(-20., 20., 100).astype(np.float32)
y = -np.logspace(-10, 10, 100).astype(np.float32)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-2, atol=1e-2)
def testBijectiveAndFinite16bit(self):
with self.cached_session():
bijector = Softplus()
# softplus(-20) is zero, so we can't use such a large range as in 32bit.
x = np.linspace(-10., 20., 100).astype(np.float16)
# Note that float16 is only in the open set (0, inf) for a smaller
# logspace range. The actual range was (-7, 4), so use something smaller
# for the test.
y = np.logspace(-6, 3, 100).astype(np.float16)
assert_bijective_and_finite(
bijector, x, y, event_ndims=0, rtol=1e-1, atol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/softplus_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
from tensorflow.contrib.distributions.python.ops.bijectors.weibull import Weibull
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class WeibullBijectorTest(test.TestCase):
"""Tests correctness of the weibull bijector."""
def testBijector(self):
with self.cached_session():
scale = 5.
concentration = 0.3
bijector = Weibull(
scale=scale, concentration=concentration,
validate_args=True)
self.assertEqual("weibull", bijector.name)
x = np.array([[[0.], [1.], [14.], [20.], [100.]]], dtype=np.float32)
# Weibull distribution
weibull_dist = stats.frechet_r(c=concentration, scale=scale)
y = weibull_dist.cdf(x).astype(np.float32)
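      # The Weibull bijector's forward map is the Weibull cdf,
      # y = 1 - exp(-(x / scale)**concentration), so scipy's cdf (frechet_r is
      # an alias for weibull_min) gives the expected y.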
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
weibull_dist.logpdf(x),
bijector.forward_log_det_jacobian(x, event_ndims=0).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y, event_ndims=0).eval(),
bijector.forward_log_det_jacobian(x, event_ndims=0).eval(),
rtol=1e-4,
atol=0.)
def testScalarCongruency(self):
with self.cached_session():
assert_scalar_congruency(
Weibull(scale=20., concentration=0.3),
lower_x=1., upper_x=100., rtol=0.02)
def testBijectiveAndFinite(self):
with self.cached_session():
bijector = Weibull(
scale=20., concentration=2., validate_args=True)
x = np.linspace(1., 8., num=10).astype(np.float32)
y = np.linspace(
-np.expm1(-1 / 400.),
-np.expm1(-16), num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, event_ndims=0, rtol=1e-3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/weibull_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.contrib.distributions.python.ops.bijectors.inline import Inline
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class InlineBijectorTest(test.TestCase):
"""Tests correctness of the inline constructed bijector."""
def testBijector(self):
with self.cached_session():
exp = Exp()
inline = Inline(
forward_fn=math_ops.exp,
inverse_fn=math_ops.log,
inverse_log_det_jacobian_fn=lambda y: -math_ops.log(y),
forward_log_det_jacobian_fn=lambda x: x,
forward_min_event_ndims=0,
name="exp")
self.assertEqual(exp.name, inline.name)
x = [[[1., 2.], [3., 4.], [5., 6.]]]
y = np.exp(x)
self.assertAllClose(y, inline.forward(x).eval())
self.assertAllClose(x, inline.inverse(y).eval())
self.assertAllClose(
-np.sum(np.log(y), axis=-1),
inline.inverse_log_det_jacobian(y, event_ndims=1).eval())
self.assertAllClose(
-inline.inverse_log_det_jacobian(y, event_ndims=1).eval(),
inline.forward_log_det_jacobian(x, event_ndims=1).eval())
def testShapeGetters(self):
with self.cached_session():
bijector = Inline(
forward_event_shape_tensor_fn=lambda x: array_ops.concat((x, [1]), 0),
forward_event_shape_fn=lambda x: x.as_list() + [1],
inverse_event_shape_tensor_fn=lambda x: x[:-1],
inverse_event_shape_fn=lambda x: x[:-1],
forward_min_event_ndims=0,
name="shape_only")
x = tensor_shape.TensorShape([1, 2, 3])
y = tensor_shape.TensorShape([1, 2, 3, 1])
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(
y.as_list(),
bijector.forward_event_shape_tensor(x.as_list()).eval())
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(
x.as_list(),
bijector.inverse_event_shape_tensor(y.as_list()).eval())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/inline_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The VectorDiffeomixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors.affine_linear_operator import AffineLinearOperator
from tensorflow.contrib.distributions.python.ops.bijectors.softmax_centered import SoftmaxCentered
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.linalg import linear_operator_addition as linop_add_lib
from tensorflow.python.ops.linalg import linear_operator_diag as linop_diag_lib
from tensorflow.python.ops.linalg import linear_operator_full_matrix as linop_full_lib
from tensorflow.python.ops.linalg import linear_operator_identity as linop_identity_lib
from tensorflow.python.ops.linalg import linear_operator_lower_triangular as linop_tril_lib
from tensorflow.python.util import deprecation
__all__ = [
"VectorDiffeomixture",
"quadrature_scheme_softmaxnormal_gauss_hermite",
"quadrature_scheme_softmaxnormal_quantiles",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_softmaxnormal_gauss_hermite(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use Gauss-Hermite quadrature to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_softmaxnormal_quantiles`.
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
      probabilities associated with each grid point.
"""
with ops.name_scope(name, "quadrature_scheme_softmaxnormal_gauss_hermite",
[normal_loc, normal_scale]):
normal_loc = ops.convert_to_tensor(normal_loc, name="normal_loc")
dt = normal_loc.dtype.base_dtype
normal_scale = ops.convert_to_tensor(
normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
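    # hermgauss targets integrals of the form int f(t) exp(-t**2) dt. The
    # substitution t = (x - loc) / (sqrt(2) * scale) turns a Normal(loc, scale)
    # expectation into that form, which is why the abscissae are mapped to
    # loc + sqrt(2) * scale * grid below and the raw weights only need to be
    # renormalized to sum to one.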
grid = grid.astype(dt.dtype.as_numpy_dtype)
probs = probs.astype(dt.dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = ops.convert_to_tensor(probs, name="probs", dtype=dt)
grid = softmax(
-distribution_util.pad(
(normal_loc[..., array_ops.newaxis] +
np.sqrt(2.) * normal_scale[..., array_ops.newaxis] * grid),
axis=-2,
front=True),
axis=-2) # shape: [B, components, deg]
return grid, probs
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_softmaxnormal_quantiles(
normal_loc, normal_scale, quadrature_size,
validate_args=False, name=None):
"""Use SoftmaxNormal quantiles to form quadrature on `K - 1` simplex.
A `SoftmaxNormal` random variable `Y` may be generated via
```
Y = SoftmaxCentered(X),
X = Normal(normal_loc, normal_scale)
```
Args:
normal_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`, B>=0.
The location parameter of the Normal used to construct the SoftmaxNormal.
normal_scale: `float`-like `Tensor`. Broadcastable with `normal_loc`.
The scale parameter of the Normal used to construct the SoftmaxNormal.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
convex combination of affine parameters for `K` components.
`grid[..., :, n]` is the `n`-th grid point, living in the `K - 1` simplex.
probs: Shape `[b1, ..., bB, K, quadrature_size]` `Tensor` representing the
      probabilities associated with each grid point.
"""
with ops.name_scope(name, "softmax_normal_grid_and_probs",
[normal_loc, normal_scale]):
normal_loc = ops.convert_to_tensor(normal_loc, name="normal_loc")
dt = normal_loc.dtype.base_dtype
normal_scale = ops.convert_to_tensor(
normal_scale, dtype=dt, name="normal_scale")
normal_scale = maybe_check_quadrature_param(
normal_scale, "normal_scale", validate_args)
dist = normal_lib.Normal(loc=normal_loc, scale=normal_scale)
def _get_batch_ndims():
"""Helper to get dist.batch_shape.ndims, statically if possible."""
ndims = dist.batch_shape.ndims
if ndims is None:
ndims = array_ops.shape(dist.batch_shape_tensor())[0]
return ndims
batch_ndims = _get_batch_ndims()
def _get_final_shape(qs):
"""Helper to build `TensorShape`."""
bs = dist.batch_shape.with_rank_at_least(1)
num_components = tensor_shape.dimension_value(bs[-1])
if num_components is not None:
num_components += 1
tail = tensor_shape.TensorShape([num_components, qs])
return bs[:-1].concatenate(tail)
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = array_ops.zeros([], dtype=dist.dtype)
edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
      # Expand edges so it broadcasts across batch dims.
edges = array_ops.reshape(edges, shape=array_ops.concat([
[-1], array_ops.ones([batch_ndims], dtype=dtypes.int32)], axis=0))
quantiles = dist.quantile(edges)
quantiles = SoftmaxCentered().forward(quantiles)
# Cyclically permute left by one.
perm = array_ops.concat([
math_ops.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = array_ops.transpose(quantiles, perm)
quantiles.set_shape(_get_final_shape(quadrature_size + 1))
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
grid.set_shape(_get_final_shape(quadrature_size))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = array_ops.fill(
dims=[quadrature_size],
value=1. / math_ops.cast(quadrature_size, dist.dtype))
return grid, probs
class VectorDiffeomixture(distribution_lib.Distribution):
"""VectorDiffeomixture distribution.
A vector diffeomixture (VDM) is a distribution parameterized by a convex
combination of `K` component `loc` vectors, `loc[k], k = 0,...,K-1`, and `K`
`scale` matrices `scale[k], k = 0,..., K-1`. It approximates the following
[compound distribution]
(https://en.wikipedia.org/wiki/Compound_probability_distribution)
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
The integral `int p(x | z) p(z) dz` is approximated with a quadrature scheme
adapted to the mixture density `p(z)`. The `N` quadrature points `z_{N, n}`
and weights `w_{N, n}` (which are non-negative and sum to 1) are chosen
such that
```q_N(x) := sum_{n=1}^N w_{n, N} p(x | z_{N, n}) --> p(x)```
as `N --> infinity`.
Since `q_N(x)` is in fact a mixture (of `N` points), we may sample from
`q_N` exactly. It is important to note that the VDM is *defined* as `q_N`
above, and *not* `p(x)`. Therefore, sampling and pdf may be implemented as
exact (up to floating point error) methods.
A common choice for the conditional `p(x | z)` is a multivariate Normal.
The implemented marginal `p(z)` is the `SoftmaxNormal`, which is a
`K-1` dimensional Normal transformed by a `SoftmaxCentered` bijector, making
it a density on the `K`-simplex. That is,
```
Z = SoftmaxCentered(X),
X = Normal(mix_loc / temperature, 1 / temperature)
```
The default quadrature scheme chooses `z_{N, n}` as `N` midpoints of
the quantiles of `p(z)` (generalized quantiles if `K > 2`).
See [Dillon and Langmore (2018)][1] for more details.
#### About `Vector` distributions in TensorFlow.
The `VectorDiffeomixture` is a non-standard distribution that has properties
particularly useful in [variational Bayesian
methods](https://en.wikipedia.org/wiki/Variational_Bayesian_methods).
Conditioned on a draw from the SoftmaxNormal, `X|z` is a vector whose
components are linear combinations of affine transformations, thus is itself
an affine transformation.
Note: The marginals `X_1|v, ..., X_d|v` are *not* generally identical to some
  parameterization of `distribution`. This is because the sum of draws from
  `distribution` is not, in general, itself distributed as `distribution`.
#### About `Diffeomixture`s and reparameterization.
The `VectorDiffeomixture` is designed to be reparameterized, i.e., its
parameters are only used to transform samples from a distribution which has no
trainable parameters. This property is important because backprop stops at
sources of stochasticity. That is, as long as the parameters are used *after*
the underlying source of stochasticity, the computed gradient is accurate.
Reparametrization means that we can use gradient-descent (via backprop) to
optimize Monte-Carlo objectives. Such objectives are a finite-sample
approximation of an expectation and arise throughout scientific computing.
WARNING: If you backprop through a VectorDiffeomixture sample and the "base"
distribution is both: not `FULLY_REPARAMETERIZED` and a function of trainable
variables, then the gradient is not guaranteed correct!
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create two batches of VectorDiffeomixtures, one with mix_loc=[0.],
# another with mix_loc=[1]. In both cases, `K=2` and the affine
# transformations involve:
# k=0: loc=zeros(dims) scale=LinearOperatorScaledIdentity
# k=1: loc=[2.]*dims scale=LinOpDiag
dims = 5
vdm = tfd.VectorDiffeomixture(
mix_loc=[[0.], [1]],
temperature=[1.],
distribution=tfd.Normal(loc=0., scale=1.),
loc=[
None, # Equivalent to `np.zeros(dims, dtype=np.float32)`.
np.float32([2.]*dims),
],
scale=[
tf.linalg.LinearOperatorScaledIdentity(
num_rows=dims,
multiplier=np.float32(1.1),
is_positive_definite=True),
tf.linalg.LinearOperatorDiag(
diag=np.linspace(2.5, 3.5, dims, dtype=np.float32),
is_positive_definite=True),
],
validate_args=True)
```
#### References
[1]: Joshua Dillon and Ian Langmore. Quadrature Compound: An approximating
family of distributions. _arXiv preprint arXiv:1801.03080_, 2018.
https://arxiv.org/abs/1801.03080
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
mix_loc,
temperature,
distribution,
loc=None,
scale=None,
quadrature_size=8,
quadrature_fn=quadrature_scheme_softmaxnormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="VectorDiffeomixture"):
"""Constructs the VectorDiffeomixture on `R^d`.
The vector diffeomixture (VDM) approximates the compound distribution
```none
p(x) = int p(x | z) p(z) dz,
where z is in the K-simplex, and
p(x | z) := p(x | loc=sum_k z[k] loc[k], scale=sum_k z[k] scale[k])
```
Args:
mix_loc: `float`-like `Tensor` with shape `[b1, ..., bB, K-1]`.
In terms of samples, larger `mix_loc[..., k]` ==>
`Z` is more likely to put more weight on its `kth` component.
temperature: `float`-like `Tensor`. Broadcastable with `mix_loc`.
In terms of samples, smaller `temperature` means one component is more
likely to dominate. I.e., smaller `temperature` makes the VDM look more
like a standard mixture of `K` components.
distribution: `tf.Distribution`-like instance. Distribution from which `d`
iid samples are used as input to the selected affine transformation.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
a VectorDiffeomixture sample and the `distribution` is not
`FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
the gradient will be incorrect!
loc: Length-`K` list of `float`-type `Tensor`s. The `k`-th element
represents the `shift` used for the `k`-th affine transformation. If
the `k`-th item is `None`, `loc` is implicitly `0`. When specified,
must have shape `[B1, ..., Bb, d]` where `b >= 0` and `d` is the event
size.
scale: Length-`K` list of `LinearOperator`s. Each should be
positive-definite and operate on a `d`-dimensional vector space. The
`k`-th element represents the `scale` used for the `k`-th affine
transformation. `LinearOperator`s must have shape `[B1, ..., Bb, d, d]`,
        `b >= 0`, i.e., characterizes `b`-batches of `d x d` matrices.
quadrature_size: Python `int` scalar representing number of
quadrature points. Larger `quadrature_size` means `q_N(x)` better
approximates `p(x)`.
quadrature_fn: Python callable taking `normal_loc`, `normal_scale`,
`quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
        representing the SoftmaxNormal grid and corresponding normalized weights.
Default value: `quadrature_scheme_softmaxnormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if `not scale or len(scale) < 2`.
ValueError: if `len(loc) != len(scale)`
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
ValueError: if `validate_args` and any not scale.is_positive_definite.
TypeError: if any scale.dtype != scale[0].dtype.
TypeError: if any loc.dtype != scale[0].dtype.
NotImplementedError: if `len(scale) != 2`.
ValueError: if `not distribution.is_scalar_batch`.
ValueError: if `not distribution.is_scalar_event`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[mix_loc, temperature]) as name:
if not scale or len(scale) < 2:
raise ValueError("Must specify list (or list-like object) of scale "
"LinearOperators, one for each component with "
"num_component >= 2.")
if loc is None:
loc = [None]*len(scale)
if len(loc) != len(scale):
raise ValueError("loc/scale must be same-length lists "
"(or same-length list-like objects).")
dtype = scale[0].dtype.base_dtype
loc = [ops.convert_to_tensor(loc_, dtype=dtype, name="loc{}".format(k))
if loc_ is not None else None
for k, loc_ in enumerate(loc)]
for k, scale_ in enumerate(scale):
if validate_args and not scale_.is_positive_definite:
raise ValueError("scale[{}].is_positive_definite = {} != True".format(
k, scale_.is_positive_definite))
if scale_.dtype.base_dtype != dtype:
raise TypeError(
"dtype mismatch; scale[{}].base_dtype=\"{}\" != \"{}\"".format(
k, scale_.dtype.base_dtype.name, dtype.name))
self._endpoint_affine = [
AffineLinearOperator(shift=loc_,
scale=scale_,
validate_args=validate_args,
name="endpoint_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(loc, scale))]
# TODO(jvdillon): Remove once we support k-mixtures.
# We make this assertion here because otherwise `grid` would need to be a
# vector not a scalar.
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
mix_loc = ops.convert_to_tensor(
mix_loc, dtype=dtype, name="mix_loc")
temperature = ops.convert_to_tensor(
temperature, dtype=dtype, name="temperature")
self._grid, probs = tuple(quadrature_fn(
mix_loc / temperature,
1. / temperature,
quadrature_size,
validate_args))
# Note: by creating the logits as `log(prob)` we ensure that
# `self.mixture_distribution.logits` is equivalent to
# `math_ops.log(self.mixture_distribution.probs)`.
self._mixture_distribution = categorical_lib.Categorical(
logits=math_ops.log(probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
self._grid = control_flow_ops.with_dependencies(
asserts, self._grid)
self._distribution = distribution
self._interpolated_affine = [
AffineLinearOperator(shift=loc_,
scale=scale_,
validate_args=validate_args,
name="interpolated_affine_{}".format(k))
for k, (loc_, scale_) in enumerate(zip(
interpolate_loc(self._grid, loc),
interpolate_scale(self._grid, scale)))]
[
self._batch_shape_,
self._batch_shape_tensor_,
self._event_shape_,
self._event_shape_tensor_,
] = determine_batch_event_shapes(self._grid,
self._endpoint_affine)
super(VectorDiffeomixture, self).__init__(
dtype=dtype,
# We hard-code `FULLY_REPARAMETERIZED` because when
# `validate_args=True` we verify that indeed
# `distribution.reparameterization_type == FULLY_REPARAMETERIZED`. A
# distribution which is a function of only non-trainable parameters
# also implies we can use `FULLY_REPARAMETERIZED`. However, we cannot
# easily test for that possibility thus we use `validate_args=False`
# as a "back-door" to allow users a way to use non
# `FULLY_REPARAMETERIZED` distribution. In such cases IT IS THE USERS
# RESPONSIBILITY to verify that the base distribution is a function of
# non-trainable parameters.
reparameterization_type=distribution_lib.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=(
distribution._graph_parents # pylint: disable=protected-access
+ [loc_ for loc_ in loc if loc_ is not None]
+ [p for scale_ in scale for p in scale_.graph_parents]),
name=name)
@property
def mixture_distribution(self):
"""Distribution used to select a convex combination of affine transforms."""
return self._mixture_distribution
@property
def distribution(self):
"""Base scalar-event, scalar-batch distribution."""
return self._distribution
@property
def grid(self):
"""Grid of mixing probabilities, one for each grid point."""
return self._grid
@property
def endpoint_affine(self):
"""Affine transformation for each of `K` components."""
return self._endpoint_affine
@property
def interpolated_affine(self):
"""Affine transformation for each convex combination of `K` components."""
return self._interpolated_affine
def _batch_shape_tensor(self):
return self._batch_shape_tensor_
def _batch_shape(self):
return self._batch_shape_
def _event_shape_tensor(self):
return self._event_shape_tensor_
def _event_shape(self):
return self._event_shape_
def _sample_n(self, n, seed=None):
x = self.distribution.sample(
sample_shape=concat_vectors(
[n],
self.batch_shape_tensor(),
self.event_shape_tensor()),
seed=seed) # shape: [n, B, e]
x = [aff.forward(x) for aff in self.endpoint_affine]
    # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[], in
    # which case get ids as an [n]-shaped vector.
batch_size = self.batch_shape.num_elements()
if batch_size is None:
      batch_size = math_ops.reduce_prod(self.batch_shape_tensor())
mix_batch_size = self.mixture_distribution.batch_shape.num_elements()
if mix_batch_size is None:
mix_batch_size = math_ops.reduce_prod(
self.mixture_distribution.batch_shape_tensor())
ids = self.mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
[batch_size // mix_batch_size])),
seed=distribution_util.gen_new_seed(
seed, "vector_diffeomixture"))
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = array_ops.reshape(ids, shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
np.int32([-1]))))
# Stride `components * quadrature_size` for `batch_size` number of times.
stride = self.grid.shape.with_rank_at_least(
2)[-2:].num_elements()
if stride is None:
      stride = math_ops.reduce_prod(
          array_ops.shape(self.grid)[-2:])
offset = math_ops.range(start=0,
limit=batch_size * stride,
delta=stride,
dtype=ids.dtype)
weight = array_ops.gather(
array_ops.reshape(self.grid, shape=[-1]),
ids + offset)
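    # With grid flattened from shape [batch, components, deg], an offset of
    # `stride = components * deg` per batch element makes this gather pick out
    # grid[b, 0, ids[n, b]], i.e. the component-0 mixing weight at the sampled
    # quadrature point.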
    # At this point, `weight` has all batch dims flattened into one.
# We also need to append a singleton to broadcast with event dims.
if self.batch_shape.is_fully_defined():
new_shape = [-1] + self.batch_shape.as_list() + [1]
else:
new_shape = array_ops.concat(
([-1], self.batch_shape_tensor(), [1]), axis=0)
weight = array_ops.reshape(weight, shape=new_shape)
if len(x) != 2:
# We actually should have already triggered this exception. However as a
# policy we're putting this exception wherever we exploit the bimixture
# assumption.
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(x)))
# Alternatively:
# x = weight * x[0] + (1. - weight) * x[1]
x = weight * (x[0] - x[1]) + x[1]
return x
def _log_prob(self, x):
# By convention, we always put the grid points right-most.
y = array_ops.stack(
[aff.inverse(x) for aff in self.interpolated_affine],
axis=-1)
log_prob = math_ops.reduce_sum(self.distribution.log_prob(y), axis=-2)
# Because the affine transformation has a constant Jacobian, it is the case
# that `affine.fldj(x) = -affine.ildj(x)`. This is not true in general.
fldj = array_ops.stack([
aff.forward_log_det_jacobian(
x,
event_ndims=array_ops.rank(self.event_shape_tensor())
) for aff in self.interpolated_affine], axis=-1)
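    # Change of variables per interpolated component: with z = aff.inverse(x),
    #   log p_k(x) = sum_d log p(z_d) - fldj_k(x),
    # so the mixture log-prob is logsumexp_k(log w_k + log p_k(x)), i.e.
    # `logits - fldj + log_prob` below.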
return math_ops.reduce_logsumexp(
self.mixture_distribution.logits - fldj + log_prob, axis=-1)
def _mean(self):
p = self._expand_mix_distribution_probs()
m = self._expand_base_distribution_mean()
mean = None
for k, aff in enumerate(self.interpolated_affine):
# aff.forward is going to do this:
# y = array_ops.squeeze(aff.scale.matmul(m), axis=[-1])
# if aff.shift is not None:
# y += aff.shift
mean = add(mean, p[..., k] * aff.forward(m))
return mean
def _covariance(self):
# Law of total variance:
#
# Cov[Z] = E[Cov[Z | V]] + Cov[E[Z | V]]
#
# where,
#
# E[Cov[Z | V]] = sum_i mix_prob[i] Scale[i]
# Cov[E[Z | V]] = sum_i mix_prob[i] osquare(loc[i])
# - osquare(sum_i mix_prob[i] loc[i])
#
# osquare(x) = x.transpose @ x
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=False),
self._covariance_of_mean_given_quadrature_component(diag_only=False))
def _variance(self):
# Equivalent to: tf.linalg.tensor_diag_part(self._covariance()),
return add(
self._mean_of_covariance_given_quadrature_component(diag_only=True),
self._covariance_of_mean_given_quadrature_component(diag_only=True))
def _mean_of_covariance_given_quadrature_component(self, diag_only):
p = self.mixture_distribution.probs
# To compute E[Cov(Z|V)], we'll add matrices within three categories:
# scaled-identity, diagonal, and full. Then we'll combine these at the end.
scale_identity_multiplier = None
diag = None
full = None
for k, aff in enumerate(self.interpolated_affine):
s = aff.scale # Just in case aff.scale has side-effects, we'll call once.
if (s is None
or isinstance(s, linop_identity_lib.LinearOperatorIdentity)):
scale_identity_multiplier = add(scale_identity_multiplier,
p[..., k, array_ops.newaxis])
elif isinstance(s, linop_identity_lib.LinearOperatorScaledIdentity):
scale_identity_multiplier = add(
scale_identity_multiplier,
(p[..., k, array_ops.newaxis] * math_ops.square(s.multiplier)))
elif isinstance(s, linop_diag_lib.LinearOperatorDiag):
diag = add(diag, (p[..., k, array_ops.newaxis] *
math_ops.square(s.diag_part())))
else:
x = (p[..., k, array_ops.newaxis, array_ops.newaxis] *
s.matmul(s.to_dense(), adjoint_arg=True))
if diag_only:
x = array_ops.matrix_diag_part(x)
full = add(full, x)
# We must now account for the fact that the base distribution might have a
# non-unity variance. Recall that, since X ~ iid Law(X_0),
# `Cov(SX+m) = S Cov(X) S.T = S S.T Diag(Var(X_0))`.
# We can scale by `Var(X)` (vs `Cov(X)`) since X corresponds to `d` iid
# samples from a scalar-event distribution.
v = self.distribution.variance()
if scale_identity_multiplier is not None:
scale_identity_multiplier *= v
if diag is not None:
diag *= v[..., array_ops.newaxis]
if full is not None:
full *= v[..., array_ops.newaxis]
if diag_only:
# Apparently we don't need the full matrix, just the diagonal.
r = add(diag, full)
if r is None and scale_identity_multiplier is not None:
ones = array_ops.ones(self.event_shape_tensor(), dtype=self.dtype)
return scale_identity_multiplier[..., array_ops.newaxis] * ones
return add(r, scale_identity_multiplier)
# `None` indicates we don't know if the result is positive-definite.
is_positive_definite = (True if all(aff.scale.is_positive_definite
for aff in self.endpoint_affine)
else None)
to_add = []
if diag is not None:
to_add.append(linop_diag_lib.LinearOperatorDiag(
diag=diag,
is_positive_definite=is_positive_definite))
if full is not None:
to_add.append(linop_full_lib.LinearOperatorFullMatrix(
matrix=full,
is_positive_definite=is_positive_definite))
if scale_identity_multiplier is not None:
to_add.append(linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=self.event_shape_tensor()[0],
multiplier=scale_identity_multiplier,
is_positive_definite=is_positive_definite))
return (linop_add_lib.add_operators(to_add)[0].to_dense()
if to_add else None)
def _covariance_of_mean_given_quadrature_component(self, diag_only):
square = math_ops.square if diag_only else vec_osquare
p = self._expand_mix_distribution_probs()
if not diag_only:
p = p[..., array_ops.newaxis, :] # Assuming event.ndims=1.
m = self._expand_base_distribution_mean()
cov_e_z_given_v = None
e_z_given_v = self._mean()
for k, aff in enumerate(self.interpolated_affine):
y = aff.forward(m)
cov_e_z_given_v = add(cov_e_z_given_v,
p[..., k] * square(y - e_z_given_v))
return cov_e_z_given_v
def _expand_base_distribution_mean(self):
"""Ensures `self.distribution.mean()` has `[batch, event]` shape."""
single_draw_shape = concat_vectors(self.batch_shape_tensor(),
self.event_shape_tensor())
m = array_ops.reshape(
self.distribution.mean(), # A scalar.
shape=array_ops.ones_like(single_draw_shape,
dtype=dtypes.int32))
m = array_ops.tile(m, multiples=single_draw_shape)
m.set_shape(self.batch_shape.concatenate(self.event_shape))
return m
def _expand_mix_distribution_probs(self):
p = self.mixture_distribution.probs # [B, deg]
deg = tensor_shape.dimension_value(p.shape.with_rank_at_least(1)[-1])
if deg is None:
deg = array_ops.shape(p)[-1]
event_ndims = self.event_shape.ndims
if event_ndims is None:
event_ndims = array_ops.shape(self.event_shape_tensor())[0]
expand_shape = array_ops.concat([
self.mixture_distribution.batch_shape_tensor(),
array_ops.ones([event_ndims], dtype=dtypes.int32),
[deg],
], axis=0)
return array_ops.reshape(p, shape=expand_shape)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def maybe_check_quadrature_param(param, name, validate_args):
"""Helper which checks validity of `loc` and `scale` init args."""
with ops.name_scope(name="check_" + name, values=[param]):
assertions = []
if param.shape.ndims is not None:
if param.shape.ndims == 0:
raise ValueError("Mixing params must be a (batch of) vector; "
"{}.rank={} is not at least one.".format(
name, param.shape.ndims))
elif validate_args:
assertions.append(check_ops.assert_rank_at_least(
param, 1,
message=("Mixing params must be a (batch of) vector; "
"{}.rank is not at least one.".format(
name))))
# TODO(jvdillon): Remove once we support k-mixtures.
if param.shape.with_rank_at_least(1)[-1] is not None:
if tensor_shape.dimension_value(param.shape[-1]) != 1:
raise NotImplementedError("Currently only bimixtures are supported; "
"{}.shape[-1]={} is not 1.".format(
name,
tensor_shape.dimension_value(
param.shape[-1])))
elif validate_args:
assertions.append(check_ops.assert_equal(
array_ops.shape(param)[-1], 1,
message=("Currently only bimixtures are supported; "
"{}.shape[-1] is not 1.".format(name))))
if assertions:
return control_flow_ops.with_dependencies(assertions, param)
return param
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def determine_batch_event_shapes(grid, endpoint_affine):
"""Helper to infer batch_shape and event_shape."""
with ops.name_scope(name="determine_batch_event_shapes"):
# grid # shape: [B, k, q]
# endpoint_affine # len=k, shape: [B, d, d]
batch_shape = grid.shape[:-2]
batch_shape_tensor = array_ops.shape(grid)[:-2]
event_shape = None
event_shape_tensor = None
def _set_event_shape(shape, shape_tensor):
if event_shape is None:
return shape, shape_tensor
return (array_ops.broadcast_static_shape(event_shape, shape),
array_ops.broadcast_dynamic_shape(
event_shape_tensor, shape_tensor))
for aff in endpoint_affine:
if aff.shift is not None:
batch_shape = array_ops.broadcast_static_shape(
batch_shape, aff.shift.shape[:-1])
batch_shape_tensor = array_ops.broadcast_dynamic_shape(
batch_shape_tensor, array_ops.shape(aff.shift)[:-1])
event_shape, event_shape_tensor = _set_event_shape(
aff.shift.shape[-1:], array_ops.shape(aff.shift)[-1:])
if aff.scale is not None:
batch_shape = array_ops.broadcast_static_shape(
batch_shape, aff.scale.batch_shape)
batch_shape_tensor = array_ops.broadcast_dynamic_shape(
batch_shape_tensor, aff.scale.batch_shape_tensor())
event_shape, event_shape_tensor = _set_event_shape(
tensor_shape.TensorShape([aff.scale.range_dimension]),
aff.scale.range_dimension_tensor()[array_ops.newaxis])
return batch_shape, batch_shape_tensor, event_shape, event_shape_tensor
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def interpolate_loc(grid, loc):
"""Helper which interpolates between two locs."""
if len(loc) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(loc)))
deg = tensor_shape.dimension_value(grid.shape.with_rank_at_least(1)[-1])
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with ops.name_scope("interpolate_loc", values=[grid, loc]):
if loc is None or loc[0] is None and loc[1] is None:
return [None]*deg
# shape: [B, 1, k, deg]
w = grid[..., array_ops.newaxis, :, :]
loc = [x[..., array_ops.newaxis] # shape: [B, e, 1]
if x is not None else None for x in loc]
if loc[0] is None:
x = w[..., 1, :] * loc[1] # shape: [B, e, deg]
elif loc[1] is None:
x = w[..., 0, :] * loc[0] # shape: [B, e, deg]
else:
delta = loc[0] - loc[1]
x = w[..., 0, :] * delta + loc[1] # shape: [B, e, deg]
return [x[..., k] for k in range(deg)] # list(shape:[B, e])
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def interpolate_scale(grid, scale):
"""Helper which interpolates between two scales."""
if len(scale) != 2:
raise NotImplementedError("Currently only bimixtures are supported; "
"len(scale)={} is not 2.".format(len(scale)))
deg = tensor_shape.dimension_value(grid.shape.with_rank_at_least(1)[-1])
if deg is None:
raise ValueError("Num quadrature grid points must be known prior "
"to graph execution.")
with ops.name_scope("interpolate_scale", values=[grid]):
return [linop_add_lib.add_operators([
linop_scale(grid[..., k, q], s)
for k, s in enumerate(scale)
])[0] for q in range(deg)]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def linop_scale(w, op):
# We assume w > 0. (This assumption only relates to the is_* attributes.)
with ops.name_scope("linop_scale", values=[w]):
# TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
# special case combinations here. Once it does, this function can be
# replaced by:
# return linop_composition_lib.LinearOperatorComposition([
# scaled_identity(w), op])
def scaled_identity(w):
return linop_identity_lib.LinearOperatorScaledIdentity(
num_rows=op.range_dimension_tensor(),
multiplier=w,
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, linop_identity_lib.LinearOperatorIdentity):
return scaled_identity(w)
if isinstance(op, linop_identity_lib.LinearOperatorScaledIdentity):
return scaled_identity(w * op.multiplier)
if isinstance(op, linop_diag_lib.LinearOperatorDiag):
return linop_diag_lib.LinearOperatorDiag(
diag=w[..., array_ops.newaxis] * op.diag_part(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
if isinstance(op, linop_tril_lib.LinearOperatorLowerTriangular):
return linop_tril_lib.LinearOperatorLowerTriangular(
tril=w[..., array_ops.newaxis, array_ops.newaxis] * op.to_dense(),
is_non_singular=op.is_non_singular,
is_self_adjoint=op.is_self_adjoint,
is_positive_definite=op.is_positive_definite)
raise NotImplementedError(
"Unsupported Linop type ({})".format(type(op).__name__))
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [distribution_util.static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def add(x, y):
"""Adds inputs; interprets `None` as zero."""
if x is None:
return y
if y is None:
return x
return x + y
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def vec_osquare(x):
"""Computes the outer-product of a (batch of) vector, i.e., x.T x."""
return x[..., :, array_ops.newaxis] * x[..., array_ops.newaxis, :]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def softmax(x, axis, name=None):
"""Equivalent to tf.nn.softmax but works around b/70297725."""
with ops.name_scope(name, "softmax", [x, axis]):
x = ops.convert_to_tensor(x, name="x")
ndims = (x.shape.ndims if x.shape.ndims is not None
else array_ops.rank(x, name="ndims"))
axis = ops.convert_to_tensor(axis, dtype=dtypes.int32, name="axis")
axis_ = tensor_util.constant_value(axis)
if axis_ is not None:
axis = np.int(ndims + axis_ if axis_ < 0 else axis_)
else:
axis = array_ops.where(axis < 0, ndims + axis, axis)
return nn_ops.softmax(x, axis=axis)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/vector_diffeomixture.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"InverseGamma",
"InverseGammaWithSoftplusConcentrationRate",
]
class InverseGamma(distribution.Distribution):
"""InverseGamma distribution.
The `InverseGamma` distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `rate` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
Z = Gamma(alpha) beta**-alpha
```
where:
* `concentration = alpha`,
* `rate = beta`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The cumulative distribution function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
```
where `GammaInc` is the [upper incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
The parameters can be intuited via their relationship to mean and stddev,
```none
concentration = alpha = (mean / stddev)**2 + 2
rate = beta = mean * ((mean / stddev)**2 + 1)
```
Distribution parameters are automatically broadcast in all functions; see
examples for details.
WARNING: This distribution may draw 0-valued samples for small concentration
values. See note in `tf.random.gamma` docstring.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
dist = tfd.InverseGamma(concentration=3.0, rate=2.0)
dist2 = tfd.InverseGamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
```
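A small illustrative sketch (not part of the original docs): the mean/stddev
relationship above can be inverted to pick parameters for target moments,
assuming the `tfd` alias from the example above.
```python
mean, stddev = 2., 1.
concentration = (mean / stddev)**2 + 2.   # alpha = 6.
rate = mean * ((mean / stddev)**2 + 1.)   # beta = 10.
dist3 = tfd.InverseGamma(concentration=concentration, rate=rate)
# dist3.mean() -> 2. and dist3.stddev() -> 1. (defined since concentration > 2).
```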
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="InverseGamma"):
"""Construct InverseGamma with `concentration` and `rate` parameters.
The parameters `concentration` and `rate` must be shaped in a way that
supports broadcasting (e.g. `concentration + rate` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
rate: Floating point tensor, the inverse scale params of the
distribution(s). Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `rate` are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
with ops.control_dependencies([
check_ops.assert_positive(concentration),
check_ops.assert_positive(rate),
] if validate_args else []):
self._concentration = array_ops.identity(
concentration, name="concentration")
self._rate = array_ops.identity(rate, name="rate")
check_ops.assert_same_float_dtype(
[self._concentration, self._rate])
super(InverseGamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._rate],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("concentration", "rate"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def rate(self):
"""Rate parameter."""
return self._rate
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.concentration),
array_ops.shape(self.rate))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.concentration.get_shape(),
self.rate.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(
"""Note: See `tf.random.gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
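    # An InverseGamma(concentration, rate) sample is the reciprocal of a
    # Gamma(concentration, rate) sample.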
return 1. / random_ops.random_gamma(
shape=[n],
alpha=self.concentration,
beta=self.rate,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _cdf(self, x):
x = self._maybe_assert_valid_sample(x)
# Note that igammac returns the upper regularized incomplete gamma
# function Q(a, x), which is what we want for the CDF.
return math_ops.igammac(self.concentration, self.rate / x)
def _log_unnormalized_prob(self, x):
x = self._maybe_assert_valid_sample(x)
return -(1. + self.concentration) * math_ops.log(x) - self.rate / x
def _log_normalization(self):
return (math_ops.lgamma(self.concentration)
- self.concentration * math_ops.log(self.rate))
def _entropy(self):
return (self.concentration
+ math_ops.log(self.rate)
+ math_ops.lgamma(self.concentration)
- ((1. + self.concentration) *
math_ops.digamma(self.concentration)))
@distribution_util.AppendDocstring(
"""The mean of an inverse gamma distribution is
`rate / (concentration - 1)`, when `concentration > 1`, and `NaN`
otherwise. If `self.allow_nan_stats` is `False`, an exception will be
raised rather than returning `NaN`.""")
def _mean(self):
mean = self.rate / (self.concentration - 1.)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(self.concentration > 1., mean, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], self.dtype), self.concentration,
message="mean undefined when any concentration <= 1"),
], mean)
@distribution_util.AppendDocstring(
"""Variance for inverse gamma is defined only for `concentration > 2`. If
`self.allow_nan_stats` is `False`, an exception will be raised rather
than returning `NaN`.""")
def _variance(self):
var = (
math_ops.square(self.rate) / math_ops.squared_difference(
self.concentration, 1.) / (self.concentration - 2.))
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
name="nan")
return array_ops.where(self.concentration > 2., var, nan)
else:
return control_flow_ops.with_dependencies([
check_ops.assert_less(
constant_op.constant(2., dtype=self.dtype),
self.concentration,
message="variance undefined when any concentration <= 2"),
], var)
@distribution_util.AppendDocstring(
"""The mode of an inverse gamma distribution is `rate / (concentration +
1)`.""")
def _mode(self):
return self.rate / (1. + self.concentration)
def _maybe_assert_valid_sample(self, x):
check_ops.assert_same_float_dtype(
tensors=[x], dtype=self.dtype)
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_positive(x),
], x)
class InverseGammaWithSoftplusConcentrationRate(InverseGamma):
"""`InverseGamma` with softplus of `concentration` and `rate`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
concentration,
rate,
validate_args=False,
allow_nan_stats=True,
name="InverseGammaWithSoftplusConcentrationRate"):
parameters = dict(locals())
with ops.name_scope(name, values=[concentration, rate]) as name:
super(InverseGammaWithSoftplusConcentrationRate, self).__init__(
concentration=nn.softplus(concentration,
name="softplus_concentration"),
rate=nn.softplus(rate, name="softplus_rate"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/inverse_gamma.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The OneHotCategorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class OneHotCategorical(distribution.Distribution):
"""OneHotCategorical distribution.
The categorical distribution is parameterized by the log-probabilities
of a set of classes. The difference between OneHotCategorical and Categorical
distributions is that OneHotCategorical is a discrete distribution over
one-hot bit vectors whereas Categorical is a discrete distribution over
positive integers. OneHotCategorical is equivalent to Categorical except
Categorical has event_dim=() while OneHotCategorical has event_dim=K, where
K is the number of classes.
This class provides methods to create indexed batches of OneHotCategorical
distributions. If the provided `logits` or `probs` is rank 2 or higher, for
every fixed set of leading dimensions, the last dimension represents one
single OneHotCategorical distribution. When calling distribution
functions (e.g. `dist.prob(x)`), `logits` and `x` are broadcast to the
same shape (if possible). In all cases, the last dimension of `logits,x`
represents single OneHotCategorical distributions.
#### Examples
Creates a 3-class distribution, with the 2nd class the most likely to be
drawn from.
```python
p = [0.1, 0.5, 0.4]
dist = OneHotCategorical(probs=p)
```
Creates a 3-class distribution, with the 2nd class the most likely to be
drawn from, using logits.
```python
logits = [-2, 2, 0]
dist = OneHotCategorical(logits=logits)
```
Creates a 3-class distribution, where the 3rd class is the most likely to be drawn.
```python
# The argument to `prob` below is a single one-hot vector.
p = [0.1, 0.4, 0.5]
dist = OneHotCategorical(probs=p)
dist.prob([0,1,0]) # Shape []
# p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match.
samples = [[0,1,0], [1,0,0]]
dist.prob(samples) # Shape [2]
```
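A further minimal sketch (hypothetical values, not from the original docs),
showing batched sampling, log-probabilities, and the mode.
```python
dist = OneHotCategorical(logits=[[-2., 2., 0.], [1., 0., -1.]])
x = dist.sample(4)       # Shape [4, 2, 3]; one-hot along the last axis.
lp = dist.log_prob(x)    # Shape [4, 2].
m = dist.mode()          # Shape [2, 3]; one-hot of the argmax of the logits.
```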
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(
self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="OneHotCategorical"):
"""Initialize OneHotCategorical distributions using class log-probabilities.
Args:
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities of a
set of Categorical distributions. The first `N - 1` dimensions index
into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities of a set
of Categorical distributions. The first `N - 1` dimensions index into a
batch of independent distributions and the last dimension represents a
vector of probabilities for each class. Only one of `logits` or `probs`
should be passed in.
dtype: The type of the event samples (default: int32).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
name=name, logits=logits, probs=probs, validate_args=validate_args,
multidimensional=True)
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
with ops.name_scope(name="event_size"):
self._event_size = array_ops.shape(self._logits)[-1]
super(OneHotCategorical, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs],
name=name)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of coordinatewise probabilities."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self.logits)[:-1]
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.logits)[-1:]
def _event_shape(self):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
logits = self.logits
if logits.get_shape().ndims == 2:
logits_2d = logits
else:
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
samples = random_ops.multinomial(logits_2d, n, seed=seed)
samples = array_ops.transpose(samples)
samples = array_ops.one_hot(samples, self.event_size, dtype=self.dtype)
ret = array_ops.reshape(samples, sample_shape)
return ret
def _log_prob(self, x):
x = self._assert_valid_sample(x)
# broadcast logits or x if need be.
logits = self.logits
if (not x.get_shape().is_fully_defined() or
not logits.get_shape().is_fully_defined() or
x.get_shape() != logits.get_shape()):
logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
x = array_ops.ones_like(logits, dtype=x.dtype) * x
logits_shape = array_ops.shape(math_ops.reduce_sum(logits, -1))
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
x_2d = array_ops.reshape(x, [-1, self.event_size])
ret = -nn_ops.softmax_cross_entropy_with_logits(labels=x_2d,
logits=logits_2d)
# Reshape back to user-supplied batch and sample dims prior to 2D reshape.
ret = array_ops.reshape(ret, logits_shape)
return ret
def _entropy(self):
return -math_ops.reduce_sum(
nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
def _mode(self):
ret = math_ops.argmax(self.logits, axis=self._batch_rank)
ret = array_ops.one_hot(ret, self.event_size, dtype=self.dtype)
ret.set_shape(self.logits.get_shape())
return ret
def _covariance(self):
p = self.probs
ret = -math_ops.matmul(p[..., None], p[..., None, :])
return array_ops.matrix_set_diag(ret, self._variance())
def _variance(self):
return self.probs * (1. - self.probs)
def _assert_valid_sample(self, x):
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_non_positive(x),
check_ops.assert_near(
array_ops.zeros([], dtype=self.dtype),
math_ops.reduce_logsumexp(x, axis=[-1])),
], x)
@kullback_leibler.RegisterKL(OneHotCategorical, OneHotCategorical)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.
Args:
a: instance of a OneHotCategorical distribution object.
b: instance of a OneHotCategorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_categorical_categorical", values=[
a.logits, b.logits]):
# sum(p ln(p / q))
return math_ops.reduce_sum(
nn_ops.softmax(a.logits) * (nn_ops.log_softmax(a.logits)
- nn_ops.log_softmax(b.logits)),
axis=-1)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/onehot_categorical.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The RelaxedBernoulli distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import logistic
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid import Sigmoid
# Bijectors must be directly imported because `remove_undocumented` prevents
# individual file imports.
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class RelaxedBernoulli(transformed_distribution.TransformedDistribution):
"""RelaxedBernoulli distribution with temperature and logits parameters.
The RelaxedBernoulli is a distribution over the unit interval (0,1), which
continuously approximates a Bernoulli. The degree of approximation is
controlled by a temperature: as the temperature goes to 0 the
RelaxedBernoulli becomes discrete with a distribution described by the
`logits` or `probs` parameters, as the temperature goes to infinity the
RelaxedBernoulli becomes the constant distribution that is identically 0.5.
The RelaxedBernoulli distribution is a reparameterized continuous
distribution that is the binary special case of the RelaxedOneHotCategorical
distribution (Maddison et al., 2016; Jang et al., 2016). For details on the
binary special case see the appendix of Maddison et al. (2016) where it is
referred to as BinConcrete. If you use this distribution, please cite both
papers.
Some care needs to be taken for loss functions that depend on the
log-probability of RelaxedBernoullis, because computing log-probabilities of
the RelaxedBernoulli can suffer from underflow issues. In many cases, loss
functions such as these are invariant under invertible transformations of
the random variables. The KL divergence, found in the variational autoencoder
loss, is an example. Because RelaxedBernoullis are sampled by a Logistic
random variable followed by a `tf.sigmoid` op, one solution is to treat
the Logistic as the random variable and `tf.sigmoid` as downstream. The
KL divergence between two Logistics, each of which is always followed by a
`tf.sigmoid` op, equals the KL divergence between the corresponding
RelaxedBernoulli distributions.
See Maddison et al., 2016 for more details where this distribution is called
the BinConcrete.
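A rough Monte Carlo sketch of this idea (illustrative only; it assumes the
`Logistic` class used in the examples below and eager-style evaluation):
```python
temperature = 0.5
q = Logistic(2. / temperature, 1. / temperature)   # "posterior", logits = 2.
p = Logistic(0. / temperature, 1. / temperature)   # "prior", logits = 0.
z = q.sample(1000)
# Unbiased Monte Carlo estimate of KL(q || p) computed in Logistic space; it
# equals the KL between the corresponding RelaxedBernoullis because
# `tf.sigmoid` is invertible.
kl_estimate = tf.reduce_mean(q.log_prob(z) - p.log_prob(z))
```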
An alternative approach is to evaluate Bernoulli log probability or KL
directly on relaxed samples, as done in Jang et al., 2016. In this case,
guarantees on the loss are usually violated. For instance, using a Bernoulli
KL in a relaxed ELBO is no longer a lower bound on the log marginal
probability of the observation. Thus care and early stopping are important.
#### Examples
Creates three continuous distributions, which approximate 3 Bernoullis with
probabilities (0.1, 0.5, 0.4). Samples from these distributions will be in
the unit interval (0,1).
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = RelaxedBernoulli(temperature, probs=p)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1).
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions whose sigmoids approximate 3 Bernoullis
with logits (-2, 2, 0).
```python
temperature = 0.5
logits = tf.constant([-2., 2., 0.])
dist = Logistic(logits/temperature, 1./temperature)
samples = dist.sample()
sigmoid_samples = tf.sigmoid(samples)
# sigmoid_samples has the same distribution as samples from
# RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1). Because the temperature is very low, samples from
these distributions are almost discrete, usually taking values very close to 0
or 1.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Creates three continuous distributions, which approximate 3 Bernoullis with
logits (-2, 2, 0). Samples from these distributions will be in
the unit interval (0,1). Because the temperature is very high, samples from
these distributions are usually close to the (0.5, 0.5, 0.5) vector.
```python
temperature = 100
logits = [-2, 2, 0]
dist = RelaxedBernoulli(temperature, logits=logits)
```
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with
Gumbel-Softmax. 2016.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
temperature,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="RelaxedBernoulli"):
"""Construct RelaxedBernoulli distributions.
Args:
temperature: A 0-D `Tensor`, representing the temperature
of a set of RelaxedBernoulli distributions. The temperature should be
positive.
logits: An N-D `Tensor` representing the log-odds
of a positive event. Each entry in the `Tensor` parametrizes
an independent RelaxedBernoulli distribution where the probability of an
event is sigmoid(logits). Only one of `logits` or `probs` should be
passed in.
probs: An N-D `Tensor` representing the probability of a positive event.
Each entry in the `Tensor` parameterizes an independent Bernoulli
distribution. Only one of `logits` or `probs` should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If both `probs` and `logits` are passed, or if neither.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs, temperature]) as name:
with ops.control_dependencies([check_ops.assert_positive(temperature)]
if validate_args else []):
self._temperature = array_ops.identity(temperature, name="temperature")
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits=logits, probs=probs, validate_args=validate_args)
super(RelaxedBernoulli, self).__init__(
distribution=logistic.Logistic(
self._logits / self._temperature,
1. / self._temperature,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name + "/Logistic"),
bijector=Sigmoid(validate_args=validate_args),
validate_args=validate_args,
name=name)
self._parameters = parameters
@staticmethod
def _param_shapes(sample_shape):
return {"logits": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def temperature(self):
"""Distribution parameter for the location."""
return self._temperature
@property
def logits(self):
"""Log-odds of `1`."""
return self._logits
@property
def probs(self):
"""Probability of `1`."""
return self._probs
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
#### Examples
```python
# Create a mixture of two Gaussians:
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
mix = 0.3
bimix_gauss = tfd.Mixture(
cat=tfd.Categorical(probs=[mix, 1.-mix]),
components=[
tfd.Normal(loc=-1., scale=0.1),
tfd.Normal(loc=+1., scale=0.5),
])
# Plot the PDF.
import matplotlib.pyplot as plt
x = tf.linspace(-2., 3., int(1e4)).eval()
plt.plot(x, bimix_gauss.prob(x).eval());
```
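A minimal follow-on sketch (assuming the `bimix_gauss` object above):
```python
samples = bimix_gauss.sample(5)            # Shape [5].
log_p = bimix_gauss.log_prob(samples)      # Shape [5].
h_lb = bimix_gauss.entropy_lower_bound()   # Scalar lower bound on the entropy.
```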
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
use_static_graph=False,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
of `components`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: Python `bool`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
use_static_graph: Calls to `sample` will not rely on dynamic tensor
indexing, allowing for some static graph compilation optimizations, but
at the expense of sampling all underlying distributions in the mixture.
(Possibly useful when running on TPUs).
Default value: `False` (i.e., use dynamic indexing).
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
parameters = dict(locals())
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with ops.name_scope(name, values=[cat.logits]) as name:
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
self._use_static_graph = use_static_graph
if use_static_graph and static_num_components is None:
raise ValueError("Number of categories must be known statically when "
"`static_sample=True`.")
# We let the Mixture distribution access _graph_parents since its arguably
# more like a baseclass.
graph_parents = self._cat._graph_parents # pylint: disable=protected-access
for c in self._components:
graph_parents += c._graph_parents # pylint: disable=protected-access
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(self.event_shape.ndims):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return math_ops.add_n(partial_means)
def _stddev(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
distribution_devs = [d.stddev() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
stacked_means = array_ops.stack(distribution_means, axis=-1)
stacked_devs = array_ops.stack(distribution_devs, axis=-1)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) *
array_ops.ones_like(stacked_means))
batched_dev = distribution_utils.mixture_stddev(
array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
array_ops.reshape(stacked_means, [-1, len(self.components)]),
array_ops.reshape(stacked_devs, [-1, len(self.components)]))
# I.e. re-shape to list(batch_shape) + list(event_shape).
return array_ops.reshape(batched_dev,
array_ops.shape(broadcasted_cat_probs)[:-1])
def _log_prob(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.stack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _log_cdf(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_cdfs = [
cat_lp + d_lcdf
for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
]
concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
return mixture_log_cdf
def _sample_n(self, n, seed=None):
if self._use_static_graph:
# This sampling approach is almost the same as the approach used by
# `MixtureSameFamily`. The differences are due to having a list of
# `Distribution` objects rather than a single object, and maintaining
# random seed management that is consistent with the non-static code path.
samples = []
cat_samples = self.cat.sample(n, seed=seed)
for c in range(self.num_components):
seed = distribution_util.gen_new_seed(seed, "mixture")
samples.append(self.components[c].sample(n, seed=seed))
x = array_ops.stack(
samples, -self._static_event_shape.ndims - 1) # [n, B, k, E]
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=cat_samples, # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = distribution_utils.pad_mixture_dimensions(
mask, self, self._cat,
self._static_event_shape.ndims) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask,
axis=-1 - self._static_event_shape.ndims) # [n, B, E]
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
# batch indices are i.e., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [1 1] and [0 0 0 0]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 0, 0, 0
# (samples 0, 1, 2, 3).
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
seed = distribution_util.gen_new_seed(seed, "mixture")
samples_class_c = self.components[c].sample(n_class, seed=seed)
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat([[n_class * batch_size], event_shape], 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat([samples_shape,
self.event_shape_tensor()], 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
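For example, with mixture weights \\( c = (0.3, 0.7) \\) and component
entropies \\( H[q_0], H[q_1] \\), the bound computed below is
\\( 0.3 H[q_0] + 0.7 H[q_1] \\).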
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return math_ops.add_n(partial_entropies)
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/mixture.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Autoregressive distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Autoregressive(distribution_lib.Distribution):
"""Autoregressive distributions.
The Autoregressive distribution enables learning (often) richer multivariate
distributions by repeatedly applying a [diffeomorphic](
https://en.wikipedia.org/wiki/Diffeomorphism) transformation (such as
implemented by `Bijector`s). Regarding terminology,
"Autoregressive models decompose the joint density as a product of
conditionals, and model each conditional in turn. Normalizing flows
transform a base density (e.g. a standard Gaussian) into the target density
by an invertible transformation with tractable Jacobian." [(Papamakarios et
al., 2016)][1]
In other words, the "autoregressive property" is equivalent to the
decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. The provided
`shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves
this property by zeroing out weights in its `masked_dense` layers.
Practically speaking the autoregressive property means that there exists a
permutation of the event coordinates such that each coordinate is a
diffeomorphic function of only preceding coordinates
[(van den Oord et al., 2016)][2].
#### Mathematical Details
The probability function is
```none
prob(x; fn, n) = fn(x).prob(x)
```
And a sample is generated by
```none
x = fn(...fn(fn(x0).sample()).sample()).sample()
```
where the ellipses (`...`) represent `n-2` composed calls to `fn`, `fn`
constructs a `tfp.distributions.Distribution`-like instance, and `x0` is a
fixed initializing `Tensor`.
#### Examples
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
def normal_fn(event_size):
  n = event_size * (event_size + 1) // 2
  p = tf.Variable(tfd.Normal(loc=0., scale=1.).sample(n))
  affine = tfb.Affine(scale_tril=tfp.math.fill_triangular(0.25 * p))
  def _fn(samples):
    scale = tf.exp(affine.forward(samples))
    return tfd.Independent(
        tfd.Normal(loc=0., scale=scale, validate_args=True),
        reinterpreted_batch_ndims=1)
  return _fn
batch_and_event_shape = [3, 2, 4]
sample0 = tf.zeros(batch_and_event_shape)
ar = tfd.Autoregressive(normal_fn(batch_and_event_shape[-1]), sample0)
x = ar.sample([6, 5])
# ==> x.shape = [6, 5, 3, 2, 4]
prob_x = ar.prob(x)
# ==> prob_x.shape = [6, 5, 3, 2]
```
#### References
[1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
[2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,
Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with
PixelCNN Decoders. In _Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.05328
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
distribution_fn,
sample0=None,
num_steps=None,
validate_args=False,
allow_nan_stats=True,
name="Autoregressive"):
"""Construct an `Autoregressive` distribution.
Args:
distribution_fn: Python `callable` which constructs a
`tfp.distributions.Distribution`-like instance from a `Tensor` (e.g.,
`sample0`). The function must respect the "autoregressive property",
        i.e., there exists a permutation of the event coordinates such that
        each coordinate is a diffeomorphic function of only preceding
        coordinates.
sample0: Initial input to `distribution_fn`; used to
build the distribution in `__init__` which in turn specifies this
distribution's properties, e.g., `event_shape`, `batch_shape`, `dtype`.
        If unspecified, then `distribution_fn` should be default constructible.
num_steps: Number of times `distribution_fn` is composed from samples,
e.g., `num_steps=2` implies
`distribution_fn(distribution_fn(sample0).sample(n)).sample()`.
validate_args: Python `bool`. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Default value: "Autoregressive".
Raises:
ValueError: if `num_steps` and
`distribution_fn(sample0).event_shape.num_elements()` are both `None`.
ValueError: if `num_steps < 1`.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
self._distribution_fn = distribution_fn
self._sample0 = sample0
self._distribution0 = (distribution_fn() if sample0 is None
else distribution_fn(sample0))
if num_steps is None:
num_steps = self._distribution0.event_shape.num_elements()
if num_steps is None:
raise ValueError("distribution_fn must generate a distribution "
"with fully known `event_shape`.")
if num_steps < 1:
raise ValueError("num_steps ({}) must be at least 1.".format(num_steps))
self._num_steps = num_steps
super(Autoregressive, self).__init__(
dtype=self._distribution0.dtype,
reparameterization_type=self._distribution0.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=self._distribution0._graph_parents, # pylint: disable=protected-access
name=name)
@property
def distribution_fn(self):
return self._distribution_fn
@property
def sample0(self):
return self._sample0
@property
def num_steps(self):
return self._num_steps
@property
def distribution0(self):
return self._distribution0
def _batch_shape(self):
return self.distribution0.batch_shape
def _batch_shape_tensor(self):
return self.distribution0.batch_shape_tensor()
def _event_shape(self):
return self.distribution0.event_shape
def _event_shape_tensor(self):
return self.distribution0.event_shape_tensor()
def _sample_n(self, n, seed=None):
if seed is None:
seed = distribution_util.gen_new_seed(
seed=np.random.randint(2**32 - 1),
salt="autoregressive")
samples = self.distribution0.sample(n, seed=seed)
for _ in range(self._num_steps):
samples = self.distribution_fn(samples).sample(seed=seed)
return samples
def _log_prob(self, value):
return self.distribution_fn(value).log_prob(value)
def _prob(self, value):
return self.distribution_fn(value).prob(value)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/autoregressive.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SinhArcsinh transformation of a distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.util import deprecation
__all__ = [
"SinhArcsinh",
]
class SinhArcsinh(transformed_distribution.TransformedDistribution):
"""The SinhArcsinh transformation of a distribution on `(-inf, inf)`.
This distribution models a random variable, making use of
a `SinhArcsinh` transformation (which has adjustable tailweight and skew),
a rescaling, and a shift.
The `SinhArcsinh` transformation of the Normal is described in great depth in
[Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).
Here we use a slightly different parameterization, in terms of `tailweight`
and `skewness`. Additionally we allow for distributions other than Normal,
and control over `scale` as well as a "shift" parameter `loc`.
#### Mathematical Details
Given random variable `Z`, we define the SinhArcsinh
transformation of `Z`, `Y`, parameterized by
`(loc, scale, skewness, tailweight)`, via the relation:
```
Y := loc + scale * F(Z) * (2 / F_0(2))
F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
```
This distribution is similar to the location-scale transformation
`L(Z) := loc + scale * Z` in the following ways:
* If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then
`Y = L(Z)` exactly.
  * `loc` is used in both to shift the result by a constant amount.
* The multiplication of `scale` by `2 / F_0(2)` ensures that if `skewness = 0`
`P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.
Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond
`loc + 2 * scale` are the same.
  This distribution is different from `loc + scale * Z` due to the
  reshaping done by `F`:
  * Positive (negative) `skewness` leads to positive (negative) skew.
    * Positive skew means the mode of `F(Z)` is "tilted" to the right.
    * Positive skew means positive values of `F(Z)` become more likely, and
      negative values become less likely.
* Larger (smaller) `tailweight` leads to fatter (thinner) tails.
* Fatter tails mean larger values of `|F(Z)|` become more likely.
* `tailweight < 1` leads to a distribution that is "flat" around `Y = loc`,
and a very steep drop-off in the tails.
* `tailweight > 1` leads to a distribution more peaked at the mode with
heavier tails.
To see the argument about the tails, note that for `|Z| >> 1` and
`|Z| >> (|skewness| * tailweight)**tailweight`, we have
`Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.
To see the argument regarding multiplying `scale` by `2 / F_0(2)`,
```
P[(Y - loc) / scale <= 2] = P[F(Z) * (2 / F_0(2)) <= 2]
= P[F(Z) <= F_0(2)]
= P[Z <= 2] (if F = F_0).
```
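  A minimal NumPy sketch of the transformation above (the parameter values
  here are illustrative assumptions, not defaults of this class):
  ```python
  import numpy as np
  loc, scale, skewness, tailweight = 0., 1., 0.5, 1.5
  z = np.random.randn(1000)                                       # Z ~ Normal(0, 1)
  f = lambda t: np.sinh((np.arcsinh(t) + skewness) * tailweight)  # F
  f0 = lambda t: np.sinh(np.arcsinh(t) * tailweight)              # F_0
  y = loc + scale * f(z) * (2. / f0(2.))                          # Y, as above
  ```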
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
skewness=None,
tailweight=None,
distribution=None,
validate_args=False,
allow_nan_stats=True,
name="SinhArcsinh"):
"""Construct SinhArcsinh distribution on `(-inf, inf)`.
Arguments `(loc, scale, skewness, tailweight)` must have broadcastable shape
(indexing batch dimensions). They must all have the same `dtype`.
Args:
loc: Floating-point `Tensor`.
scale: `Tensor` of same `dtype` as `loc`.
skewness: Skewness parameter. Default is `0.0` (no skew).
      tailweight: Tailweight parameter. Default is `1.0` (unchanged tailweight).
distribution: `tf.Distribution`-like instance. Distribution that is
transformed to produce this distribution.
Default is `tfp.distributions.Normal(0., 1.)`.
Must be a scalar-batch, scalar-event distribution. Typically
`distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
a function of non-trainable parameters. WARNING: If you backprop through
a `SinhArcsinh` sample and `distribution` is not
`FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
the gradient will be incorrect!
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name,
values=[loc, scale, skewness, tailweight]) as name:
loc = ops.convert_to_tensor(loc, name="loc")
dtype = loc.dtype
scale = ops.convert_to_tensor(scale, name="scale", dtype=dtype)
tailweight = 1. if tailweight is None else tailweight
has_default_skewness = skewness is None
skewness = 0. if skewness is None else skewness
tailweight = ops.convert_to_tensor(
tailweight, name="tailweight", dtype=dtype)
skewness = ops.convert_to_tensor(skewness, name="skewness", dtype=dtype)
batch_shape = distribution_util.get_broadcast_shape(
loc, scale, tailweight, skewness)
# Recall, with Z a random variable,
# Y := loc + C * F(Z),
# F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
# F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
# C := 2 * scale / F_0(2)
if distribution is None:
distribution = normal.Normal(
loc=array_ops.zeros([], dtype=dtype),
scale=array_ops.ones([], dtype=dtype),
allow_nan_stats=allow_nan_stats)
else:
asserts = distribution_util.maybe_check_scalar_distribution(
distribution, dtype, validate_args)
if asserts:
loc = control_flow_ops.with_dependencies(asserts, loc)
# Make the SAS bijector, 'F'.
f = bijectors.SinhArcsinh(
skewness=skewness, tailweight=tailweight)
if has_default_skewness:
f_noskew = f
else:
f_noskew = bijectors.SinhArcsinh(
skewness=skewness.dtype.as_numpy_dtype(0.),
tailweight=tailweight)
# Make the AffineScalar bijector, Z --> loc + scale * Z (2 / F_0(2))
c = 2 * scale / f_noskew.forward(ops.convert_to_tensor(2, dtype=dtype))
affine = bijectors.AffineScalar(
shift=loc,
scale=c,
validate_args=validate_args)
bijector = bijectors.Chain([affine, f])
super(SinhArcsinh, self).__init__(
distribution=distribution,
bijector=bijector,
batch_shape=batch_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
self._loc = loc
self._scale = scale
self._tailweight = tailweight
self._skewness = skewness
@property
def loc(self):
"""The `loc` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
return self._loc
@property
def scale(self):
"""The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
return self._scale
@property
def tailweight(self):
"""Controls the tail decay. `tailweight > 1` means faster than Normal."""
return self._tailweight
@property
def skewness(self):
"""Controls the skewness. `Skewness > 0` means right skew."""
return self._skewness
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/sinh_arcsinh.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Geometric distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class Geometric(distribution.Distribution):
"""Geometric distribution.
  The Geometric distribution is parameterized by `p`, the probability of a
  positive event. It represents the probability that in `k + 1` Bernoulli
  trials, the first `k` trials fail before a success is seen.
  #### Mathematical Details
  The pmf of this distribution is:
```none
pmf(k; p) = (1 - p)**k * p
```
where:
* `p` is the success probability, `0 < p <= 1`, and,
* `k` is a non-negative integer.
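  #### Examples
  A short usage sketch (assuming the TensorFlow Probability import convention
  used in the other docstrings of this package):
  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  # A single Geometric distribution with success probability 0.3.
  geom = tfd.Geometric(probs=0.3)
  # pmf(2; 0.3) = (1 - 0.3)**2 * 0.3 = 0.147
  geom.prob(2.)
  ```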
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="Geometric"):
"""Construct Geometric distributions.
Args:
logits: Floating-point `Tensor` with shape `[B1, ..., Bb]` where `b >= 0`
indicates the number of batch dimensions. Each entry represents logits
for the probability of success for independent Geometric distributions
and must be in the range `(-inf, inf]`. Only one of `logits` or `probs`
should be specified.
probs: Positive floating-point `Tensor` with shape `[B1, ..., Bb]`
where `b >= 0` indicates the number of batch dimensions. Each entry
represents the probability of success for independent Geometric
distributions and must be in the range `(0, 1]`. Only one of `logits`
or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(self._probs)] if validate_args else []):
self._probs = array_ops.identity(self._probs, name="probs")
super(Geometric, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._probs, self._logits],
name=name)
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._probs)
def _batch_shape(self):
return self.probs.get_shape()
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
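    # Given `U ~ Uniform(0, 1)`, `floor(log(U) / log(1 - p))` is Geometric(p)
    # distributed (counting failures before the first success); the two ops
    # below implement exactly this inverse-CDF transform.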
sampled = random_ops.random_uniform(
array_ops.concat([[n], array_ops.shape(self._probs)], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
seed=seed,
dtype=self.dtype)
return math_ops.floor(
math_ops.log(sampled) / math_ops.log1p(-self.probs))
def _cdf(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
else:
# Whether or not x is integer-form, the following is well-defined.
# However, scipy takes the floor, so we do too.
x = math_ops.floor(x)
x *= array_ops.ones_like(self.probs)
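    # CDF(k; p) = 1 - (1 - p)**(k + 1); computed below as
    # -expm1((k + 1) * log1p(-p)) for numerical stability.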
return array_ops.where(
x < 0.,
array_ops.zeros_like(x),
-math_ops.expm1((1. + x) * math_ops.log1p(-self.probs)))
def _log_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
else:
# For consistency with cdf, we take the floor.
x = math_ops.floor(x)
x *= array_ops.ones_like(self.probs)
probs = self.probs * array_ops.ones_like(x)
safe_domain = array_ops.where(
math_ops.equal(x, 0.),
array_ops.zeros_like(probs),
probs)
return x * math_ops.log1p(-safe_domain) + math_ops.log(probs)
def _entropy(self):
probs = self._probs
if self.validate_args:
probs = control_flow_ops.with_dependencies(
[check_ops.assert_less(
probs,
constant_op.constant(1., probs.dtype),
message="Entropy is undefined when logits = inf or probs = 1.")],
probs)
# Claim: entropy(p) = softplus(s)/p - s
# where s=logits and p=probs.
#
# Proof:
#
# entropy(p)
# := -[(1-p)log(1-p) + plog(p)]/p
# = -[log(1-p) + plog(p/(1-p))]/p
# = -[-softplus(s) + ps]/p
# = softplus(s)/p - s
#
# since,
# log[1-sigmoid(s)]
# = log[1/(1+exp(s)]
# = -log[1+exp(s)]
# = -softplus(s)
#
# using the fact that,
# 1-sigmoid(s) = sigmoid(-s) = 1/(1+exp(s))
return nn.softplus(self.logits) / probs - self.logits
def _mean(self):
return math_ops.exp(-self.logits)
def _variance(self):
return self._mean() / self.probs
def _mode(self):
return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/geometric.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vector Student's t distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.util import deprecation
class _VectorStudentT(transformed_distribution.TransformedDistribution):
"""A vector version of Student's t-distribution on `R^k`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + 1)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) ( sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) )**k
```
where:
* `loc = mu`; a vector in `R^k`,
* `scale = Sigma`; a lower-triangular matrix in `R^{k x k}`,
* `Z` denotes the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function), and,
* `||y||**2` denotes the [squared Euclidean norm](
https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `y`.
The VectorStudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
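  A minimal NumPy sketch of this construction (`df`, `loc`, and the
  lower-triangular `scale` below are illustrative values, not arguments of
  this class):
  ```python
  import numpy as np
  df = 2.
  loc = np.array([1., 2., 3.])
  scale = np.array([[1., 0., 0.],
                    [1., 3., 0.],
                    [1., 2., 3.]])          # lower triangular
  x = np.random.standard_t(df, size=3)      # X ~ StudentT(df, loc=0, scale=1), iid
  y = loc + scale @ x                       # Y = loc + scale * X
  ```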
Notice that the `scale` matrix has semantics closer to std. deviation than
covariance (but it is not std. deviation).
This distribution is an Affine transformation of iid
[Student's t-distributions](
https://en.wikipedia.org/wiki/Student%27s_t-distribution)
and should not be confused with the [Multivariate Student's t-distribution](
https://en.wikipedia.org/wiki/Multivariate_t-distribution). The
  traditional Multivariate Student's t-distribution is a type of
[elliptical distribution](
https://en.wikipedia.org/wiki/Elliptical_distribution); it has PDF:
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))
```
Notice that the Multivariate Student's t-distribution uses `k` where the
Vector Student's t-distribution has a `1`. Conversely the Vector version has a
broader application of the power-`k` in the normalization constant.
#### Examples
A single instance of a "Vector Student's t-distribution" is defined by a mean
vector of length `k` and a scale matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate vector Student's t-distribution.
mu = [1., 2, 3]
chol = [[1., 0, 0.],
[1, 3, 0],
[1, 2, 3]]
vt = tfd.VectorStudentT(df=2, loc=mu, scale_tril=chol)
# Evaluate this on an observation in R^3, returning a scalar.
vt.prob([-1., 0, 1])
# Initialize a batch of two 3-variate vector Student's t-distributions.
mu = [[1., 2, 3],
[11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
  vt = tfd.VectorStudentT(df=2, loc=mu, scale_tril=chol)
  # Evaluate this on two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1],
[-11, 0, 11]]
vt.prob(x)
```
For more examples of how to construct the `scale` matrix, see the
`tf.contrib.distributions.bijectors.Affine` docstring.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
loc=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
allow_nan_stats=True,
name="VectorStudentT"):
"""Instantiates the vector Student's t-distributions on `R^k`.
The `batch_shape` is the broadcast between `df.batch_shape` and
`Affine.batch_shape` where `Affine` is constructed from `loc` and
`scale_*` arguments.
The `event_shape` is the event shape of `Affine.event_shape`.
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values. Must be
scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have the
same `batch_shape` implied by `loc`, `scale_*`.
loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is
applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix. When `scale_identity_multiplier =
        scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ..., k], which represents a k x k
diagonal matrix. When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ..., k, k], which represents a
        k x k
lower triangular matrix. When `None` no `scale_tril` term is added to
`scale`. The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Floating-point `Tensor` representing factor matrix
with last two dimensions of shape `(k, r)`. When `None`, no rank-r
update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing the diagonal
matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which
represents an r x r Diagonal matrix. When `None` low rank updates will
take the form `scale_perturb_factor * scale_perturb_factor.T`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
graph_parents = [df, loc, scale_identity_multiplier, scale_diag,
scale_tril, scale_perturb_factor, scale_perturb_diag]
with ops.name_scope(name) as name:
with ops.name_scope("init", values=graph_parents):
# The shape of the _VectorStudentT distribution is governed by the
# relationship between df.batch_shape and affine.batch_shape. In
# pseudocode the basic procedure is:
# if df.batch_shape is scalar:
# if affine.batch_shape is not scalar:
# # broadcast distribution.sample so
# # it has affine.batch_shape.
# self.batch_shape = affine.batch_shape
# else:
# if affine.batch_shape is scalar:
# # let affine broadcasting do its thing.
# self.batch_shape = df.batch_shape
# All of the above magic is actually handled by TransformedDistribution.
# Here we really only need to collect the affine.batch_shape and decide
# what we're going to pass in to TransformedDistribution's
# (override) batch_shape arg.
affine = bijectors.Affine(
shift=loc,
scale_identity_multiplier=scale_identity_multiplier,
scale_diag=scale_diag,
scale_tril=scale_tril,
scale_perturb_factor=scale_perturb_factor,
scale_perturb_diag=scale_perturb_diag,
validate_args=validate_args)
distribution = student_t.StudentT(
df=df,
loc=array_ops.zeros([], dtype=affine.dtype),
scale=array_ops.ones([], dtype=affine.dtype))
batch_shape, override_event_shape = (
distribution_util.shapes_from_loc_and_scale(
affine.shift, affine.scale))
override_batch_shape = distribution_util.pick_vector(
distribution.is_scalar_batch(),
batch_shape,
constant_op.constant([], dtype=dtypes.int32))
super(_VectorStudentT, self).__init__(
distribution=distribution,
bijector=affine,
batch_shape=override_batch_shape,
event_shape=override_event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def df(self):
"""Degrees of freedom in these Student's t distribution(s)."""
return self.distribution.df
@property
def loc(self):
"""Locations of these Student's t distribution(s)."""
return self.bijector.shift
@property
def scale(self):
"""Dense (batch) covariance matrix, if available."""
return self.bijector.scale
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/vector_student_t.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Deterministic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
__all__ = [
"Deterministic",
"VectorDeterministic",
]
@six.add_metaclass(abc.ABCMeta)
class _BaseDeterministic(distribution.Distribution):
"""Base class for Deterministic distributions."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
atol=None,
rtol=None,
is_vector=False,
validate_args=False,
allow_nan_stats=True,
name="_BaseDeterministic"):
"""Initialize a batch of `_BaseDeterministic` distributions.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
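    For example, with `loc = 2.0`, `atol = 0.1`, and `rtol = 0.05`, the slack
    is `0.1 + 0.05 * 2.0 = 0.2`, so `pmf(2.15) = 1` while `pmf(2.3) = 0`.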
Args:
loc: Numeric `Tensor`. The point (or batch of points) on which this
distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
is_vector: Python `bool`. If `True`, this is for `VectorDeterministic`,
else `Deterministic`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: If `loc` is a scalar.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, atol, rtol]) as name:
loc = ops.convert_to_tensor(loc, name="loc")
if is_vector and validate_args:
msg = "Argument loc must be at least rank 1."
if loc.get_shape().ndims is not None:
if loc.get_shape().ndims < 1:
raise ValueError(msg)
else:
loc = control_flow_ops.with_dependencies(
[check_ops.assert_rank_at_least(loc, 1, message=msg)], loc)
self._loc = loc
super(_BaseDeterministic, self).__init__(
dtype=self._loc.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc],
name=name)
self._atol = self._get_tol(atol)
self._rtol = self._get_tol(rtol)
# Avoid using the large broadcast with self.loc if possible.
if rtol is None:
self._slack = self.atol
else:
self._slack = self.atol + self.rtol * math_ops.abs(self.loc)
def _get_tol(self, tol):
if tol is None:
return ops.convert_to_tensor(0, dtype=self.loc.dtype)
tol = ops.convert_to_tensor(tol, dtype=self.loc.dtype)
if self.validate_args:
tol = control_flow_ops.with_dependencies([
check_ops.assert_non_negative(
tol, message="Argument 'tol' must be non-negative")
], tol)
return tol
@property
def loc(self):
"""Point (or batch of points) at which this distribution is supported."""
return self._loc
@property
def atol(self):
"""Absolute tolerance for comparing points to `self.loc`."""
return self._atol
@property
def rtol(self):
"""Relative tolerance for comparing points to `self.loc`."""
return self._rtol
def _entropy(self):
return array_ops.zeros(self.batch_shape_tensor(), dtype=self.dtype)
def _mean(self):
return array_ops.identity(self.loc)
def _variance(self):
return array_ops.zeros_like(self.loc)
def _mode(self):
return self.mean()
  def _sample_n(self, n, seed=None):  # pylint: disable=unused-argument
n_static = tensor_util.constant_value(ops.convert_to_tensor(n))
if n_static is not None and self.loc.get_shape().ndims is not None:
ones = [1] * self.loc.get_shape().ndims
multiples = [n_static] + ones
else:
ones = array_ops.ones_like(array_ops.shape(self.loc))
multiples = array_ops.concat(([n], ones), axis=0)
return array_ops.tile(self.loc[array_ops.newaxis, ...], multiples=multiples)
class Deterministic(_BaseDeterministic):
"""Scalar `Deterministic` distribution on the real line.
The scalar `Deterministic` distribution is parameterized by a [batch] point
`loc` on the real line. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) and cumulative distribution function (cdf)
are
```none
pmf(x; loc) = 1, if x == loc, else 0
cdf(x; loc) = 1, if x >= loc, else 0
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single Deterministic supported at zero.
constant = tfd.Deterministic(0.)
constant.prob(0.)
==> 1.
constant.prob(2.)
==> 0.
# Initialize a [2, 2] batch of scalar constants.
loc = [[0., 1.], [2., 3.]]
x = [[0., 1.1], [1.99, 3.]]
constant = tfd.Deterministic(loc)
constant.prob(x)
==> [[1., 0.], [0., 1.]]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="Deterministic"):
"""Initialize a scalar `Deterministic` distribution.
The `atol` and `rtol` parameters allow for some slack in `pmf`, `cdf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if Abs(x - loc) <= atol + rtol * Abs(loc),
= 0, otherwise.
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb]`, with `b >= 0`.
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(Deterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)
def _batch_shape(self):
return self.loc.get_shape()
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _prob(self, x):
return math_ops.cast(
math_ops.abs(x - self.loc) <= self._slack, dtype=self.dtype)
def _cdf(self, x):
return math_ops.cast(x >= self.loc - self._slack, dtype=self.dtype)
class VectorDeterministic(_BaseDeterministic):
"""Vector `Deterministic` distribution on `R^k`.
The `VectorDeterministic` distribution is parameterized by a [batch] point
`loc in R^k`. The distribution is supported at this point only,
and corresponds to a random variable that is constant, equal to `loc`.
See [Degenerate rv](https://en.wikipedia.org/wiki/Degenerate_distribution).
#### Mathematical Details
The probability mass function (pmf) is
```none
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise.
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single VectorDeterministic supported at [0., 2.] in R^2.
  constant = tfd.VectorDeterministic([0., 2.])
constant.prob([0., 2.])
==> 1.
constant.prob([0., 3.])
==> 0.
# Initialize a [3] batch of constants on R^2.
loc = [[0., 1.], [2., 3.], [4., 5.]]
constant = tfd.VectorDeterministic(loc)
constant.prob([[0., 1.], [1.9, 3.], [3.99, 5.]])
==> [1., 0., 0.]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
atol=None,
rtol=None,
validate_args=False,
allow_nan_stats=True,
name="VectorDeterministic"):
"""Initialize a `VectorDeterministic` distribution on `R^k`, for `k >= 0`.
Note that there is only one point in `R^0`, the "point" `[]`. So if `k = 0`
then `self.prob([]) == 1`.
The `atol` and `rtol` parameters allow for some slack in `pmf`
computations, e.g. due to floating-point error.
```
pmf(x; loc)
= 1, if All[Abs(x - loc) <= atol + rtol * Abs(loc)],
= 0, otherwise
```
Args:
loc: Numeric `Tensor` of shape `[B1, ..., Bb, k]`, with `b >= 0`, `k >= 0`
The point (or batch of points) on which this distribution is supported.
atol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The absolute tolerance for comparing closeness to `loc`.
Default is `0`.
rtol: Non-negative `Tensor` of same `dtype` as `loc` and broadcastable
shape. The relative tolerance for comparing closeness to `loc`.
Default is `0`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
super(VectorDeterministic, self).__init__(
loc,
atol=atol,
rtol=rtol,
is_vector=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
def _batch_shape_tensor(self):
return array_ops.shape(self.loc)[:-1]
def _batch_shape(self):
return self.loc.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.loc)[-1]
def _event_shape(self):
return self.loc.get_shape()[-1:]
def _prob(self, x):
if self.validate_args:
is_vector_check = check_ops.assert_rank_at_least(x, 1)
right_vec_space_check = check_ops.assert_equal(
self.event_shape_tensor(),
array_ops.gather(array_ops.shape(x), array_ops.rank(x) - 1),
message=
"Argument 'x' not defined in the same space R^k as this distribution")
with ops.control_dependencies([is_vector_check]):
with ops.control_dependencies([right_vec_space_check]):
x = array_ops.identity(x)
return math_ops.cast(
math_ops.reduce_all(math_ops.abs(x - self.loc) <= self._slack, axis=-1),
dtype=self.dtype)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/deterministic.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Local PRNG for amplifying seed entropy into seeds for base operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
class SeedStream(object):
"""Local PRNG for amplifying seed entropy into seeds for base operations.
Writing sampling code which correctly sets the pseudo-random number
generator (PRNG) seed is surprisingly difficult. This class serves as
a helper for the TensorFlow Probability coding pattern designed to
avoid common mistakes.
# Motivating Example
A common first-cut implementation of a sampler for the beta
distribution is to compute the ratio of a gamma with itself plus
another gamma. This code snippet tries to do that, but contains a
surprisingly common error:
```python
def broken_beta(shape, alpha, beta, seed):
x = tf.random.gamma(shape, alpha, seed=seed)
y = tf.random.gamma(shape, beta, seed=seed)
return x / (x + y)
```
The mistake is that the two gamma draws are seeded with the same
seed. This causes them to always produce the same results, which,
in turn, leads this code snippet to always return `0.5`. Because it
can happen across abstraction boundaries, this kind of error is
surprisingly easy to make when handling immutable seeds.
# Goals
TensorFlow Probability adopts a code style designed to eliminate the
above class of error, without exacerbating others. The goals of
this code style are:
- Support reproducibility of results (by encouraging seeding of all
pseudo-random operations).
- Avoid shared-write global state (by not relying on a global PRNG).
- Prevent accidental seed reuse by TF Probability implementers. This
goal is served with the local pseudo-random seed generator provided
in this module.
- Mitigate potential accidental seed reuse by TF Probability clients
(with a salting scheme).
- Prevent accidental resonances with downstream PRNGs (by hashing the
output).
## Non-goals
- Implementing a high-performance PRNG for generating large amounts of
entropy. That's the job of the underlying TensorFlow PRNG we are
seeding.
- Avoiding random seed collisions, aka "birthday attacks".
# Code pattern
```python
def random_beta(shape, alpha, beta, seed): # (a)
seed = SeedStream(seed, salt="random_beta") # (b)
x = tf.random.gamma(shape, alpha, seed=seed()) # (c)
y = tf.random.gamma(shape, beta, seed=seed()) # (c)
return x / (x + y)
```
The elements of this pattern are:
- Accept an explicit seed (line a) as an argument in all public
functions, and write the function to be deterministic (up to any
numerical issues) for fixed seed.
- Rationale: This provides the client with the ability to reproduce
results. Accepting an immutable seed rather than a mutable PRNG
object reduces code coupling, permitting different sections to be
reproducible independently.
- Use that seed only to initialize a local `SeedStream` instance (line b).
- Rationale: Avoids accidental seed reuse.
- Supply the name of the function being implemented as a salt to the
`SeedStream` instance (line b). This serves to keep the salts
unique; unique salts ensure that clients of TF Probability will see
different functions always produce independent results even if
called with the same seeds.
- Seed each callee operation with the output of a unique call to the
`SeedStream` instance (lines c). This ensures reproducibility of
results while preventing seed reuse across callee invocations.
# Why salt?
Salting the `SeedStream` instances (with unique salts) is defensive
programming against a client accidentally committing a mistake
similar to our motivating example. Consider the following situation
that might arise without salting:
```python
def tfp_foo(seed):
seed = SeedStream(seed, salt="")
foo_stuff = tf.random.normal(seed=seed())
...
def tfp_bar(seed):
seed = SeedStream(seed, salt="")
bar_stuff = tf.random.normal(seed=seed())
...
def client_baz(seed):
foo = tfp_foo(seed=seed)
bar = tfp_bar(seed=seed)
...
```
The client should have used different seeds as inputs to `foo` and
`bar`. However, because they didn't, *and because `foo` and `bar`
both sample a Gaussian internally as their first action*, the
internal `foo_stuff` and `bar_stuff` will be the same, and the
returned `foo` and `bar` will not be independent, leading to subtly
incorrect answers from the client's simulation. This kind of bug is
particularly insidious for the client, because it depends on a
Distributions implementation detail, namely the order in which `foo`
and `bar` invoke the samplers they depend on. In particular, a
Bayesflow team member can introduce such a bug in previously
(accidentally) correct client code by performing an internal
refactoring that causes this operation order alignment.
A salting discipline eliminates this problem by making sure that the
seeds seen by `foo`'s callees will differ from those seen by `bar`'s
callees, even if `foo` and `bar` are invoked with the same input
seed.
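  As a small illustration (hypothetical values; the exact integers returned
  depend on the SHA-512 hashing performed by `__call__`):
  ```python
  s1 = SeedStream(seed=123, salt="tfp_foo")
  s2 = SeedStream(seed=123, salt="tfp_bar")
  s1() != s2()  # ==> True (with overwhelming probability), despite equal seeds.
  ```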
"""
def __init__(self, seed, salt):
"""Initializes a `SeedStream`.
Args:
seed: Any Python object convertible to string, supplying the
initial entropy. If `None`, operations seeded with seeds
drawn from this `SeedStream` will follow TensorFlow semantics
for not being seeded.
salt: Any Python object convertible to string, supplying
auxiliary entropy. Must be unique across the Distributions
and TensorFlow Probability code base. See class docstring for
rationale.
"""
self._seed = seed.original_seed if isinstance(seed, SeedStream) else seed
self._salt = salt
self._counter = 0
def __call__(self):
"""Returns a fresh integer usable as a seed in downstream operations.
If this `SeedStream` was initialized with `seed=None`, returns
`None`. This has the effect that downstream operations (both
`SeedStream`s and primitive TensorFlow ops) will behave as though
they were unseeded.
The returned integer is non-negative, and uniformly distributed in
the half-open interval `[0, 2**512)`. This is consistent with
TensorFlow, as TensorFlow operations internally use the residue of
the given seed modulo `2**31 - 1` (see
`tensorflow/python/framework/random_seed.py`).
Returns:
seed: A fresh integer usable as a seed in downstream operations,
or `None`.
"""
self._counter += 1
if self._seed is None:
return None
composite = str((self._seed, self._counter, self._salt)).encode("utf-8")
return int(hashlib.sha512(composite).hexdigest(), 16)
@property
def original_seed(self):
return self._seed
@property
def salt(self):
return self._salt
# Design rationales for the SeedStream class
#
# - Salts are accepted for the reason given above to supply them.
#
# - A `None` seed propagates to downstream seeds, so they exhibit
# their "unseeded" behavior.
#
# - The return value is a Python int so it can be passed directly to
# TensorFlow operations as a seed. It is large to avoid losing seed
# space needlessly (TF will internally read only the last 31 bits).
#
# - The output is hashed with a crypto-grade hash function as a form
# of defensive programming: this reliably prevents all possible
# accidental resonances with all possible downstream PRNGs. The
# specific function used is not important; SHA512 was ready to hand.
#
# - The internal state update is a simple counter because (a) given
# that the output is hashed anyway, this is enough, and (b) letting
# it be this predictable permits a future "generate many seeds in
# parallel" operation whose results would agree with running
# sequentially.
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/seed_stream.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution of a vectorized Exponential, with uncorrelated components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import vector_exponential_linear_operator as vector_exponential_linop
from tensorflow.python.framework import ops
from tensorflow.python.util import deprecation
__all__ = [
"VectorExponentialDiag",
]
class VectorExponentialDiag(
vector_exponential_linop.VectorExponentialLinearOperator):
"""The vectorization of the Exponential distribution on `R^k`.
The vector exponential distribution is defined over a subset of `R^k`, and
parameterized by a (batch of) length-`k` `loc` vector and a (batch of) `k x k`
`scale` matrix: `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is defined over the image of the
`scale` matrix + `loc`, applied to the positive half-space:
`Supp = {loc + scale @ x : x in R^k, x_1 > 0, ..., x_k > 0}`. On this set,
```none
pdf(y; loc, scale) = exp(-||x||_1) / Z, for y in Supp
x = inv(scale) @ (y - loc),
Z = |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||x||_1` denotes the `l1` norm of `x`, `sum_i |x_i|`.
The VectorExponential distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Exponential(rate=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorExponential` and `Vector` distributions in TensorFlow.
The `VectorExponential` is a non-standard distribution that has useful
properties.
The marginals `Y_1, ..., Y_k` are *not* Exponential random variables, due to
the fact that the sum of Exponential random variables is not Exponential.
Instead, `Y` is a vector whose components are linear combinations of
Exponential random variables. Thus, `Y` lives in the vector space generated
by `vectors` of Exponential distributions. This allows the user to decide the
mean and covariance (by setting `loc` and `scale`), while preserving some
properties of the Exponential distribution. In particular, the tails of `Y_i`
will be (up to polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Exponential random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 2-variate VectorExponential, supported on
# {(x, y) in R^2 : x > 0, y > 0}.
# The first component has pdf exp{-x}, the second 0.5 exp{-x / 2}
vex = tfd.VectorExponentialDiag(scale_diag=[1., 2.])
  # Compute the pdf of an `R^2` observation; return a scalar.
vex.prob([3., 4.]).eval() # shape: []
# Initialize a 2-batch of 3-variate Vector Exponential's.
loc = [[1., 2, 3],
[1., 0, 0]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vex = tfd.VectorExponentialDiag(loc, scale_diag)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[1.9, 2.2, 3.1],
[10., 1.0, 9.0]] # shape: [2, 3]
vex.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
validate_args=False,
allow_nan_stats=True,
name="VectorExponentialDiag"):
"""Construct Vector Exponential distribution supported on a subset of `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
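    A minimal NumPy sketch of this construction (values are illustrative only):
    ```python
    import numpy as np
    k = 3
    scale_diag = np.array([1., 2., 3.])
    scale_identity_multiplier = 0.5
    scale = np.diag(scale_diag + scale_identity_multiplier * np.ones(k))
    # ==> diag([1.5, 2.5, 3.5]); covariance = scale @ scale.T
    #     = diag([2.25, 6.25, 12.25])
    ```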
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified.
"""
parameters = dict(locals())
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
# LinearOperatorDiag has an assert_non_singular method that is called by
# the Bijector.
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
super(VectorExponentialDiag, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/vector_exponential_diag.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conditional distribution base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class ConditionalDistribution(distribution.Distribution):
"""Distribution that supports intrinsic parameters (local latents).
Subclasses of this distribution may have additional keyword arguments passed
to their sample-based methods (i.e. `sample`, `log_prob`, etc.).
"""
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def sample(self, sample_shape=(), seed=None, name="sample",
**condition_kwargs):
return self._call_sample_n(sample_shape, seed, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_prob(self, value, name="log_prob", **condition_kwargs):
return self._call_log_prob(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def prob(self, value, name="prob", **condition_kwargs):
return self._call_prob(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
return self._call_log_cdf(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def cdf(self, value, name="cdf", **condition_kwargs):
return self._call_cdf(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
return self._call_log_survival_function(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def survival_function(self, value, name="survival_function",
**condition_kwargs):
return self._call_survival_function(value, name, **condition_kwargs)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/conditional_distribution.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalTriL",
]
class MultivariateNormalTriL(mvn_linop.MultivariateNormalLinearOperator):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T` where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a matrix in `R^{k x k}`, `covariance = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
A (non-batch) `scale` matrix is:
```none
scale = scale_tril
```
  where `scale_tril` is a lower-triangular `k x k` matrix with non-zero diagonal,
i.e., `tf.linalg.tensor_diag_part(scale_tril) != 0`.
Additional leading dimensions (if any) will index batches.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
Trainable (batch) lower-triangular matrices can be created with
`tfp.distributions.matrix_diag_transform()` and/or
  `tfp.distributions.fill_triangular()`.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.linalg.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
mvn = tfd.MultivariateNormalTriL(
loc=mu,
scale_tril=scale)
mvn.mean().eval()
# ==> [1., 2, 3]
# Covariance agrees with cholesky(cov) parameterization.
mvn.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
  # Compute the pdf of an observation in `R^3`; return a scalar.
mvn.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
tril = ... # shape: [2, 3, 3], lower triangular, non-zero diagonal.
mvn = tfd.MultivariateNormalTriL(
loc=mu,
scale_tril=tril)
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
# Instantiate a "learnable" MVN.
dims = 4
with tf.compat.v1.variable_scope("model"):
mvn = tfd.MultivariateNormalTriL(
loc=tf.compat.v1.get_variable(shape=[dims], dtype=tf.float32,
name="mu"),
scale_tril=tfd.fill_triangular(
            tf.compat.v1.get_variable(shape=[dims * (dims + 1) // 2],
dtype=tf.float32, name="chol_Sigma")))
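  # A small illustrative check: the identity
  # `covariance = scale_tril @ scale_tril.T` can be verified directly with
  # the `scale` computed above.
  tf.matmul(scale, scale, transpose_b=True).eval()
  # ==> approximately `cov`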
```
"""
@deprecation.deprecated(
"2018-10-01", "The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_tril=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalTriL"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
    The `event_shape` is given by the last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix is:
```none
scale = scale_tril
```
    where `scale_tril` is a lower-triangular `k x k` matrix with non-zero
diagonal, i.e., `tf.linalg.tensor_diag_part(scale_tril) != 0`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_tril: Floating-point, lower-triangular `Tensor` with non-zero
        diagonal elements. `scale_tril` has shape `[B1, ..., Bb, k, k]` where
        `b >= 0` and `k` is the event size.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or more
of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
      ValueError: if neither `loc` nor `scale_tril` is specified.
"""
parameters = dict(locals())
def _convert_to_tensor(x, name):
return None if x is None else ops.convert_to_tensor(x, name=name)
if loc is None and scale_tril is None:
raise ValueError("Must specify one or both of `loc`, `scale_tril`.")
with ops.name_scope(name) as name:
with ops.name_scope("init", values=[loc, scale_tril]):
loc = _convert_to_tensor(loc, name="loc")
scale_tril = _convert_to_tensor(scale_tril, name="scale_tril")
if scale_tril is None:
scale = linalg.LinearOperatorIdentity(
num_rows=distribution_util.dimension_size(loc, -1),
dtype=loc.dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
else:
# No need to validate that scale_tril is non-singular.
# LinearOperatorLowerTriangular has an assert_non_singular
# method that is called by the Bijector.
scale = linalg.LinearOperatorLowerTriangular(
scale_tril,
is_non_singular=True,
is_self_adjoint=False,
is_positive_definite=False)
super(MultivariateNormalTriL, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/mvn_tril.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Poisson distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"Poisson",
]
_poisson_sample_note = """
The Poisson distribution is technically only defined for non-negative integer
values. When `validate_args=True`, non-integral inputs trigger an assertion.
When `validate_args=False`, the calculations are unchanged whether the inputs
are integral or non-integral.
When `validate_args=False`, evaluating the pmf at non-integral values
corresponds to evaluating an unnormalized distribution; these values do not
correspond to evaluations of the cdf.
"""
class Poisson(distribution.Distribution):
"""Poisson distribution.
The Poisson distribution is parameterized by an event `rate` parameter.
#### Mathematical Details
The probability mass function (pmf) is,
```none
pmf(k; lambda, k >= 0) = (lambda^k / k!) / Z
Z = exp(lambda).
```
where `rate = lambda` and `Z` is the normalizing constant.
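  #### Examples
  A minimal illustrative sketch; the numeric value follows from the pmf above
  and is approximate.
  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  pois = tfd.Poisson(rate=3.)
  pois.prob(2.).eval()  # ==> 3.**2 / 2! * exp(-3.) ~= 0.224
  ```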
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
rate=None,
log_rate=None,
validate_args=False,
allow_nan_stats=True,
name="Poisson"):
"""Initialize a batch of Poisson distributions.
Args:
rate: Floating point tensor, the rate parameter. `rate` must be positive.
Must specify exactly one of `rate` and `log_rate`.
log_rate: Floating point tensor, the log of the rate parameter.
Must specify exactly one of `rate` and `log_rate`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if none or both of `rate`, `log_rate` are specified.
TypeError: if `rate` is not a float-type.
TypeError: if `log_rate` is not a float-type.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[rate]) as name:
if (rate is None) == (log_rate is None):
raise ValueError("Must specify exactly one of `rate` and `log_rate`.")
elif log_rate is None:
rate = ops.convert_to_tensor(rate, name="rate")
if not rate.dtype.is_floating:
raise TypeError("rate.dtype ({}) is a not a float-type.".format(
rate.dtype.name))
with ops.control_dependencies([check_ops.assert_positive(rate)] if
validate_args else []):
self._rate = array_ops.identity(rate, name="rate")
self._log_rate = math_ops.log(rate, name="log_rate")
else:
log_rate = ops.convert_to_tensor(log_rate, name="log_rate")
if not log_rate.dtype.is_floating:
raise TypeError("log_rate.dtype ({}) is a not a float-type.".format(
log_rate.dtype.name))
self._rate = math_ops.exp(log_rate, name="rate")
self._log_rate = ops.convert_to_tensor(log_rate, name="log_rate")
super(Poisson, self).__init__(
dtype=self._rate.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._rate],
name=name)
@property
def rate(self):
"""Rate parameter."""
return self._rate
@property
def log_rate(self):
"""Log rate parameter."""
return self._log_rate
def _batch_shape_tensor(self):
return array_ops.shape(self.rate)
def _batch_shape(self):
return self.rate.shape
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
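    # For integer-valued `x`, the Poisson cdf is the regularized upper
    # incomplete gamma function Q(x + 1, rate), computed here via `igammac`.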
return math_ops.igammac(1. + x, self.rate)
def _log_normalization(self):
return self.rate
def _log_unnormalized_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return x * self.log_rate - math_ops.lgamma(1. + x)
def _mean(self):
return array_ops.identity(self.rate)
def _variance(self):
return array_ops.identity(self.rate)
@distribution_util.AppendDocstring(
"""Note: when `rate` is an integer, there are actually two modes: `rate`
and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
return math_ops.floor(self.rate)
def _sample_n(self, n, seed=None):
return random_ops.random_poisson(
self.rate, [n], dtype=self.dtype, seed=seed)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/poisson.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vectorized Laplace distribution class, directly using LinearOperator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import laplace
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"VectorLaplaceLinearOperator"
]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
class VectorLaplaceLinearOperator(
transformed_distribution.TransformedDistribution):
"""The vectorization of the Laplace distribution on `R^k`.
The vector laplace distribution is defined over `R^k`, and parameterized by
a (batch of) length-`k` `loc` vector (the means) and a (batch of) `k x k`
`scale` matrix: `covariance = 2 * scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-||y||_1) / Z,
y = inv(scale) @ (x - loc),
Z = 2**k |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
  * `||y||_1` denotes the `l1` norm of `y`, `sum_i |y_i|`.
The VectorLaplace distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Laplace(loc=0, scale=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorLaplace` and `Vector` distributions in TensorFlow.
The `VectorLaplace` is a non-standard distribution that has useful properties.
  The marginals `Y_1, ..., Y_k` are *not* Laplace random variables, because
  the sum of Laplace random variables is not Laplace.
Instead, `Y` is a vector whose components are linear combinations of Laplace
random variables. Thus, `Y` lives in the vector space generated by `vectors`
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting `loc` and `scale`), while preserving some properties of
the Laplace distribution. In particular, the tails of `Y_i` will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate VectorLaplace with some desired covariance.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
# Divide scale by sqrt(2) so that the final covariance will be what we want.
vla = tfd.VectorLaplaceLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorLowerTriangular(scale / tf.sqrt(2.)))
# Covariance agrees with cholesky(cov) parameterization.
vla.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
  # Compute the pdf of an `R^3` observation; return a scalar.
vla.prob([-1., 0, 1]).eval() # shape: []
  # Initialize a 2-batch of 3-variate VectorLaplace distributions.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
vla = tfd.VectorLaplaceLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
vla.prob(x).eval() # shape: [2]
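  # An extra illustrative line: with a diagonal `scale`, the componentwise
  # standard deviation is `sqrt(2) * |scale_diag|`, consistent with
  # `covariance = 2 * scale @ scale.T`.
  vla.stddev().eval()  # shape: [2, 3]
  # ==> sqrt(2) * [[1., 2, 3],
  #                [0.5, 1, 1.5]]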
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="VectorLaplaceLinearOperator"):
"""Construct Vector Laplace distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
    The `event_shape` is given by the last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = 2 * scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents):
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(VectorLaplaceLinearOperator, self).__init__(
distribution=laplace.Laplace(
loc=array_ops.zeros([], dtype=scale.dtype),
scale=array_ops.ones([], dtype=scale.dtype)),
bijector=bijectors.AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(VectorLaplaceLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(VectorLaplaceLinearOperator, self)._prob(x)
def _mean(self):
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
if self.loc is None:
return array_ops.zeros(shape, self.dtype)
if has_static_shape and shape == self.loc.get_shape():
return array_ops.identity(self.loc)
# Add dummy tensor of zeros to broadcast. This is only necessary if shape
# != self.loc.shape, but we could not determine if this is the case.
return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)
def _covariance(self):
# Let
# W = (w1,...,wk), with wj ~ iid Laplace(0, 1).
# Then this distribution is
# X = loc + LW,
# and since E[X] = loc,
# Cov(X) = E[LW W^T L^T] = L E[W W^T] L^T.
# Since E[wi wj] = 0 if i != j, and 2 if i == j, we have
# Cov(X) = 2 LL^T
if distribution_util.is_diagonal_scale(self.scale):
return 2. * array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return 2. * self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return 2. * math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
2. * self.scale.matmul(self.scale.to_dense()))
else:
return 2. * array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return np.sqrt(2) * math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense())))
else:
return np.sqrt(2) * math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
return self._mean()
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/vector_laplace_linear_operator.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops.bijectors import AffineLinearOperator
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.linalg import linalg
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalLinearOperator",
]
_mvn_sample_note = """
`value` is a batch vector with compatible shape if `value` is a `Tensor` whose
shape can be broadcast up to either:
```python
self.batch_shape + self.event_shape
```
or
```python
[M1, ..., Mm] + self.batch_shape + self.event_shape
```
"""
# TODO(b/35290280): Import in `../../__init__.py` after adding unit-tests.
class MultivariateNormalLinearOperator(
transformed_distribution.TransformedDistribution):
"""The multivariate normal distribution on `R^k`.
The Multivariate Normal distribution is defined over `R^k` and parameterized
by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
`scale` matrix; `covariance = scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
y = inv(scale) @ (x - loc),
Z = (2 pi)**(0.5 k) |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
* `||y||**2` denotes the squared Euclidean norm of `y`.
The MultivariateNormal distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
Y = scale @ X + loc
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate Gaussian.
mu = [1., 2, 3]
cov = [[ 0.36, 0.12, 0.06],
[ 0.12, 0.29, -0.13],
[ 0.06, -0.13, 0.26]]
scale = tf.cholesky(cov)
# ==> [[ 0.6, 0. , 0. ],
# [ 0.2, 0.5, 0. ],
# [ 0.1, -0.3, 0.4]])
mvn = tfd.MultivariateNormalLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorLowerTriangular(scale))
# Covariance agrees with cholesky(cov) parameterization.
mvn.covariance().eval()
# ==> [[ 0.36, 0.12, 0.06],
# [ 0.12, 0.29, -0.13],
# [ 0.06, -0.13, 0.26]]
  # Compute the pdf of an `R^3` observation; return a scalar.
mvn.prob([-1., 0, 1]).eval() # shape: []
# Initialize a 2-batch of 3-variate Gaussians.
mu = [[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
scale_diag = [[1., 2, 3],
[0.5, 1, 1.5]] # shape: [2, 3]
mvn = tfd.MultivariateNormalLinearOperator(
loc=mu,
scale=tf.linalg.LinearOperatorDiag(scale_diag))
# Compute the pdf of two `R^3` observations; return a length-2 vector.
x = [[-0.9, 0, 0.1],
[-10, 0, 9]] # shape: [2, 3]
mvn.prob(x).eval() # shape: [2]
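  # An extra illustrative step: the KL divergence between two
  # `MultivariateNormalLinearOperator` instances (registered below via
  # `_kl_brute_force`) is available through `tfd.kl_divergence`. The second
  # distribution here is purely illustrative.
  mvn2 = tfd.MultivariateNormalLinearOperator(
      loc=mu,
      scale=tf.linalg.LinearOperatorDiag([[2., 4, 6],
                                          [1., 2, 3]]))
  tfd.kl_divergence(mvn, mvn2).eval()  # shape: [2]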
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalLinearOperator"):
"""Construct Multivariate Normal distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
    The `event_shape` is given by the last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = scale @ scale.T`.
Additional leading dimensions (if any) will index batches.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape
`[B1, ..., Bb, k, k]`.
validate_args: Python `bool`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
ValueError: if `scale` is unspecified.
TypeError: if not `scale.dtype.is_floating`
"""
parameters = dict(locals())
if scale is None:
raise ValueError("Missing required `scale` parameter.")
if not scale.dtype.is_floating:
raise TypeError("`scale` parameter must have floating-point dtype.")
with ops.name_scope(name, values=[loc] + scale.graph_parents) as name:
# Since expand_dims doesn't preserve constant-ness, we obtain the
# non-dynamic value if possible.
loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
loc, scale)
super(MultivariateNormalLinearOperator, self).__init__(
distribution=normal.Normal(
loc=array_ops.zeros([], dtype=scale.dtype),
scale=array_ops.ones([], dtype=scale.dtype)),
bijector=AffineLinearOperator(
shift=loc, scale=scale, validate_args=validate_args),
batch_shape=batch_shape,
event_shape=event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def loc(self):
"""The `loc` `Tensor` in `Y = scale @ X + loc`."""
return self.bijector.shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + loc`."""
return self.bijector.scale
@distribution_util.AppendDocstring(_mvn_sample_note)
def _log_prob(self, x):
return super(MultivariateNormalLinearOperator, self)._log_prob(x)
@distribution_util.AppendDocstring(_mvn_sample_note)
def _prob(self, x):
return super(MultivariateNormalLinearOperator, self)._prob(x)
def _mean(self):
shape = self.batch_shape.concatenate(self.event_shape)
has_static_shape = shape.is_fully_defined()
if not has_static_shape:
shape = array_ops.concat([
self.batch_shape_tensor(),
self.event_shape_tensor(),
], 0)
if self.loc is None:
return array_ops.zeros(shape, self.dtype)
if has_static_shape and shape == self.loc.get_shape():
return array_ops.identity(self.loc)
# Add dummy tensor of zeros to broadcast. This is only necessary if shape
# != self.loc.shape, but we could not determine if this is the case.
return array_ops.identity(self.loc) + array_ops.zeros(shape, self.dtype)
def _covariance(self):
if distribution_util.is_diagonal_scale(self.scale):
return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
else:
return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)
def _variance(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.square(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense()))
else:
return array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
def _stddev(self):
if distribution_util.is_diagonal_scale(self.scale):
return math_ops.abs(self.scale.diag_part())
elif (isinstance(self.scale, linalg.LinearOperatorLowRankUpdate) and
self.scale.is_self_adjoint):
return math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense())))
else:
return math_ops.sqrt(array_ops.matrix_diag_part(
self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))
def _mode(self):
return self._mean()
@kullback_leibler.RegisterKL(MultivariateNormalLinearOperator,
MultivariateNormalLinearOperator)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _kl_brute_force(a, b, name=None):
"""Batched KL divergence `KL(a || b)` for multivariate Normals.
With `X`, `Y` both multivariate Normals in `R^k` with means `mu_a`, `mu_b` and
covariance `C_a`, `C_b` respectively,
```
KL(a || b) = 0.5 * ( L - k + T + Q ),
L := Log[Det(C_b)] - Log[Det(C_a)]
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
```
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k**2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
a: Instance of `MultivariateNormalLinearOperator`.
b: Instance of `MultivariateNormalLinearOperator`.
name: (optional) name to use for created ops. Default "kl_mvn".
Returns:
Batchwise `KL(a || b)`.
"""
def squared_frobenius_norm(x):
"""Helper to make KL calculation slightly more readable."""
# http://mathworld.wolfram.com/FrobeniusNorm.html
# The gradient of KL[p,q] is not defined when p==q. The culprit is
# linalg_ops.norm, i.e., we cannot use the commented out code.
# return math_ops.square(linalg_ops.norm(x, ord="fro", axis=[-2, -1]))
return math_ops.reduce_sum(math_ops.square(x), axis=[-2, -1])
# TODO(b/35041439): See also b/35040945. Remove this function once LinOp
# supports something like:
# A.inverse().solve(B).norm(order='fro', axis=[-1, -2])
def is_diagonal(x):
"""Helper to identify if `LinearOperator` has only a diagonal component."""
return (isinstance(x, linalg.LinearOperatorIdentity) or
isinstance(x, linalg.LinearOperatorScaledIdentity) or
isinstance(x, linalg.LinearOperatorDiag))
with ops.name_scope(name, "kl_mvn", values=[a.loc, b.loc] +
a.scale.graph_parents + b.scale.graph_parents):
# Calculation is based on:
# http://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
# and,
# https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm
# i.e.,
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ij} (inv(B) A)_{ij}**2
# = ||inv(B) A||_F**2
# where ||.||_F is the Frobenius norm and the second equality follows from
# the cyclic permutation property.
if is_diagonal(a.scale) and is_diagonal(b.scale):
# Using `stddev` because it handles expansion of Identity cases.
b_inv_a = (a.stddev() / b.stddev())[..., array_ops.newaxis]
else:
b_inv_a = b.scale.solve(a.scale.to_dense())
kl_div = (b.scale.log_abs_determinant()
- a.scale.log_abs_determinant()
+ 0.5 * (
- math_ops.cast(a.scale.domain_dimension_tensor(), a.dtype)
+ squared_frobenius_norm(b_inv_a)
+ squared_frobenius_norm(b.scale.solve(
(b.mean() - a.mean())[..., array_ops.newaxis]))))
kl_div.set_shape(array_ops.broadcast_static_shape(
a.batch_shape, b.batch_shape))
return kl_div
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/mvn_linear_operator.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Logistic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
class Logistic(distribution.Distribution):
"""The Logistic distribution with location `loc` and `scale` parameters.
#### Mathematical details
The cumulative density function of this distribution is:
```none
cdf(x; mu, sigma) = 1 / (1 + exp(-(x - mu) / sigma))
```
where `loc = mu` and `scale = sigma`.
The Logistic distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Logistic(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Logistic distribution.
dist = tfd.Logistic(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Logistics.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tfd.Logistic(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
# Arguments are broadcast when possible.
# Define a batch of two scalar valued Logistics.
# Both have mean 1, but different scales.
dist = tfd.Logistic(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
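  # An extra illustrative line: by the cdf formula above, cdf(loc) = 0.5 for
  # any positive scale.
  tfd.Logistic(loc=2., scale=3.).cdf(2.)  # ==> 0.5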
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Logistic"):
"""Construct Logistic distributions with mean and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s). Must
contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Logistic, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return -nn_ops.softplus(-self._z(x))
def _cdf(self, x):
return math_ops.sigmoid(self._z(x))
def _log_survival_function(self, x):
return -nn_ops.softplus(self._z(x))
def _survival_function(self, x):
return math_ops.sigmoid(-self._z(x))
def _log_unnormalized_prob(self, x):
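    # pdf(x) = exp(-z) / (scale * (1 + exp(-z))**2) with z = (x - loc) / scale,
    # so the unnormalized log-density is -z - 2 * softplus(-z); the log(scale)
    # term is handled separately in `_log_normalization`.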
z = self._z(x)
return - z - 2. * nn_ops.softplus(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 2 + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(3)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/logistic.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Kumaraswamy distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import uniform
from tensorflow.python.util import deprecation
__all__ = [
"Kumaraswamy",
]
_kumaraswamy_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1]`. It must have a shape compatible with `self.batch_shape()`."""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _harmonic_number(x):
"""Compute the harmonic number from its analytic continuation.
Derivation from [here](
https://en.wikipedia.org/wiki/Digamma_function#Relation_to_harmonic_numbers)
and [Euler's constant](
https://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant).
Args:
x: input float.
Returns:
z: The analytic continuation of the harmonic number for the input.
"""
one = array_ops.ones([], dtype=x.dtype)
return math_ops.digamma(x + one) - math_ops.digamma(one)
class Kumaraswamy(transformed_distribution.TransformedDistribution):
"""Kumaraswamy distribution.
The Kumaraswamy distribution is defined over the `(0, 1)` interval using
parameters
`concentration1` (aka "alpha") and `concentration0` (aka "beta"). It has a
  shape similar to the Beta distribution, but is reparameterizable.
#### Mathematical Details
The probability density function (pdf) is,
```none
  pdf(x; alpha, beta) = alpha * beta * x**(alpha - 1) * (1 - x**alpha)**(beta - 1)
```
where:
* `concentration1 = alpha`,
  * `concentration0 = beta`.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
#### Examples
```python
# Create a batch of three Kumaraswamy distributions.
alpha = [1, 2, 3]
beta = [1, 2, 3]
dist = Kumaraswamy(alpha, beta)
dist.sample([4, 5]) # Shape [4, 5, 3]
# `x` has three batch entries, each with two samples.
x = [[.1, .4, .5],
[.2, .3, .5]]
# Calculate the probability of each pair of samples under the corresponding
# distribution in `dist`.
dist.prob(x) # Shape [2, 3]
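  # An extra illustrative scalar evaluation of the pdf formula above; the
  # name `kuma` is used only for this sketch.
  kuma = Kumaraswamy(concentration1=2., concentration0=3.)
  kuma.prob(0.5)  # ==> 2 * 3 * 0.5**1 * (1 - 0.5**2)**2 = 1.6875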
```
```python
# Create batch_shape=[2, 3] via parameter broadcast:
alpha = [[1.], [2]] # Shape [2, 1]
beta = [3., 4, 5] # Shape [3]
dist = Kumaraswamy(alpha, beta)
# alpha broadcast as: [[1., 1, 1,],
# [2, 2, 2]]
# beta broadcast as: [[3., 4, 5],
# [3, 4, 5]]
  # batch_shape [2, 3]
dist.sample([4, 5]) # Shape [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # Shape [2, 3]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
concentration1=None,
concentration0=None,
validate_args=False,
allow_nan_stats=True,
name="Kumaraswamy"):
"""Initialize a batch of Kumaraswamy distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka "alpha". Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicating mean
number of failures; aka "beta". Otherwise has same semantics as
`concentration1`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
with ops.name_scope(name, values=[concentration1, concentration0]) as name:
concentration1 = ops.convert_to_tensor(
concentration1, name="concentration1")
concentration0 = ops.convert_to_tensor(
concentration0, name="concentration0")
super(Kumaraswamy, self).__init__(
distribution=uniform.Uniform(
low=array_ops.zeros([], dtype=concentration1.dtype),
high=array_ops.ones([], dtype=concentration1.dtype),
allow_nan_stats=allow_nan_stats),
bijector=bijectors.Kumaraswamy(
concentration1=concentration1, concentration0=concentration0,
validate_args=validate_args),
batch_shape=distribution_util.get_broadcast_shape(
concentration1, concentration0),
name=name)
self._reparameterization_type = distribution.FULLY_REPARAMETERIZED
@property
def concentration1(self):
"""Concentration parameter associated with a `1` outcome."""
return self.bijector.concentration1
@property
def concentration0(self):
"""Concentration parameter associated with a `0` outcome."""
return self.bijector.concentration0
  def _entropy(self):
    a = self.concentration1
    b = self.concentration0
    # Differential entropy: (1 - 1/b) + (1 - 1/a) * H(b) - log(a) - log(b),
    # with H the analytically continued harmonic number defined above.
    return (1 - 1. / b) + (
        1 - 1. / a) * _harmonic_number(b) - math_ops.log(a) - math_ops.log(b)
def _moment(self, n):
"""Compute the n'th (uncentered) moment."""
total_concentration = self.concentration1 + self.concentration0
expanded_concentration1 = array_ops.ones_like(
total_concentration, dtype=self.dtype) * self.concentration1
expanded_concentration0 = array_ops.ones_like(
total_concentration, dtype=self.dtype) * self.concentration0
beta_arg0 = 1 + n / expanded_concentration1
beta_arg = array_ops.stack([beta_arg0, expanded_concentration0], -1)
log_moment = math_ops.log(expanded_concentration0) + special_math_ops.lbeta(
beta_arg)
return math_ops.exp(log_moment)
def _mean(self):
return self._moment(1)
def _variance(self):
# TODO(b/72696533): Investigate a more numerically stable version.
return self._moment(2) - math_ops.square(self._moment(1))
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when `concentration1 <= 1` or
`concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
is used for undefined modes. If `self.allow_nan_stats` is `False` an
exception is raised when one or more modes are undefined.""")
def _mode(self):
a = self.concentration1
b = self.concentration0
mode = ((a - 1) / (a * b - 1))**(1. / a)
if self.allow_nan_stats:
nan = array_ops.fill(
self.batch_shape_tensor(),
np.array(np.nan, dtype=self.dtype.as_numpy_dtype),
name="nan")
is_defined = (self.concentration1 > 1.) & (self.concentration0 > 1.)
return array_ops.where(is_defined, mode, nan)
return control_flow_ops.with_dependencies([
check_ops.assert_less(
array_ops.ones([], dtype=self.concentration1.dtype),
self.concentration1,
message="Mode undefined for concentration1 <= 1."),
check_ops.assert_less(
array_ops.ones([], dtype=self.concentration0.dtype),
self.concentration0,
message="Mode undefined for concentration0 <= 1.")
], mode)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/kumaraswamy.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution of a vectorized Laplace, with uncorrelated components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import vector_laplace_linear_operator as vector_laplace_linop
from tensorflow.python.framework import ops
from tensorflow.python.util import deprecation
__all__ = [
"VectorLaplaceDiag",
]
class VectorLaplaceDiag(
vector_laplace_linop.VectorLaplaceLinearOperator):
"""The vectorization of the Laplace distribution on `R^k`.
The vector laplace distribution is defined over `R^k`, and parameterized by
a (batch of) length-`k` `loc` vector (the means) and a (batch of) `k x k`
`scale` matrix: `covariance = 2 * scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-||y||_1) / Z,
y = inv(scale) @ (x - loc),
Z = 2**k |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
  * `||y||_1` denotes the `l1` norm of `y`, `sum_i |y_i|`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The VectorLaplace distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Laplace(loc=0, scale=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorLaplace` and `Vector` distributions in TensorFlow.
The `VectorLaplace` is a non-standard distribution that has useful properties.
The marginals `Y_1, ..., Y_k` are *not* Laplace random variables, due to
the fact that the sum of Laplace random variables is not Laplace.
Instead, `Y` is a vector whose components are linear combinations of Laplace
random variables. Thus, `Y` lives in the vector space generated by `vectors`
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting `loc` and `scale`), while preserving some properties of
the Laplace distribution. In particular, the tails of `Y_i` will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 2-variate VectorLaplace.
vla = tfd.VectorLaplaceDiag(
loc=[1., -1],
scale_diag=[1, 2.])
vla.mean().eval()
# ==> [1., -1]
vla.stddev().eval()
# ==> [1., 2] * sqrt(2)
# Evaluate this on an observation in `R^2`, returning a scalar.
vla.prob([-1., 0]).eval() # shape: []
# Initialize a 3-batch, 2-variate scaled-identity VectorLaplace.
vla = tfd.VectorLaplaceDiag(
loc=[1., -1],
scale_identity_multiplier=[1, 2., 3])
vla.mean().eval() # shape: [3, 2]
# ==> [[1., -1]
# [1, -1],
# [1, -1]]
vla.stddev().eval() # shape: [3, 2]
# ==> sqrt(2) * [[1., 1],
# [2, 2],
# [3, 3]]
# Evaluate this on an observation in `R^2`, returning a length-3 vector.
vla.prob([-1., 0]).eval() # shape: [3]
# Initialize a 2-batch of 3-variate VectorLaplace's.
vla = tfd.VectorLaplaceDiag(
loc=[[1., 2, 3],
[11, 22, 33]],  # shape: [2, 3]
scale_diag=[[1., 2, 3],
[0.5, 1, 1.5]]) # shape: [2, 3]
# Evaluate this on two observations, each in `R^3`, returning a length-2
# vector.
x = [[-1., 0, 1],
[-11, 0, 11.]] # shape: [2, 3].
vla.prob(x).eval() # shape: [2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
validate_args=False,
allow_nan_stats=True,
name="VectorLaplaceDiag"):
"""Construct Vector Laplace distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = 2 * scale @ scale.T`.
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified (i.e.,
neither `loc` nor `scale_diag` is given, so the event size cannot be
inferred).
"""
parameters = dict(locals())
with ops.name_scope(name):
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
# LinearOperatorDiag has an assert_non_singular method that is called by
# the Bijector.
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
super(VectorLaplaceDiag, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
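if __name__ == "__main__":
  # Standalone NumPy/SciPy sketch (illustration only, not part of this
  # module): evaluates the density documented above,
  #   pdf(x; loc, scale) = exp(-||inv(scale) @ (x - loc)||_1) / Z,
  #   Z = 2**k * |det(scale)|,
  # for a diagonal `scale`, and checks it against the product of independent
  # scipy Laplace marginals. All numeric values are hypothetical.
  import numpy as np
  from scipy import stats

  loc = np.array([1., -1.])
  scale_diag = np.array([1., 2.])
  x = np.array([-1., 0.])

  y = (x - loc) / scale_diag                 # inv(scale) @ (x - loc)
  z = 2.**len(x) * np.prod(scale_diag)       # normalization constant Z
  pdf = np.exp(-np.sum(np.abs(y))) / z
  pdf_factored = np.prod(stats.laplace.pdf(x, loc=loc, scale=scale_diag))
  print(pdf, pdf_factored)  # The two should match.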
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/vector_laplace_diag.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Half Normal distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
__all__ = [
"HalfNormal",
]
class HalfNormal(distribution.Distribution):
"""The Half Normal distribution with scale `scale`.
#### Mathematical details
The half normal is a transformation of a centered normal distribution.
If some random variable `X` has normal distribution,
```none
X ~ Normal(0.0, scale)
Y = |X|
```
Then `Y` will have half normal distribution. The probability density
function (pdf) is:
```none
pdf(x; scale, x > 0) = sqrt(2) / (scale * sqrt(pi)) *
                       exp(- 1/2 * (x / scale) ** 2)
```
Where `scale = sigma` is the standard deviation of the underlying normal
distribution.
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar HalfNormal distribution.
dist = tfd.HalfNormal(scale=3.0)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued HalfNormals.
# The first has scale 11.0, the second 22.0
dist = tfd.HalfNormal(scale=[11.0, 22.0])
# Evaluate the pdf of the first distribution on 1.0, and the second on 1.5,
# returning a length two tensor.
dist.prob([1.0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
scale,
validate_args=False,
allow_nan_stats=True,
name="HalfNormal"):
"""Construct HalfNormals with scale `scale`.
Args:
scale: Floating point tensor; the scales of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._scale = array_ops.identity(scale, name="scale")
super(HalfNormal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return {"scale": ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)}
@property
def scale(self):
"""Distribution parameter for the scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.shape(self.scale)
def _batch_shape(self):
return self.scale.shape
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
sampled = random_ops.random_normal(
shape=shape, mean=0., stddev=1., dtype=self.dtype, seed=seed)
return math_ops.abs(sampled * self.scale)
def _prob(self, x):
coeff = np.sqrt(2) / self.scale / np.sqrt(np.pi)
pdf = coeff * math_ops.exp(- 0.5 * (x / self.scale) ** 2)
return pdf * math_ops.cast(x >= 0, self.dtype)
def _cdf(self, x):
truncated_x = nn.relu(x)
return math_ops.erf(truncated_x / self.scale / np.sqrt(2.0))
def _entropy(self):
return 0.5 * math_ops.log(np.pi * self.scale ** 2.0 / 2.0) + 0.5
def _mean(self):
return self.scale * np.sqrt(2.0) / np.sqrt(np.pi)
def _quantile(self, p):
return np.sqrt(2.0) * self.scale * special_math.erfinv(p)
def _mode(self):
return array_ops.zeros(self.batch_shape_tensor())
def _variance(self):
return self.scale ** 2.0 * (1.0 - 2.0 / np.pi)
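if __name__ == "__main__":
  # Standalone NumPy/SciPy sketch (illustration only, not part of this
  # module): checks the closed-form pdf, cdf, mean, and variance used above
  # against scipy.stats.halfnorm for one hypothetical `scale`.
  from scipy import special, stats

  scale = 3.0
  x = np.linspace(0., 10., 5)

  pdf = np.sqrt(2.) / (scale * np.sqrt(np.pi)) * np.exp(-0.5 * (x / scale)**2)
  cdf = special.erf(x / (scale * np.sqrt(2.)))
  mean = scale * np.sqrt(2. / np.pi)
  variance = scale**2 * (1. - 2. / np.pi)

  print(np.allclose(pdf, stats.halfnorm.pdf(x, scale=scale)))       # True
  print(np.allclose(cdf, stats.halfnorm.cdf(x, scale=scale)))       # True
  print(np.allclose([mean, variance],
                    [stats.halfnorm.mean(scale=scale),
                     stats.halfnorm.var(scale=scale)]))              # True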
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/half_normal.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing moving statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
__all__ = [
"assign_moving_mean_variance",
"assign_log_moving_mean_exp",
"moving_mean_variance",
]
def assign_moving_mean_variance(
mean_var, variance_var, value, decay, name=None):
"""Compute exponentially weighted moving {mean,variance} of a streaming value.
The exponentially weighted moving `mean_var` and `variance_var`, updated by
`value`, are given by the following recurrence relations:
```python
variance_var = decay * (variance_var + (1-decay) * (value - mean_var)**2)
mean_var = decay * mean_var + (1 - decay) * value
```
Note: `mean_var` is updated *after* `variance_var`, i.e., `variance_var` uses
the lag-1 mean.
For derivation justification, see [Finch (2009; Eq. 143)][1].
Args:
mean_var: `float`-like `Variable` representing the exponentially weighted
moving mean. Same shape as `variance_var` and `value`.
variance_var: `float`-like `Variable` representing the
exponentially weighted moving variance. Same shape as `mean_var` and
`value`.
value: `float`-like `Tensor`. Same shape as `mean_var` and `variance_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
name: Optional name of the returned operation.
Returns:
mean_var: `Variable` representing the `value`-updated exponentially weighted
moving mean.
variance_var: `Variable` representing the `value`-updated
exponentially weighted moving variance.
Raises:
TypeError: if `mean_var` does not have float type `dtype`.
TypeError: if `mean_var`, `variance_var`, `value`, `decay` have different
`base_dtype`.
#### References
[1]: Tony Finch. Incremental calculation of weighted mean and variance.
_Technical Report_, 2009.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
with ops.name_scope(name, "assign_moving_mean_variance",
[variance_var, mean_var, value, decay]):
with ops.colocate_with(variance_var):
with ops.colocate_with(mean_var):
base_dtype = mean_var.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"mean_var.base_dtype({}) does not have float type "
"`dtype`.".format(base_dtype.name))
if base_dtype != variance_var.dtype.base_dtype:
raise TypeError(
"mean_var.base_dtype({}) != variance_var.base_dtype({})".format(
base_dtype.name,
variance_var.dtype.base_dtype.name))
value = ops.convert_to_tensor(value, dtype=base_dtype, name="value")
decay = ops.convert_to_tensor(decay, dtype=base_dtype, name="decay")
delta = value - mean_var
with ops.control_dependencies([delta]):
mean_var = state_ops.assign_add(
mean_var,
(1. - decay) * delta)
variance_var = state_ops.assign_sub(
variance_var,
(1. - decay) * (variance_var - decay * math_ops.square(delta)))
return mean_var, variance_var
def assign_log_moving_mean_exp(
log_mean_exp_var, log_value, decay, name=None):
"""Compute the log of the exponentially weighted moving mean of the exp.
If `log_value` is a draw from a stationary random variable, this function
approximates `log(E[exp(log_value)])`, i.e., a weighted log-sum-exp. More
precisely, a `tf.Variable`, `log_mean_exp_var`, is updated by `log_value`
using the following identity:
```none
log_mean_exp_var
= log(decay exp(log_mean_exp_var) + (1 - decay) exp(log_value))
= log(exp(log_mean_exp_var + log(decay)) + exp(log_value + log1p(-decay)))
= log_mean_exp_var
+ log( exp(log_mean_exp_var - log_mean_exp_var + log(decay))
+ exp(log_value - log_mean_exp_var + log1p(-decay)))
= log_mean_exp_var
+ log_sum_exp([log(decay), log_value - log_mean_exp_var + log1p(-decay)]).
```
In addition to numerical stability, this formulation is advantageous because
`log_mean_exp_var` can be updated in a lock-free manner, i.e., using
`assign_add`. (Note: the updates are not thread-safe; it's just that the
update to the tf.Variable is presumed efficient due to being lock-free.)
Args:
log_mean_exp_var: `float`-like `Variable` representing the log of the
exponentially weighted moving mean of the exp. Same shape as `log_value`.
log_value: `float`-like `Tensor` representing a new (streaming) observation.
Same shape as `log_mean_exp_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
name: Optional name of the returned operation.
Returns:
log_mean_exp_var: A reference to the input `Variable` tensor with the
`log_value`-updated log of the exponentially weighted moving mean of exp.
Raises:
TypeError: if `log_mean_exp_var` does not have float type `dtype`.
TypeError: if `log_mean_exp_var`, `log_value`, `decay` have different
`base_dtype`.
"""
with ops.name_scope(name, "assign_log_moving_mean_exp",
[log_mean_exp_var, log_value, decay]):
# We want to update the variable in a numerically stable and lock-free way.
# To do this, observe that variable `x` updated by `v` is:
# x = log(w exp(x) + (1-w) exp(v))
# = log(exp(x + log(w)) + exp(v + log1p(-w)))
# = x + log(exp(x - x + log(w)) + exp(v - x + log1p(-w)))
# = x + lse([log(w), v - x + log1p(-w)])
with ops.colocate_with(log_mean_exp_var):
base_dtype = log_mean_exp_var.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"log_mean_exp_var.base_dtype({}) does not have float type "
"`dtype`.".format(base_dtype.name))
log_value = ops.convert_to_tensor(log_value, dtype=base_dtype,
name="log_value")
decay = ops.convert_to_tensor(decay, dtype=base_dtype, name="decay")
delta = (log_value - log_mean_exp_var)[array_ops.newaxis, ...]
x = array_ops.concat([
math_ops.log(decay) * array_ops.ones_like(delta),
delta + math_ops.log1p(-decay)
], axis=0)
x = math_ops.reduce_logsumexp(x, axis=0)
return log_mean_exp_var.assign_add(x)
def moving_mean_variance(value, decay, collections=None, name=None):
"""Compute exponentially weighted moving {mean,variance} of a streaming value.
The exponentially weighted moving `mean_var` and `variance_var` are updated
by `value` according to the following recurrence:
```python
variance_var = decay * (variance_var + (1-decay) * (value - mean_var)**2)
mean_var = decay * mean_var + (1 - decay) * value
```
Note: `mean_var` is updated *after* `variance_var`, i.e., `variance_var` uses
the lag-`1` mean.
For derivation justification, see [Finch (2009; Eq. 143)][1].
Unlike `assign_moving_mean_variance`, this function handles
variable creation.
Args:
value: `float`-like `Tensor`. Same shape as `mean_var` and `variance_var`.
decay: A `float`-like `Tensor`. The moving mean decay. Typically close to
`1.`, e.g., `0.999`.
collections: Python list of graph-collections keys to which the internal
variables `mean_var` and `variance_var` are added.
Default value is `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name of the returned operation.
Returns:
mean_var: `Variable` representing the `value`-updated exponentially weighted
moving mean.
variance_var: `Variable` representing the `value`-updated
exponentially weighted moving variance.
Raises:
TypeError: if `value` does not have float type `dtype`.
TypeError: if `value`, `decay` have different `base_dtype`.
#### References
[1]: Tony Finch. Incremental calculation of weighted mean and variance.
_Technical Report_, 2009.
http://people.ds.cam.ac.uk/fanf2/hermes/doc/antiforgery/stats.pdf
"""
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
with variable_scope.variable_scope(
name, "moving_mean_variance", [value, decay]):
value = ops.convert_to_tensor(value, name="value")
base_dtype = value.dtype.base_dtype
if not base_dtype.is_floating:
raise TypeError(
"value.base_dtype({}) does not have float type `dtype`.".format(
base_dtype.name))
decay = ops.convert_to_tensor(decay, dtype=base_dtype, name="decay")
variance_var = variable_scope.get_variable(
"moving_variance",
shape=value.shape,
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
mean_var = variable_scope.get_variable(
"moving_mean",
shape=value.shape,
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
return assign_moving_mean_variance(
mean_var, variance_var, value, decay)
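if __name__ == "__main__":
  # Standalone NumPy/SciPy sketch (illustration only, not part of this
  # module): runs the two recurrences documented above on a synthetic stream.
  # The decay, source distribution, and iteration counts are hypothetical
  # choices.
  import numpy as np
  from scipy import special

  rng = np.random.RandomState(0)
  decay = 0.999

  # Exponentially weighted moving mean/variance, as described for
  # `assign_moving_mean_variance` (the variance uses the lag-1 mean, i.e. it
  # is updated first).
  mean_var, variance_var = 0., 0.
  for value in rng.normal(loc=3., scale=2., size=100000):
    variance_var = decay * (
        variance_var + (1. - decay) * (value - mean_var)**2)
    mean_var = decay * mean_var + (1. - decay) * value
  print(mean_var, variance_var)  # Roughly 3.0 and 4.0.

  # Log of the moving mean of the exp, as described for
  # `assign_log_moving_mean_exp`, written with scipy's logsumexp. For
  # log_value ~ Normal(0, 1) this should hover near log(E[exp(Z)]) = 0.5.
  log_mean_exp_var = -5.
  for log_value in rng.normal(loc=0., scale=1., size=100000):
    log_mean_exp_var += special.logsumexp(
        [np.log(decay), log_value - log_mean_exp_var + np.log1p(-decay)])
  print(log_mean_exp_var)  # Roughly 0.5.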
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/moving_stats.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The PoissonLogNormalQuadratureCompound distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.contrib.distributions.python.ops.bijectors.exp import Exp
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import categorical as categorical_lib
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_lib
from tensorflow.python.util import deprecation
__all__ = [
"PoissonLogNormalQuadratureCompound",
"quadrature_scheme_lognormal_gauss_hermite",
"quadrature_scheme_lognormal_quantiles",
]
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None): # pylint: disable=unused-argument
"""Use Gauss-Hermite quadrature to form quadrature on positive-reals.
Note: for a given `quadrature_size`, this method is generally less accurate
than `quadrature_scheme_lognormal_quantiles`.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
"""
with ops.name_scope(name, "vector_diffeomixture_quadrature_gauss_hermite",
[loc, scale]):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
grid = grid.astype(loc.dtype.as_numpy_dtype)
probs = probs.astype(loc.dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = ops.convert_to_tensor(probs, name="probs", dtype=loc.dtype)
# The following maps the broadcast of `loc` and `scale` to each grid
# point, i.e., we are creating several log-rates that correspond to the
# different Gauss-Hermite quadrature points and (possible) batches of
# `loc` and `scale`.
grid = (loc[..., array_ops.newaxis]
+ np.sqrt(2.) * scale[..., array_ops.newaxis] * grid)
return grid, probs
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def quadrature_scheme_lognormal_quantiles(
loc, scale, quadrature_size,
validate_args=False, name=None):
"""Use LogNormal quantiles to form quadrature on positive-reals.
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
grid: (Batch of) length-`quadrature_size` vectors representing the
`log_rate` parameters of a `Poisson`.
probs: (Batch of) length-`quadrature_size` vectors representing the
weight associated with each `grid` value.
"""
with ops.name_scope(name, "quadrature_scheme_lognormal_quantiles",
[loc, scale]):
# Create a LogNormal distribution.
dist = transformed_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=loc, scale=scale),
bijector=Exp(),
validate_args=validate_args)
batch_ndims = dist.batch_shape.ndims
if batch_ndims is None:
batch_ndims = array_ops.shape(dist.batch_shape_tensor())[0]
def _compute_quantiles():
"""Helper to build quantiles."""
# Omit {0, 1} since they might lead to Inf/NaN.
zero = array_ops.zeros([], dtype=dist.dtype)
edges = math_ops.linspace(zero, 1., quadrature_size + 3)[1:-1]
# Expand edges so it broadcasts across batch dims.
edges = array_ops.reshape(edges, shape=array_ops.concat([
[-1], array_ops.ones([batch_ndims], dtype=dtypes.int32)], axis=0))
quantiles = dist.quantile(edges)
# Cyclically permute left by one.
perm = array_ops.concat([
math_ops.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = array_ops.transpose(quantiles, perm)
return quantiles
quantiles = _compute_quantiles()
# Compute grid as quantile midpoints.
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
# Set shape hints.
grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))
# By construction probs is constant, i.e., `1 / quadrature_size`. This is
# important, because non-constant probs leads to non-reparameterizable
# samples.
probs = array_ops.fill(
dims=[quadrature_size],
value=1. / math_ops.cast(quadrature_size, dist.dtype))
return grid, probs
class PoissonLogNormalQuadratureCompound(distribution_lib.Distribution):
"""`PoissonLogNormalQuadratureCompound` distribution.
The `PoissonLogNormalQuadratureCompound` is an approximation to a
Poisson-LogNormal [compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,
```none
p(k|loc, scale)
= int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)
approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }
```
By default, the `grid` is chosen as quantiles of the `LogNormal` distribution
parameterized by `loc`, `scale` and the `prob` vector is
`[1. / quadrature_size]*quadrature_size`.
In the non-approximation case, a draw from the LogNormal prior represents the
Poisson rate parameter. Unfortunately, the non-approximate distribution lacks
an analytical probability density function (pdf). Therefore the
`PoissonLogNormalQuadratureCompound` class implements an approximation based
on [quadrature](https://en.wikipedia.org/wiki/Numerical_integration).
Note: although the `PoissonLogNormalQuadratureCompound` is approximately the
Poisson-LogNormal compound distribution, it is itself a valid distribution.
Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are
all mutually consistent.
#### Mathematical Details
The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal
[compound distribution](
https://en.wikipedia.org/wiki/Compound_probability_distribution). Using
variable-substitution and [numerical quadrature](
https://en.wikipedia.org/wiki/Numerical_integration) (default:
based on `LogNormal` quantiles) we can redefine the distribution to be a
parameter-less convex combination of `deg` different Poisson samples.
That is, defined over positive integers, this distribution is parameterized
by a (batch of) `loc` and `scale` scalars.
The probability density function (pdf) is,
```none
pdf(k | loc, scale, deg)
= sum{ prob[d] Poisson(k | lambda=exp(grid[d]))
: d=0, ..., deg-1 }
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create two batches of PoissonLogNormalQuadratureCompounds, one with
# prior `loc = 0.` and another with `loc = -0.5`. In both cases `scale = 1.`
pln = tfd.PoissonLogNormalQuadratureCompound(
loc=[0., -0.5],
scale=1.,
quadrature_size=10,
validate_args=True)
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
quadrature_size=8,
quadrature_fn=quadrature_scheme_lognormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="PoissonLogNormalQuadratureCompound"):
"""Constructs the PoissonLogNormalQuadratureCompound`.
Note: `probs` returned by (optional) `quadrature_fn` are presumed to be
either a length-`quadrature_size` vector or a batch of vectors in 1-to-1
correspondence with the returned `grid`. (I.e., broadcasting is only
partially supported.)
Args:
loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
the LogNormal prior.
scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
the LogNormal prior.
quadrature_size: Python `int` scalar representing the number of quadrature
points.
quadrature_fn: Python callable taking `loc`, `scale`,
`quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
representing the LogNormal grid and corresponding normalized weight.
Default value: `quadrature_scheme_lognormal_quantiles`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `quadrature_grid` and `quadrature_probs` have different base
`dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
if loc is not None:
loc = ops.convert_to_tensor(loc, name="loc")
if scale is not None:
scale = ops.convert_to_tensor(
scale, dtype=None if loc is None else loc.dtype, name="scale")
self._quadrature_grid, self._quadrature_probs = tuple(quadrature_fn(
loc, scale, quadrature_size, validate_args))
dt = self._quadrature_grid.dtype
if dt.base_dtype != self._quadrature_probs.dtype.base_dtype:
raise TypeError("Quadrature grid dtype ({}) does not match quadrature "
"probs dtype ({}).".format(
dt.name, self._quadrature_probs.dtype.name))
self._distribution = poisson_lib.Poisson(
log_rate=self._quadrature_grid,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._mixture_distribution = categorical_lib.Categorical(
logits=math_ops.log(self._quadrature_probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._loc = loc
self._scale = scale
self._quadrature_size = quadrature_size
super(PoissonLogNormalQuadratureCompound, self).__init__(
dtype=dt,
reparameterization_type=distribution_lib.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[loc, scale],
name=name)
@property
def mixture_distribution(self):
"""Distribution which randomly selects a Poisson with quadrature param."""
return self._mixture_distribution
@property
def distribution(self):
"""Base Poisson parameterized by a quadrature grid."""
return self._distribution
@property
def loc(self):
"""Location parameter of the LogNormal prior."""
return self._loc
@property
def scale(self):
"""Scale parameter of the LogNormal prior."""
return self._scale
@property
def quadrature_size(self):
return self._quadrature_size
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
self.distribution.batch_shape_tensor(),
array_ops.shape(self.mixture_distribution.logits))[:-1]
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.distribution.batch_shape,
self.mixture_distribution.logits.shape)[:-1]
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
# ids as a [n]-shaped vector.
batch_size = self.batch_shape.num_elements()
if batch_size is None:
batch_size = math_ops.reduce_prod(self.batch_shape_tensor())
# We need to "sample extra" from the mixture distribution if it doesn't
# already specify a probs vector for each batch coordinate.
# We only support this kind of reduced broadcasting, i.e., there is exactly
# one probs vector for all batch dims or one for each.
ids = self._mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.mixture_distribution.is_scalar_batch(),
[batch_size],
np.int32([]))),
seed=distribution_util.gen_new_seed(
seed, "poisson_lognormal_quadrature_compound"))
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = array_ops.reshape(ids, shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.is_scalar_batch(),
np.int32([]),
np.int32([-1]))))
# Stride `quadrature_size` for `batch_size` number of times.
offset = math_ops.range(start=0,
limit=batch_size * self._quadrature_size,
delta=self._quadrature_size,
dtype=ids.dtype)
ids += offset
rate = array_ops.gather(
array_ops.reshape(self.distribution.rate, shape=[-1]), ids)
rate = array_ops.reshape(
rate, shape=concat_vectors([n], self.batch_shape_tensor()))
return random_ops.random_poisson(
lam=rate, shape=[], dtype=self.dtype, seed=seed)
def _log_prob(self, x):
return math_ops.reduce_logsumexp(
(self.mixture_distribution.logits
+ self.distribution.log_prob(x[..., array_ops.newaxis])),
axis=-1)
def _mean(self):
return math_ops.exp(
math_ops.reduce_logsumexp(
self.mixture_distribution.logits + self.distribution.log_rate,
axis=-1))
def _variance(self):
return math_ops.exp(self._log_variance())
def _stddev(self):
return math_ops.exp(0.5 * self._log_variance())
def _log_variance(self):
# Following calculation is based on law of total variance:
#
# Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
#
# where,
#
# Z|v ~ interpolate_affine[v](distribution)
# V ~ mixture_distribution
#
# thus,
#
# E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
# Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
v = array_ops.stack([
# log(self.distribution.variance()) = log(Var[d]) = log(rate[d])
self.distribution.log_rate,
# log((Mean[d] - Mean)**2)
2. * math_ops.log(
math_ops.abs(self.distribution.mean()
- self._mean()[..., array_ops.newaxis])),
], axis=-1)
return math_ops.reduce_logsumexp(
self.mixture_distribution.logits[..., array_ops.newaxis] + v,
axis=[-2, -1])
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def concat_vectors(*args):
"""Concatenates input vectors, statically if possible."""
args_ = [distribution_util.static_value(x) for x in args]
if any(vec is None for vec in args_):
return array_ops.concat(args, axis=0)
return [val for vec in args_ for val in vec]
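if __name__ == "__main__":
  # Standalone NumPy/SciPy sketch (illustration only, not part of this
  # module): approximates the Poisson-LogNormal compound pmf
  #   p(k) = E_{L ~ Normal(loc, scale)}[ Poisson(k | rate=exp(L)) ]
  # with the Gauss-Hermite grid/probs construction documented in
  # `quadrature_scheme_lognormal_gauss_hermite`, and compares it against a
  # brute-force numerical integral. loc, scale, and k are hypothetical.
  from scipy import stats

  loc, scale, k = 0.5, 1.0, 3

  nodes, weights = np.polynomial.hermite.hermgauss(deg=20)
  probs = weights / np.sum(weights)
  grid = loc + np.sqrt(2.) * scale * nodes          # log-rate grid points
  pmf_quadrature = np.sum(probs * stats.poisson.pmf(k, mu=np.exp(grid)))

  l = np.linspace(loc - 10. * scale, loc + 10. * scale, 200001)
  integrand = stats.norm.pdf(l, loc=loc, scale=scale) * stats.poisson.pmf(
      k, mu=np.exp(l))
  pmf_reference = np.trapz(integrand, l)

  # The two should be close; as noted above, the Gauss-Hermite scheme is
  # generally less accurate than the quantile-based scheme for a fixed
  # quadrature_size.
  print(pmf_quadrature, pmf_reference)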
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/poisson_lognormal.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Independent distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.util import deprecation
class Independent(distribution_lib.Distribution):
"""Independent distribution from batch of distributions.
This distribution is useful for regarding a collection of independent,
non-identical distributions as a single random variable. For example, the
`Independent` distribution composed of a collection of `Bernoulli`
distributions might define a distribution over an image (where each
`Bernoulli` is a distribution over each pixel).
More precisely, a collection of `B` (independent) `E`-variate random variables
(rv) `{X_1, ..., X_B}`, can be regarded as a `[B, E]`-variate random variable
`(X_1, ..., X_B)` with probability
`p(x_1, ..., x_B) = p_1(x_1) * ... * p_B(x_B)` where `p_b(X_b)` is the
probability of the `b`-th rv. More generally `B, E` can be arbitrary shapes.
Similarly, the `Independent` distribution specifies a distribution over `[B,
E]`-shaped events. It operates by reinterpreting the rightmost batch dims as
part of the event dimensions. The `reinterpreted_batch_ndims` parameter
controls the number of batch dims which are absorbed as event dims;
`reinterpreted_batch_ndims <= len(batch_shape)`. For example, the `log_prob`
function entails a `reduce_sum` over the rightmost `reinterpreted_batch_ndims`
after calling the base distribution's `log_prob`. In other words, since the
batch dimension(s) index independent distributions, the resultant multivariate
will have independent components.
#### Mathematical Details
The probability function is,
```none
prob(x; reinterpreted_batch_ndims) = tf.reduce_prod(
dist.prob(x),
axis=-1-range(reinterpreted_batch_ndims))
```
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Make independent distribution from a 2-batch Normal.
ind = tfd.Independent(
distribution=tfd.Normal(loc=[-1., 1], scale=[0.1, 0.5]),
reinterpreted_batch_ndims=1)
# All batch dims have been "absorbed" into event dims.
ind.batch_shape # ==> []
ind.event_shape # ==> [2]
# Make independent distribution from a 2-batch bivariate Normal.
ind = tfd.Independent(
distribution=tfd.MultivariateNormalDiag(
loc=[[-1., 1], [1, -1]],
scale_identity_multiplier=[1., 0.5]),
reinterpreted_batch_ndims=1)
# All batch dims have been "absorbed" into event dims.
ind.batch_shape # ==> []
ind.event_shape # ==> [2, 2]
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(
self, distribution, reinterpreted_batch_ndims=None,
validate_args=False, name=None):
"""Construct a `Independent` distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
reinterpreted_batch_ndims: Scalar, integer number of rightmost batch dims
which will be regarded as event dims. When `None` all but the first
batch axis (batch axis 0) will be transferred to event dimensions
(analogous to `tf.compat.v1.layers.flatten`).
validate_args: Python `bool`. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: The name for ops managed by the distribution.
Default value: `Independent + distribution.name`.
Raises:
ValueError: if `reinterpreted_batch_ndims` exceeds
`distribution.batch_ndims`
"""
parameters = dict(locals())
name = name or "Independent" + distribution.name
self._distribution = distribution
with ops.name_scope(name) as name:
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = self._get_default_reinterpreted_batch_ndims(
distribution)
reinterpreted_batch_ndims = ops.convert_to_tensor(
reinterpreted_batch_ndims,
dtype=dtypes.int32,
name="reinterpreted_batch_ndims")
self._reinterpreted_batch_ndims = reinterpreted_batch_ndims
self._static_reinterpreted_batch_ndims = tensor_util.constant_value(
reinterpreted_batch_ndims)
if self._static_reinterpreted_batch_ndims is not None:
self._reinterpreted_batch_ndims = self._static_reinterpreted_batch_ndims
super(Independent, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
graph_parents=(
[reinterpreted_batch_ndims] +
distribution._graph_parents), # pylint: disable=protected-access
name=name)
self._runtime_assertions = self._make_runtime_assertions(
distribution, reinterpreted_batch_ndims, validate_args)
@property
def distribution(self):
return self._distribution
@property
def reinterpreted_batch_ndims(self):
return self._reinterpreted_batch_ndims
def _batch_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
batch_shape = self.distribution.batch_shape_tensor()
dim0 = tensor_shape.dimension_value(
batch_shape.shape.with_rank_at_least(1)[0])
batch_ndims = (dim0
if dim0 is not None
else array_ops.shape(batch_shape)[0])
return batch_shape[:batch_ndims - self.reinterpreted_batch_ndims]
def _batch_shape(self):
batch_shape = self.distribution.batch_shape
if (self._static_reinterpreted_batch_ndims is None
or batch_shape.ndims is None):
return tensor_shape.TensorShape(None)
d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
return batch_shape[:d]
def _event_shape_tensor(self):
with ops.control_dependencies(self._runtime_assertions):
batch_shape = self.distribution.batch_shape_tensor()
dim0 = tensor_shape.dimension_value(
batch_shape.shape.with_rank_at_least(1)[0])
batch_ndims = (dim0
if dim0 is not None
else array_ops.shape(batch_shape)[0])
return array_ops.concat([
batch_shape[batch_ndims - self.reinterpreted_batch_ndims:],
self.distribution.event_shape_tensor(),
], axis=0)
def _event_shape(self):
batch_shape = self.distribution.batch_shape
if (self._static_reinterpreted_batch_ndims is None
or batch_shape.ndims is None):
return tensor_shape.TensorShape(None)
d = batch_shape.ndims - self._static_reinterpreted_batch_ndims
return batch_shape[d:].concatenate(self.distribution.event_shape)
def _sample_n(self, n, seed):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.sample(sample_shape=n, seed=seed)
def _log_prob(self, x):
with ops.control_dependencies(self._runtime_assertions):
return self._reduce_sum(self.distribution.log_prob(x))
def _entropy(self):
with ops.control_dependencies(self._runtime_assertions):
return self._reduce_sum(self.distribution.entropy())
def _mean(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.mean()
def _variance(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.variance()
def _stddev(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.stddev()
def _mode(self):
with ops.control_dependencies(self._runtime_assertions):
return self.distribution.mode()
def _make_runtime_assertions(
self, distribution, reinterpreted_batch_ndims, validate_args):
assertions = []
static_reinterpreted_batch_ndims = tensor_util.constant_value(
reinterpreted_batch_ndims)
batch_ndims = distribution.batch_shape.ndims
if batch_ndims is not None and static_reinterpreted_batch_ndims is not None:
if static_reinterpreted_batch_ndims > batch_ndims:
raise ValueError("reinterpreted_batch_ndims({}) cannot exceed "
"distribution.batch_ndims({})".format(
static_reinterpreted_batch_ndims, batch_ndims))
elif validate_args:
batch_shape = distribution.batch_shape_tensor()
dim0 = tensor_shape.dimension_value(
batch_shape.shape.with_rank_at_least(1)[0])
batch_ndims = (
dim0
if dim0 is not None
else array_ops.shape(batch_shape)[0])
assertions.append(check_ops.assert_less_equal(
reinterpreted_batch_ndims, batch_ndims,
message=("reinterpreted_batch_ndims cannot exceed "
"distribution.batch_ndims")))
return assertions
def _reduce_sum(self, stat):
if self._static_reinterpreted_batch_ndims is None:
range_ = math_ops.range(self._reinterpreted_batch_ndims)
else:
range_ = np.arange(self._static_reinterpreted_batch_ndims)
return math_ops.reduce_sum(stat, axis=-1-range_)
def _get_default_reinterpreted_batch_ndims(self, distribution):
"""Computes the default value for reinterpreted_batch_ndim __init__ arg."""
ndims = distribution.batch_shape.ndims
if ndims is None:
which_maximum = math_ops.maximum
ndims = array_ops.shape(distribution.batch_shape_tensor())[0]
else:
which_maximum = np.maximum
return which_maximum(0, ndims - 1)
@kullback_leibler.RegisterKL(Independent, Independent)
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def _kl_independent(a, b, name="kl_independent"):
"""Batched KL divergence `KL(a || b)` for Independent distributions.
We can leverage the fact that
```
KL(Independent(a) || Independent(b)) = sum(KL(a || b))
```
where the sum is over the `reinterpreted_batch_ndims`.
Args:
a: Instance of `Independent`.
b: Instance of `Independent`.
name: (optional) name to use for created ops. Default "kl_independent".
Returns:
Batchwise `KL(a || b)`.
Raises:
ValueError: If the event space for `a` and `b`, or their underlying
distributions don't match.
"""
p = a.distribution
q = b.distribution
# The KL between any two (non)-batched distributions is a scalar.
# Given that the KL between two factored distributions is the sum, i.e.
  # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
if a.event_shape.is_fully_defined() and b.event_shape.is_fully_defined():
if a.event_shape == b.event_shape:
if p.event_shape == q.event_shape:
num_reduce_dims = a.event_shape.ndims - p.event_shape.ndims
reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
return math_ops.reduce_sum(
kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
else:
raise NotImplementedError("KL between Independents with different "
"event shapes not supported.")
else:
raise ValueError("Event shapes do not match.")
else:
with ops.control_dependencies([
check_ops.assert_equal(a.event_shape_tensor(), b.event_shape_tensor()),
check_ops.assert_equal(p.event_shape_tensor(), q.event_shape_tensor())
]):
      num_reduce_dims = (
          array_ops.shape(a.event_shape_tensor())[0] -
          array_ops.shape(p.event_shape_tensor())[0])
reduce_dims = math_ops.range(-num_reduce_dims - 1, -1, 1)
return math_ops.reduce_sum(
kullback_leibler.kl_divergence(p, q, name=name), axis=reduce_dims)
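if __name__ == "__main__":
  # Standalone NumPy/SciPy sketch (illustration only, not part of this
  # module): the `Independent` construction reduces to summing the base
  # distribution's per-component log-probs over the reinterpreted batch dims,
  # and likewise the KL in `_kl_independent` is the sum of component KLs.
  # Here a 2-batch of scalar Normals is folded into one bivariate event; the
  # numeric values are hypothetical.
  from scipy import stats

  loc_a, scale_a = np.array([-1., 1.]), np.array([0.1, 0.5])
  x = np.array([-1.2, 0.7])

  # log_prob of Independent(Normal(loc_a, scale_a), reinterpreted_batch_ndims=1).
  component_log_probs = stats.norm.logpdf(x, loc=loc_a, scale=scale_a)
  print(np.sum(component_log_probs))

  # KL(Independent(a) || Independent(b)) as the sum of univariate Normal KLs.
  loc_b, scale_b = np.array([0., 0.]), np.array([0.2, 0.4])
  kl_components = (np.log(scale_b / scale_a)
                   + (scale_a**2 + (loc_a - loc_b)**2) / (2. * scale_b**2)
                   - 0.5)
  print(np.sum(kl_components))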
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/independent.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Relaxed OneHotCategorical distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class ExpRelaxedOneHotCategorical(distribution.Distribution):
"""ExpRelaxedOneHotCategorical distribution with temperature and logits.
An ExpRelaxedOneHotCategorical distribution is a log-transformed
RelaxedOneHotCategorical distribution. The RelaxedOneHotCategorical is a
distribution over random probability vectors, vectors of positive real
values that sum to one, which continuously approximates a OneHotCategorical.
The degree of approximation is controlled by a temperature: as the temperature
goes to 0 the RelaxedOneHotCategorical becomes discrete with a distribution
described by the logits, as the temperature goes to infinity the
RelaxedOneHotCategorical becomes the constant distribution that is identically
the constant vector of (1/event_size, ..., 1/event_size).
Because computing log-probabilities of the RelaxedOneHotCategorical can
suffer from underflow issues, this class is one solution for loss
functions that depend on log-probabilities, such as the KL Divergence found
in the variational autoencoder loss. The KL divergence between two
distributions is invariant under invertible transformations, so evaluating
KL divergences of ExpRelaxedOneHotCategorical samples, which are always
followed by a `tf.exp` op, is equivalent to evaluating KL divergences of
RelaxedOneHotCategorical samples. See the appendix of Maddison et al., 2016
for more mathematical details, where this distribution is called the
ExpConcrete.
#### Examples
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution. If those samples
are followed by a `tf.exp` op, then they are distributed as a relaxed onehot
categorical.
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = ExpRelaxedOneHotCategorical(temperature, probs=p)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
# RelaxedOneHotCategorical(temperature, probs=p)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
# RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. Because the temperature is very low, samples from
this distribution are almost discrete, with one component almost 0 and the
others very negative. The 2nd class is the most likely to be the largest
component in samples drawn from this distribution.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
# RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. Because the temperature is very high, samples from
this distribution are usually close to the (-log(3), -log(3), -log(3)) vector.
The 2nd class is still the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 10
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
# RelaxedOneHotCategorical(temperature, logits=logits)
```
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(
self,
temperature,
logits=None,
probs=None,
dtype=None,
validate_args=False,
allow_nan_stats=True,
name="ExpRelaxedOneHotCategorical"):
"""Initialize ExpRelaxedOneHotCategorical using class log-probabilities.
Args:
temperature: An 0-D `Tensor`, representing the temperature
of a set of ExpRelaxedCategorical distributions. The temperature should
be positive.
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of ExpRelaxedCategorical distributions. The first
`N - 1` dimensions index into a batch of independent distributions and
the last dimension represents a vector of logits for each class. Only
one of `logits` or `probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of ExpRelaxedCategorical distributions. The first
`N - 1` dimensions index into a batch of independent distributions and
the last dimension represents a vector of probabilities for each
class. Only one of `logits` or `probs` should be passed in.
dtype: The type of the event samples (default: inferred from
logits/probs).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[logits, probs, temperature]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
name=name, logits=logits, probs=probs, validate_args=validate_args,
multidimensional=True)
if dtype is None:
dtype = self._logits.dtype
if not validate_args:
temperature = math_ops.cast(temperature, dtype)
with ops.control_dependencies([check_ops.assert_positive(temperature)]
if validate_args else []):
self._temperature = array_ops.identity(temperature, name="temperature")
self._temperature_2d = array_ops.reshape(temperature, [-1, 1],
name="temperature_2d")
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
with ops.name_scope(name="event_size"):
self._event_size = array_ops.shape(self._logits)[-1]
super(ExpRelaxedOneHotCategorical, self).__init__(
dtype=dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs,
self._temperature],
name=name)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def temperature(self):
"""Batchwise temperature tensor of a RelaxedCategorical."""
return self._temperature
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of probabilities summing to one."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self._logits)[:-1]
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.logits)[-1:]
def _event_shape(self):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
logits = self.logits * array_ops.ones(sample_shape, dtype=self.dtype)
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.shape(logits_2d),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
gumbel = -math_ops.log(-math_ops.log(uniform))
noisy_logits = math_ops.div(gumbel + logits_2d, self._temperature_2d)
samples = nn_ops.log_softmax(noisy_logits)
ret = array_ops.reshape(samples, sample_shape)
return ret
def _log_prob(self, x):
x = self._assert_valid_sample(x)
# broadcast logits or x if need be.
logits = self.logits
if (not x.get_shape().is_fully_defined() or
not logits.get_shape().is_fully_defined() or
x.get_shape() != logits.get_shape()):
logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
x = array_ops.ones_like(logits, dtype=x.dtype) * x
logits_shape = array_ops.shape(math_ops.reduce_sum(logits, axis=[-1]))
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
x_2d = array_ops.reshape(x, [-1, self.event_size])
# compute the normalization constant
k = math_ops.cast(self.event_size, x.dtype)
log_norm_const = (math_ops.lgamma(k)
+ (k - 1.)
* math_ops.log(self.temperature))
# compute the unnormalized density
log_softmax = nn_ops.log_softmax(logits_2d - x_2d * self._temperature_2d)
log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keepdims=False)
# combine unnormalized density with normalization constant
log_prob = log_norm_const + log_unnorm_prob
# Reshapes log_prob to be consistent with shape of user-supplied logits
ret = array_ops.reshape(log_prob, logits_shape)
return ret
def _assert_valid_sample(self, x):
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_non_positive(x),
check_ops.assert_near(
array_ops.zeros([], dtype=self.dtype),
math_ops.reduce_logsumexp(x, axis=[-1])),
], x)
class RelaxedOneHotCategorical(
transformed_distribution.TransformedDistribution):
"""RelaxedOneHotCategorical distribution with temperature and logits.
The RelaxedOneHotCategorical is a distribution over random probability
vectors, i.e., vectors of positive real values that sum to one, which
continuously approximates a OneHotCategorical. The degree of approximation is
controlled by a temperature: as the temperature goes to 0 the
RelaxedOneHotCategorical becomes discrete with a distribution described by the
`logits` or `probs` parameters; as the temperature goes to infinity it becomes
degenerate at the constant vector (1/event_size, ..., 1/event_size).
The RelaxedOneHotCategorical distribution was concurrently introduced as the
Gumbel-Softmax (Jang et al., 2016) and Concrete (Maddison et al., 2016)
distributions for use as a reparameterized continuous approximation to the
`Categorical` one-hot distribution. If you use this distribution, please cite
both papers.
#### Examples
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = RelaxedOneHotCategorical(temperature, probs=p)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. Because the temperature is very low, samples from
this distribution are almost discrete, with one component almost 1 and the
others nearly 0. The 2nd class is the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. Because the temperature is very high, samples from
this distribution are usually close to the (1/3, 1/3, 1/3) vector. The 2nd
class is still the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 10
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with
Gumbel-Softmax. 2016.
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(
self,
temperature,
logits=None,
probs=None,
dtype=None,
validate_args=False,
allow_nan_stats=True,
name="RelaxedOneHotCategorical"):
"""Initialize RelaxedOneHotCategorical using class log-probabilities.
Args:
temperature: A 0-D `Tensor`, representing the temperature
of a set of RelaxedOneHotCategorical distributions. The temperature
should be positive.
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of RelaxedOneHotCategorical distributions. The first
`N - 1` dimensions index into a batch of independent distributions and
the last dimension represents a vector of logits for each class. Only
one of `logits` or `probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of RelaxedOneHotCategorical distributions. The first `N - 1`
dimensions index into a batch of independent distributions and the last
dimension represents a vector of probabilities for each class. Only one
of `logits` or `probs` should be passed in.
dtype: The type of the event samples (default: inferred from
logits/probs).
validate_args: Python `bool`, default `False`. When `True`, distribution
parameters are checked for validity (via the underlying
`ExpRelaxedOneHotCategorical`), possibly degrading runtime performance.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
"""
dist = ExpRelaxedOneHotCategorical(temperature,
logits=logits,
probs=probs,
dtype=dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
super(RelaxedOneHotCategorical, self).__init__(dist,
bijectors.Exp(),
name=name)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Negative Binomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class NegativeBinomial(distribution.Distribution):
"""NegativeBinomial distribution.
The NegativeBinomial distribution is related to the experiment of performing
Bernoulli trials in sequence. Given a Bernoulli trial with probability `p` of
success, the NegativeBinomial distribution represents the distribution over
the number of successes `s` that occur until we observe `f` failures.
The probability mass function (pmf) is,
```none
pmf(s; f, p) = p**s (1 - p)**f / Z
Z = s! (f - 1)! / (s + f - 1)!
```
where:
* `total_count = f`,
* `probs = p`,
* `Z` is the normalizing constant, and,
* `n!` is the factorial of `n`.
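As a hedged sanity check of this formula (illustrative only, not part of the
class; it relies on SciPy's `nbinom`, whose `pmf(k, n, p_scipy)` matches the
expression above when `k = s`, `n = f`, and `p_scipy = 1 - p`):
```python
import numpy as np
from scipy import special, stats

f, p = 4., 0.6                       # total_count (failures) and success prob
s = np.arange(0., 6.)                # numbers of successes
log_z = (special.gammaln(s + 1.) + special.gammaln(f)
         - special.gammaln(s + f))   # log Z = log[s! (f - 1)! / (s + f - 1)!]
pmf = np.exp(s * np.log(p) + f * np.log1p(-p) - log_z)
np.testing.assert_allclose(pmf, stats.nbinom.pmf(s, n=f, p=1. - p), rtol=1e-6)
```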
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="NegativeBinomial"):
"""Construct NegativeBinomial distributions.
Args:
total_count: Non-negative floating-point `Tensor` with shape
broadcastable to `[B1,..., Bb]` with `b >= 0` and the same dtype as
`probs` or `logits`. Defines this as a batch of `B1 x ... x Bb`
different Negative Binomial distributions. In practice, this represents
the number of failed Bernoulli trials to stop at (the `total_count`
of failures), but the distribution is still valid when
`total_count` is a non-integer.
logits: Floating-point `Tensor` with shape broadcastable to
`[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
Each entry represents logits for the probability of success for
independent Negative Binomial distributions and must be in the open
interval `(-inf, inf)`. Only one of `logits` or `probs` should be
specified.
probs: Positive floating-point `Tensor` with shape broadcastable to
`[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
Each entry represents the probability of success for independent
Negative Binomial distributions and must be in the open interval
`(0, 1)`. Only one of `logits` or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(total_count)] if validate_args else []):
self._total_count = array_ops.identity(total_count)
super(NegativeBinomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count, self._probs, self._logits],
name=name)
@property
def total_count(self):
"""Number of negative trials."""
return self._total_count
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.total_count),
array_ops.shape(self.probs))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.total_count.get_shape(),
self.probs.get_shape())
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Here we use the fact that if:
# lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
# then X ~ Poisson(lam) is Negative Binomially distributed.
rate = random_ops.random_gamma(
shape=[n],
alpha=self.total_count,
beta=math_ops.exp(-self.logits),
dtype=self.dtype,
seed=seed)
return random_ops.random_poisson(
rate,
shape=[],
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, "negative_binom"))
def _cdf(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return math_ops.betainc(self.total_count, 1. + x,
math_ops.sigmoid(-self.logits))
def _log_prob(self, x):
return (self._log_unnormalized_prob(x)
- self._log_normalization(x))
def _log_unnormalized_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return (self.total_count * math_ops.log_sigmoid(-self.logits)
+ x * math_ops.log_sigmoid(self.logits))
def _log_normalization(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return (-math_ops.lgamma(self.total_count + x)
+ math_ops.lgamma(1. + x)
+ math_ops.lgamma(self.total_count))
def _mean(self):
return self.total_count * math_ops.exp(self.logits)
def _mode(self):
adjusted_count = array_ops.where(
1. < self.total_count,
self.total_count - 1.,
array_ops.zeros_like(self.total_count))
return math_ops.floor(adjusted_count * math_ops.exp(self.logits))
def _variance(self):
return self._mean() / math_ops.sigmoid(-self.logits)
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/negative_binomial.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing distributions and/or bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import histogram_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_ops
__all__ = [
"DiscreteScalarDistributionTestHelpers",
"VectorDistributionTestHelpers",
]
class DiscreteScalarDistributionTestHelpers(object):
"""DiscreteScalarDistributionTestHelpers."""
def run_test_sample_consistent_log_prob(self,
sess_run_fn,
dist,
num_samples=int(1e5),
num_threshold=int(1e3),
seed=42,
batch_size=None,
rtol=1e-2,
atol=0.):
"""Tests that sample/log_prob are consistent with each other.
"Consistency" means that `sample` and `log_prob` correspond to the same
distribution.
Note: this test only verifies a necessary condition for consistency--it does
not verify sufficiency and hence does not prove that `sample` and `log_prob`
truly are consistent.
Args:
sess_run_fn: Python `callable` taking `list`-like of `Tensor`s and
returning a list of results after running one "step" of TensorFlow
computation, typically set to `sess.run`.
dist: Distribution instance or object which implements `sample`,
`log_prob`, `event_shape_tensor` and `batch_shape_tensor`.
num_samples: Python `int` scalar indicating the number of Monte-Carlo
samples to draw from `dist`.
num_threshold: Python `int` scalar indicating the number of samples a
bucket must contain before being compared to the probability.
Default value: 1e3; must be at least 1. Warning: setting this too high
will cause the test to falsely pass, while setting it too low will cause
the test to falsely fail.
seed: Python `int` indicating the seed to use when sampling from `dist`.
In general it is not recommended to use `None` during a test as this
increases the likelihood of spurious test failure.
batch_size: Hint for unpacking result of samples. Default: `None` means
batch_size is inferred.
rtol: Python `float`-type indicating the admissible relative error between
analytical and sample statistics.
atol: Python `float`-type indicating the admissible absolute error between
analytical and sample statistics.
Raises:
ValueError: if `num_threshold < 1`.
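A minimal usage sketch (hypothetical test class; it assumes the standard
`tensorflow.python.platform.test` harness and an already-imported
`NegativeBinomial` distribution, neither of which is imported by this module):
```python
class NegativeBinomialSampleTest(test.TestCase,
                                 DiscreteScalarDistributionTestHelpers):

  def testSampleLogProbConsistency(self):
    with self.cached_session() as sess:
      dist = NegativeBinomial(total_count=5., probs=0.3)
      self.run_test_sample_consistent_log_prob(
          sess.run, dist, num_samples=int(1e4), num_threshold=int(1e2))
```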
"""
if num_threshold < 1:
raise ValueError(
"num_threshold({}) must be at least 1.".format(num_threshold))
# Histogram only supports vectors so we call it once per batch coordinate.
y = dist.sample(num_samples, seed=seed)
y = array_ops.reshape(y, shape=[num_samples, -1])
if batch_size is None:
batch_size = math_ops.reduce_prod(dist.batch_shape_tensor())
batch_dims = array_ops.shape(dist.batch_shape_tensor())[0]
edges_expanded_shape = 1 + array_ops.pad([-2], paddings=[[0, batch_dims]])
for b, x in enumerate(array_ops.unstack(y, num=batch_size, axis=1)):
counts, edges = self.histogram(x)
edges = array_ops.reshape(edges, edges_expanded_shape)
probs = math_ops.exp(dist.log_prob(edges))
probs = array_ops.reshape(probs, shape=[-1, batch_size])[:, b]
[counts_, probs_] = sess_run_fn([counts, probs])
valid = counts_ > num_threshold
probs_ = probs_[valid]
counts_ = counts_[valid]
self.assertAllClose(probs_, counts_ / num_samples, rtol=rtol, atol=atol)
def run_test_sample_consistent_mean_variance(self,
sess_run_fn,
dist,
num_samples=int(1e5),
seed=24,
rtol=1e-2,
atol=0.):
"""Tests that sample/mean/variance are consistent with each other.
"Consistency" means that `sample`, `mean`, `variance`, etc all correspond
to the same distribution.
Args:
sess_run_fn: Python `callable` taking `list`-like of `Tensor`s and
returning a list of results after running one "step" of TensorFlow
computation, typically set to `sess.run`.
dist: Distribution instance or object which implements `sample`,
`log_prob`, `event_shape_tensor` and `batch_shape_tensor`.
num_samples: Python `int` scalar indicating the number of Monte-Carlo
samples to draw from `dist`.
seed: Python `int` indicating the seed to use when sampling from `dist`.
In general it is not recommended to use `None` during a test as this
increases the likelihood of spurious test failure.
rtol: Python `float`-type indicating the admissible relative error between
analytical and sample statistics.
atol: Python `float`-type indicating the admissible absolute error between
analytical and sample statistics.
"""
x = math_ops.cast(dist.sample(num_samples, seed=seed), dtypes.float32)
sample_mean = math_ops.reduce_mean(x, axis=0)
sample_variance = math_ops.reduce_mean(
math_ops.square(x - sample_mean), axis=0)
sample_stddev = math_ops.sqrt(sample_variance)
[sample_mean_, sample_variance_, sample_stddev_, mean_, variance_,
stddev_] = sess_run_fn([
sample_mean,
sample_variance,
sample_stddev,
dist.mean(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(mean_, sample_mean_, rtol=rtol, atol=atol)
self.assertAllClose(variance_, sample_variance_, rtol=rtol, atol=atol)
self.assertAllClose(stddev_, sample_stddev_, rtol=rtol, atol=atol)
def histogram(self, x, value_range=None, nbins=None, name=None):
"""Return histogram of values.
Given the tensor `x`, this operation returns a rank 1 histogram counting
the number of entries in `x` that fall into every bin. The bins are equal
width and determined by the arguments `value_range` and `nbins`.
Args:
x: 1D numeric `Tensor` of items to count.
value_range: Shape [2] `Tensor`. Values `<= value_range[0]` will be
mapped to `hist[0]`; values `>= value_range[1]` will be mapped to
`hist[-1]`. Must be the same dtype as `x`.
nbins: Scalar `int32 Tensor`. Number of histogram bins.
name: Python `str` name prefixed to Ops created by this class.
Returns:
counts: 1D `Tensor` of counts, i.e.,
`counts[i] = sum{ edges[i-1] <= values[j] < edges[i] : j }`.
edges: 1D `Tensor` characterizing intervals used for counting.
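A small illustrative sketch (values chosen for this docstring only):
```python
counts, edges = self.histogram([0., 1., 1., 3.])
# With the default `value_range` of [0., 4.) and `nbins` of 4, `counts`
# evaluates to [1, 2, 0, 1] and `edges` to [0., 1., 2., 3.].
```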
"""
with ops.name_scope(name, "histogram", [x]):
x = ops.convert_to_tensor(x, name="x")
if value_range is None:
value_range = [math_ops.reduce_min(x), 1 + math_ops.reduce_max(x)]
value_range = ops.convert_to_tensor(value_range, name="value_range")
lo = value_range[0]
hi = value_range[1]
if nbins is None:
nbins = math_ops.cast(hi - lo, dtypes.int32)
delta = (hi - lo) / math_ops.cast(
nbins, dtype=value_range.dtype.base_dtype)
edges = math_ops.range(
start=lo, limit=hi, delta=delta, dtype=x.dtype.base_dtype)
counts = histogram_ops.histogram_fixed_width(
x, value_range=value_range, nbins=nbins)
return counts, edges
class VectorDistributionTestHelpers(object):
"""VectorDistributionTestHelpers helps test vector-event distributions."""
def run_test_sample_consistent_log_prob(self,
sess_run_fn,
dist,
num_samples=int(1e5),
radius=1.,
center=0.,
seed=42,
rtol=1e-2,
atol=0.):
"""Tests that sample/log_prob are mutually consistent.
"Consistency" means that `sample` and `log_prob` correspond to the same
distribution.
The idea of this test is to compute the Monte-Carlo estimate of the volume
enclosed by a hypersphere, i.e., the volume of an `n`-ball. While we could
choose an arbitrary function to integrate, the hypersphere's volume is nice
because it is intuitive, has an easy analytical expression, and works for
`dimensions > 1`.
Technical Details:
Observe that:
```none
int_{R**d} dx [x in Ball(radius=r, center=c)]
= E_{p(X)}[ [X in Ball(r, c)] / p(X) ]
= lim_{m->infty} m**-1 sum_j^m [x[j] in Ball(r, c)] / p(x[j]),
where x[j] ~iid p(X)
```
Thus, for fixed `m`, the above is approximately true when `sample` and
`log_prob` are mutually consistent.
Furthermore, the above calculation has the analytical result:
`pi**(d/2) r**d / Gamma(1 + d/2)`.
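For intuition, the following NumPy-only sketch implements the same
Monte-Carlo estimate outside TensorFlow (illustrative only; SciPy and the
standard-normal sampling distribution are assumptions, not part of this
helper):
```python
import numpy as np
from scipy import special, stats

dims, radius, num_samples = 3, 1., int(1e5)
x = np.random.standard_normal([num_samples, dims])          # x[j] ~iid p(X)
log_prob = stats.multivariate_normal(np.zeros(dims)).logpdf(x)
in_ball = (np.linalg.norm(x, axis=-1) <= radius).astype(x.dtype)
mc_volume = np.mean(in_ball * np.exp(-log_prob))             # importance sampling
exact_volume = np.pi**(dims / 2.) * radius**dims / special.gamma(1. + dims / 2.)
# Both values should be close to the unit 3-ball volume, 4/3 * pi ~= 4.19.
```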
Note: this test only verifies a necessary condition for consistency--it does
not verify sufficiency and hence does not prove that `sample` and `log_prob`
truly are consistent. For this reason we recommend testing several different
hyperspheres (assuming the hypersphere is supported by the distribution).
Furthermore, we gain additional trust in this test when `sample` is also
tested against the first and second moments
(`run_test_sample_consistent_mean_covariance`); it is unlikely that a
"best-effort" implementation of `log_prob` would incorrectly pass both tests
for different hyperspheres.
For a discussion on the analytical result (second-line) see:
https://en.wikipedia.org/wiki/Volume_of_an_n-ball.
For a discussion of importance sampling (fourth-line) see:
https://en.wikipedia.org/wiki/Importance_sampling.
Args:
sess_run_fn: Python `callable` taking `list`-like of `Tensor`s and
returning a list of results after running one "step" of TensorFlow
computation, typically set to `sess.run`.
dist: Distribution instance or object which implements `sample`,
`log_prob`, `event_shape_tensor` and `batch_shape_tensor`. The
distribution must have non-zero probability of sampling every point
enclosed by the hypersphere.
num_samples: Python `int` scalar indicating the number of Monte-Carlo
samples to draw from `dist`.
radius: Python `float`-type indicating the radius of the `n`-ball whose
volume we're computing.
center: Python floating-type vector (or scalar) indicating the center of
the `n`-ball whose volume we're computing. When scalar, the value is
broadcast to all event dims.
seed: Python `int` indicating the seed to use when sampling from `dist`.
In general it is not recommended to use `None` during a test as this
increases the likelihood of spurious test failure.
rtol: Python `float`-type indicating the admissible relative error between
actual- and approximate-volumes.
atol: Python `float`-type indicating the admissible absolute error between
actual- and approximate-volumes. In general this should be zero since a
typical radius implies a non-zero volume.
"""
def actual_hypersphere_volume(dims, radius):
# https://en.wikipedia.org/wiki/Volume_of_an_n-ball
# Using tf.math.lgamma because we'd have to otherwise use SciPy which is
# not a required dependency of core.
radius = np.asarray(radius)
dims = math_ops.cast(dims, dtype=radius.dtype)
return math_ops.exp((dims / 2.) * np.log(np.pi) -
math_ops.lgamma(1. + dims / 2.) +
dims * math_ops.log(radius))
def is_in_ball(x, radius, center):
return math_ops.cast(
linalg_ops.norm(x - center, axis=-1) <= radius, dtype=x.dtype)
def monte_carlo_hypersphere_volume(dist, num_samples, radius, center):
# https://en.wikipedia.org/wiki/Importance_sampling
x = dist.sample(num_samples, seed=seed)
x = array_ops.identity(x) # Invalidate bijector cacheing.
return math_ops.reduce_mean(
math_ops.exp(-dist.log_prob(x)) * is_in_ball(x, radius, center),
axis=0)
# Build graph.
with ops.name_scope(
"run_test_sample_consistent_log_prob",
values=[num_samples, radius, center] + dist._graph_parents): # pylint: disable=protected-access
batch_shape = dist.batch_shape_tensor()
actual_volume = actual_hypersphere_volume(
dims=dist.event_shape_tensor()[0], radius=radius)
sample_volume = monte_carlo_hypersphere_volume(
dist, num_samples=num_samples, radius=radius, center=center)
init_op = variables_ops.global_variables_initializer()
# Execute graph.
sess_run_fn(init_op)
[batch_shape_, actual_volume_,
sample_volume_] = sess_run_fn([batch_shape, actual_volume, sample_volume])
# Check results.
self.assertAllClose(
np.tile(actual_volume_, reps=batch_shape_),
sample_volume_,
rtol=rtol,
atol=atol)
def run_test_sample_consistent_mean_covariance(self,
sess_run_fn,
dist,
num_samples=int(1e5),
seed=24,
rtol=1e-2,
atol=0.1,
cov_rtol=None,
cov_atol=None):
"""Tests that sample/mean/covariance are consistent with each other.
"Consistency" means that `sample`, `mean`, `covariance`, etc all correspond
to the same distribution.
Args:
sess_run_fn: Python `callable` taking `list`-like of `Tensor`s and
returning a list of results after running one "step" of TensorFlow
computation, typically set to `sess.run`.
dist: Distribution instance or object which implements `sample`,
`log_prob`, `event_shape_tensor` and `batch_shape_tensor`.
num_samples: Python `int` scalar indicating the number of Monte-Carlo
samples to draw from `dist`.
seed: Python `int` indicating the seed to use when sampling from `dist`.
In general it is not recommended to use `None` during a test as this
increases the likelihood of spurious test failure.
rtol: Python `float`-type indicating the admissible relative error between
analytical and sample statistics.
atol: Python `float`-type indicating the admissible absolute error between
analytical and sample statistics.
cov_rtol: Python `float`-type indicating the admissible relative error
between analytical and sample covariance. Default: rtol.
cov_atol: Python `float`-type indicating the admissible absolute error
between analytical and sample covariance. Default: atol.
"""
x = dist.sample(num_samples, seed=seed)
sample_mean = math_ops.reduce_mean(x, axis=0)
sample_covariance = math_ops.reduce_mean(
_vec_outer_square(x - sample_mean), axis=0)
sample_variance = array_ops.matrix_diag_part(sample_covariance)
sample_stddev = math_ops.sqrt(sample_variance)
[
sample_mean_, sample_covariance_, sample_variance_, sample_stddev_,
mean_, covariance_, variance_, stddev_
] = sess_run_fn([
sample_mean,
sample_covariance,
sample_variance,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(mean_, sample_mean_, rtol=rtol, atol=atol)
self.assertAllClose(
covariance_,
sample_covariance_,
rtol=cov_rtol or rtol,
atol=cov_atol or atol)
self.assertAllClose(variance_, sample_variance_, rtol=rtol, atol=atol)
self.assertAllClose(stddev_, sample_stddev_, rtol=rtol, atol=atol)
def _vec_outer_square(x, name=None):
"""Computes the outer-product of a vector, i.e., x.T x."""
with ops.name_scope(name, "vec_osquare", [x]):
return x[..., :, array_ops.newaxis] * x[..., array_ops.newaxis, :]
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/test_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Cauchy distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
__all__ = [
"Cauchy",
]
class Cauchy(distribution.Distribution):
"""The Cauchy distribution with location `loc` and scale `scale`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = 1 / (pi scale (1 + z**2))
z = (x - loc) / scale
```
where `loc` is the location, and `scale` is the scale.
The Cauchy distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e.
`Y ~ Cauchy(loc, scale)` is equivalent to,
```none
X ~ Cauchy(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Cauchy distribution.
dist = tfd.Cauchy(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Cauchy distributions.
dist = tfd.Cauchy(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
# Arguments are broadcast when possible.
# Define a batch of two scalar valued Cauchy distributions.
# Both have median 1, but different scales.
dist = tfd.Cauchy(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Cauchy"):
"""Construct Cauchy distributions.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the modes of the distribution(s).
scale: Floating point tensor; the scales (half-widths at half maximum) of
the distribution(s). Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)]
if validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Cauchy, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"),
([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(self.loc.shape, self.scale.shape)
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
probs = random_ops.random_uniform(
shape=shape, minval=0., maxval=1., dtype=self.dtype, seed=seed)
return self._quantile(probs)
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _cdf(self, x):
return math_ops.atan(self._z(x)) / np.pi + 0.5
def _log_cdf(self, x):
return math_ops.log1p(2 / np.pi * math_ops.atan(self._z(x))) - np.log(2)
def _log_unnormalized_prob(self, x):
return -math_ops.log1p(math_ops.square(self._z(x)))
def _log_normalization(self):
return np.log(np.pi) + math_ops.log(self.scale)
def _entropy(self):
h = np.log(4 * np.pi) + math_ops.log(self.scale)
return h * array_ops.ones_like(self.loc)
def _quantile(self, p):
return self.loc + self.scale * math_ops.tan(np.pi * (p - 0.5))
def _mode(self):
return self.loc * array_ops.ones_like(self.scale)
def _z(self, x):
"""Standardize input `x`."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
def _inv_z(self, z):
"""Reconstruct input `x` from a its normalized version."""
with ops.name_scope("reconstruct", values=[z]):
return z * self.scale + self.loc
def _mean(self):
if self.allow_nan_stats:
return array_ops.fill(self.batch_shape_tensor(),
self.dtype.as_numpy_dtype(np.nan))
else:
raise ValueError("`mean` is undefined for Cauchy distribution.")
def _stddev(self):
if self.allow_nan_stats:
return array_ops.fill(self.batch_shape_tensor(),
self.dtype.as_numpy_dtype(np.nan))
else:
raise ValueError("`stddev` is undefined for Cauchy distribution.")
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/cauchy.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Conditional Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import conditional_distribution
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import transformed_distribution
from tensorflow.python.ops.distributions import util as distribution_util
# pylint: disable=protected-access
_concat_vectors = transformed_distribution._concat_vectors
# pylint: enable=protected-access
__all__ = [
"ConditionalTransformedDistribution",
]
_condition_kwargs_dict = {
"bijector_kwargs": ("Python dictionary of arg names/values "
"forwarded to the bijector."),
"distribution_kwargs": ("Python dictionary of arg names/values "
"forwarded to the distribution."),
}
class ConditionalTransformedDistribution(
conditional_distribution.ConditionalDistribution,
transformed_distribution.TransformedDistribution):
"""A TransformedDistribution that allows intrinsic conditioning."""
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _sample_n(self, n, seed=None,
bijector_kwargs=None,
distribution_kwargs=None):
sample_shape = _concat_vectors(
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty))
distribution_kwargs = distribution_kwargs or {}
x = self.distribution.sample(sample_shape=sample_shape,
seed=seed,
**distribution_kwargs)
x = self._maybe_rotate_dims(x)
# We'll apply the bijector in the `_call_sample_n` function.
return x
def _call_sample_n(self, sample_shape, seed, name,
bijector_kwargs=None,
distribution_kwargs=None):
# We override `_call_sample_n` rather than `_sample_n` so we can ensure that
# the result of `self.bijector.forward` is not modified (and thus caching
# works).
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
# First, generate samples. We will possibly generate extra samples in the
# event that we need to reinterpret the samples as part of the
# event_shape.
x = self._sample_n(n, seed, bijector_kwargs, distribution_kwargs)
# Next, we reshape `x` into its final form. We do this prior to the call
# to the bijector to ensure that the bijector caching works.
batch_event_shape = array_ops.shape(x)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
x = array_ops.reshape(x, final_shape)
# Finally, we apply the bijector's forward transformation. For caching to
# work, it is imperative that this is the last modification to the
# returned result.
bijector_kwargs = bijector_kwargs or {}
y = self.bijector.forward(x, **bijector_kwargs)
y = self._set_sample_static_shape(y, sample_shape)
return y
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
# For caching to work, it is imperative that the bijector is the first to
# modify the input.
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **bijector_kwargs)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_log_prob_for_one_fiber(y, x, ildj,
distribution_kwargs)
lp_on_fibers = [
self._finish_log_prob_for_one_fiber(y, x_i, ildj_i, distribution_kwargs)
for x_i, ildj_i in zip(x, ildj)]
return math_ops.reduce_logsumexp(array_ops.stack(lp_on_fibers), axis=0)
def _finish_log_prob_for_one_fiber(self, y, x, ildj, distribution_kwargs):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
return math_ops.cast(ildj, log_prob.dtype) + log_prob
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(
y, event_ndims=event_ndims, **bijector_kwargs)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_prob_for_one_fiber(y, x, ildj, distribution_kwargs)
prob_on_fibers = [
self._finish_prob_for_one_fiber(y, x_i, ildj_i, distribution_kwargs)
for x_i, ildj_i in zip(x, ildj)]
return sum(prob_on_fibers)
def _finish_prob_for_one_fiber(self, y, x, ildj, distribution_kwargs):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x, **distribution_kwargs)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
return math_ops.exp(math_ops.cast(ildj, prob.dtype)) * prob
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_cdf is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("cdf is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _log_survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_survival_function is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("survival_function is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(kwargs_dict=_condition_kwargs_dict)
def _quantile(self, value, bijector_kwargs=None, distribution_kwargs=None):
if self._is_maybe_event_override:
raise NotImplementedError("quantile is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("quantile is not implemented when "
"bijector is not injective.")
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
# x_q is the "qth quantile" of X iff q = P[X <= x_q]. Now, since X =
# g^{-1}(Y), q = P[X <= x_q] = P[g^{-1}(Y) <= x_q] = P[Y <= g(x_q)],
# implies the qth quantile of Y is g(x_q).
inv_cdf = self.distribution.quantile(value, **distribution_kwargs)
return self.bijector.forward(inv_cdf, **bijector_kwargs)
def _maybe_get_static_event_ndims(self):
if self.event_shape.ndims is not None:
return self.event_shape.ndims
event_ndims = array_ops.size(self.event_shape_tensor())
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if event_ndims_ is not None:
return event_ndims_
return event_ndims
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/conditional_transformed_distribution.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gumbel distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
class _Gumbel(distribution.Distribution):
"""The scalar Gumbel distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) of this distribution is,
```none
pdf(x; mu, sigma) = exp(-(x - mu) / sigma - exp(-(x - mu) / sigma))
```
where `loc = mu` and `scale = sigma`.
The cumulative distribution function (cdf) of this distribution is,
```cdf(x; mu, sigma) = exp(-exp(-(x - mu) / sigma))```
The Gumbel distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Gumbel(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Gumbel distribution.
dist = tfd.Gumbel(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Gumbels.
# The first has loc 1 and scale 11, the second loc 2 and scale 22.
dist = tfd.Gumbel(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Gumbels.
# Both have loc 1, but different scales.
dist = tfd.Gumbel(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Gumbel"):
"""Construct Gumbel distributions with location and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s).
scale must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(_Gumbel, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
sampled = -math_ops.log(-math_ops.log(uniform))
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_cdf(self, x):
return -math_ops.exp(-self._z(x))
def _cdf(self, x):
return math_ops.exp(-math_ops.exp(-self._z(x)))
def _log_unnormalized_prob(self, x):
z = self._z(x)
return - z - math_ops.exp(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 1 + math_ops.log(scale) + np.euler_gamma
def _mean(self):
return self.loc + self.scale * np.euler_gamma
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(6)
def _mode(self):
return self.loc * array_ops.ones_like(self.scale)
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
|
tensorflow-master
|
tensorflow/contrib/distributions/python/ops/gumbel.py
|