# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_cython_special.py

from __future__ import division, print_function, absolute_import
from itertools import product
from numpy.testing import assert_allclose
import pytest
from scipy import special
from scipy.special import cython_special
int_points = [-10, -1, 1, 10]
real_points = [-10.0, -1.0, 1.0, 10.0]
complex_points = [complex(*tup) for tup in product(real_points, repeat=2)]
CYTHON_SIGNATURE_MAP = {
'f': 'float',
'd': 'double',
'g': 'long double',
'F': 'float complex',
'D': 'double complex',
'G': 'long double complex',
    'i': 'int',
'l': 'long'
}
TEST_POINTS = {
'f': real_points,
'd': real_points,
'g': real_points,
'F': complex_points,
'D': complex_points,
'G': complex_points,
'i': int_points,
'l': int_points,
}
PARAMS = [
(special.agm, cython_special.agm, ('dd',), None),
(special.airy, cython_special._airy_pywrap, ('d', 'D'), None),
(special.airye, cython_special._airye_pywrap, ('d', 'D'), None),
(special.bdtr, cython_special.bdtr, ('lld', 'ddd'), None),
(special.bdtrc, cython_special.bdtrc, ('lld', 'ddd'), None),
(special.bdtri, cython_special.bdtri, ('lld', 'ddd'), None),
(special.bdtrik, cython_special.bdtrik, ('ddd',), None),
(special.bdtrin, cython_special.bdtrin, ('ddd',), None),
(special.bei, cython_special.bei, ('d',), None),
(special.beip, cython_special.beip, ('d',), None),
(special.ber, cython_special.ber, ('d',), None),
(special.berp, cython_special.berp, ('d',), None),
(special.besselpoly, cython_special.besselpoly, ('ddd',), None),
(special.beta, cython_special.beta, ('dd',), None),
(special.betainc, cython_special.betainc, ('ddd',), None),
(special.betaincinv, cython_special.betaincinv, ('ddd',), None),
(special.betaln, cython_special.betaln, ('dd',), None),
(special.binom, cython_special.binom, ('dd',), None),
(special.boxcox, cython_special.boxcox, ('dd',), None),
(special.boxcox1p, cython_special.boxcox1p, ('dd',), None),
(special.btdtr, cython_special.btdtr, ('ddd',), None),
(special.btdtri, cython_special.btdtri, ('ddd',), None),
(special.btdtria, cython_special.btdtria, ('ddd',), None),
(special.btdtrib, cython_special.btdtrib, ('ddd',), None),
(special.cbrt, cython_special.cbrt, ('d',), None),
(special.chdtr, cython_special.chdtr, ('dd',), None),
(special.chdtrc, cython_special.chdtrc, ('dd',), None),
(special.chdtri, cython_special.chdtri, ('dd',), None),
(special.chdtriv, cython_special.chdtriv, ('dd',), None),
(special.chndtr, cython_special.chndtr, ('ddd',), None),
(special.chndtridf, cython_special.chndtridf, ('ddd',), None),
(special.chndtrinc, cython_special.chndtrinc, ('ddd',), None),
(special.chndtrix, cython_special.chndtrix, ('ddd',), None),
(special.cosdg, cython_special.cosdg, ('d',), None),
(special.cosm1, cython_special.cosm1, ('d',), None),
(special.cotdg, cython_special.cotdg, ('d',), None),
(special.dawsn, cython_special.dawsn, ('d', 'D'), None),
(special.ellipe, cython_special.ellipe, ('d',), None),
(special.ellipeinc, cython_special.ellipeinc, ('dd',), None),
(special.ellipj, cython_special._ellipj_pywrap, ('dd',), None),
(special.ellipkinc, cython_special.ellipkinc, ('dd',), None),
(special.ellipkm1, cython_special.ellipkm1, ('d',), None),
(special.entr, cython_special.entr, ('d',), None),
(special.erf, cython_special.erf, ('d', 'D'), None),
(special.erfc, cython_special.erfc, ('d', 'D'), None),
(special.erfcx, cython_special.erfcx, ('d', 'D'), None),
(special.erfi, cython_special.erfi, ('d', 'D'), None),
(special.eval_chebyc, cython_special.eval_chebyc, ('dd', 'dD', 'ld'), None),
(special.eval_chebys, cython_special.eval_chebys, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_chebyt, cython_special.eval_chebyt, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_chebyu, cython_special.eval_chebyu, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_gegenbauer, cython_special.eval_gegenbauer, ('ddd', 'ddD', 'ldd'),
'd and l differ for negative int'),
(special.eval_genlaguerre, cython_special.eval_genlaguerre, ('ddd', 'ddD', 'ldd'),
'd and l differ for negative int'),
(special.eval_hermite, cython_special.eval_hermite, ('ld',), None),
(special.eval_hermitenorm, cython_special.eval_hermitenorm, ('ld',), None),
(special.eval_jacobi, cython_special.eval_jacobi, ('dddd', 'dddD', 'lddd'),
'd and l differ for negative int'),
(special.eval_laguerre, cython_special.eval_laguerre, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_legendre, cython_special.eval_legendre, ('dd', 'dD', 'ld'), None),
(special.eval_sh_chebyt, cython_special.eval_sh_chebyt, ('dd', 'dD', 'ld'), None),
(special.eval_sh_chebyu, cython_special.eval_sh_chebyu, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_sh_jacobi, cython_special.eval_sh_jacobi, ('dddd', 'dddD', 'lddd'),
'd and l differ for negative int'),
(special.eval_sh_legendre, cython_special.eval_sh_legendre, ('dd', 'dD', 'ld'), None),
(special.exp1, cython_special.exp1, ('d', 'D'), None),
(special.exp10, cython_special.exp10, ('d',), None),
(special.exp2, cython_special.exp2, ('d',), None),
(special.expi, cython_special.expi, ('d', 'D'), None),
(special.expit, cython_special.expit, ('f', 'd', 'g'), None),
(special.expm1, cython_special.expm1, ('d', 'D'), None),
(special.expn, cython_special.expn, ('ld', 'dd'), None),
(special.exprel, cython_special.exprel, ('d',), None),
(special.fdtr, cython_special.fdtr, ('ddd',), None),
(special.fdtrc, cython_special.fdtrc, ('ddd',), None),
(special.fdtri, cython_special.fdtri, ('ddd',), None),
(special.fdtridfd, cython_special.fdtridfd, ('ddd',), None),
(special.fresnel, cython_special._fresnel_pywrap, ('d', 'D'), None),
(special.gamma, cython_special.gamma, ('d', 'D'), None),
(special.gammainc, cython_special.gammainc, ('dd',), None),
(special.gammaincc, cython_special.gammaincc, ('dd',), None),
(special.gammainccinv, cython_special.gammainccinv, ('dd',), None),
(special.gammaincinv, cython_special.gammaincinv, ('dd',), None),
(special.gammaln, cython_special.gammaln, ('d',), None),
(special.gammasgn, cython_special.gammasgn, ('d',), None),
(special.gdtr, cython_special.gdtr, ('ddd',), None),
(special.gdtrc, cython_special.gdtrc, ('ddd',), None),
(special.gdtria, cython_special.gdtria, ('ddd',), None),
(special.gdtrib, cython_special.gdtrib, ('ddd',), None),
(special.gdtrix, cython_special.gdtrix, ('ddd',), None),
(special.hankel1, cython_special.hankel1, ('dD',), None),
(special.hankel1e, cython_special.hankel1e, ('dD',), None),
(special.hankel2, cython_special.hankel2, ('dD',), None),
(special.hankel2e, cython_special.hankel2e, ('dD',), None),
(special.huber, cython_special.huber, ('dd',), None),
(special.hyp0f1, cython_special.hyp0f1, ('dd', 'dD'), None),
(special.hyp1f1, cython_special.hyp1f1, ('ddd', 'ddD'), None),
(special.hyp1f2, cython_special._hyp1f2_pywrap, ('dddd',), None),
(special.hyp2f0, cython_special._hyp2f0_pywrap, ('dddl', 'dddd'), None),
(special.hyp2f1, cython_special.hyp2f1, ('dddd', 'dddD'), None),
(special.hyp3f0, cython_special._hyp3f0_pywrap, ('dddd',), None),
(special.hyperu, cython_special.hyperu, ('ddd',), None),
(special.i0, cython_special.i0, ('d',), None),
(special.i0e, cython_special.i0e, ('d',), None),
(special.i1, cython_special.i1, ('d',), None),
(special.i1e, cython_special.i1e, ('d',), None),
(special.inv_boxcox, cython_special.inv_boxcox, ('dd',), None),
(special.inv_boxcox1p, cython_special.inv_boxcox1p, ('dd',), None),
(special.it2i0k0, cython_special._it2i0k0_pywrap, ('d',), None),
(special.it2j0y0, cython_special._it2j0y0_pywrap, ('d',), None),
(special.it2struve0, cython_special.it2struve0, ('d',), None),
(special.itairy, cython_special._itairy_pywrap, ('d',), None),
(special.iti0k0, cython_special._iti0k0_pywrap, ('d',), None),
(special.itj0y0, cython_special._itj0y0_pywrap, ('d',), None),
(special.itmodstruve0, cython_special.itmodstruve0, ('d',), None),
(special.itstruve0, cython_special.itstruve0, ('d',), None),
(special.iv, cython_special.iv, ('dd', 'dD'), None),
(special.ive, cython_special.ive, ('dd', 'dD'), None),
(special.j0, cython_special.j0, ('d',), None),
(special.j1, cython_special.j1, ('d',), None),
(special.jv, cython_special.jv, ('dd', 'dD'), None),
(special.jve, cython_special.jve, ('dd', 'dD'), None),
(special.k0, cython_special.k0, ('d',), None),
(special.k0e, cython_special.k0e, ('d',), None),
(special.k1, cython_special.k1, ('d',), None),
(special.k1e, cython_special.k1e, ('d',), None),
(special.kei, cython_special.kei, ('d',), None),
(special.keip, cython_special.keip, ('d',), None),
(special.kelvin, cython_special._kelvin_pywrap, ('d',), None),
(special.ker, cython_special.ker, ('d',), None),
(special.kerp, cython_special.kerp, ('d',), None),
(special.kl_div, cython_special.kl_div, ('dd',), None),
(special.kn, cython_special.kn, ('ld', 'dd'), None),
(special.kolmogi, cython_special.kolmogi, ('d',), None),
(special.kolmogorov, cython_special.kolmogorov, ('d',), None),
(special.kv, cython_special.kv, ('dd', 'dD'), None),
(special.kve, cython_special.kve, ('dd', 'dD'), None),
(special.log1p, cython_special.log1p, ('d', 'D'), None),
(special.log_ndtr, cython_special.log_ndtr, ('d', 'D'), None),
(special.loggamma, cython_special.loggamma, ('D',), None),
(special.logit, cython_special.logit, ('f', 'd', 'g'), None),
(special.lpmv, cython_special.lpmv, ('ddd',), None),
(special.mathieu_a, cython_special.mathieu_a, ('dd',), None),
(special.mathieu_b, cython_special.mathieu_b, ('dd',), None),
(special.mathieu_cem, cython_special._mathieu_cem_pywrap, ('ddd',), None),
(special.mathieu_modcem1, cython_special._mathieu_modcem1_pywrap, ('ddd',), None),
(special.mathieu_modcem2, cython_special._mathieu_modcem2_pywrap, ('ddd',), None),
(special.mathieu_modsem1, cython_special._mathieu_modsem1_pywrap, ('ddd',), None),
(special.mathieu_modsem2, cython_special._mathieu_modsem2_pywrap, ('ddd',), None),
(special.mathieu_sem, cython_special._mathieu_sem_pywrap, ('ddd',), None),
(special.modfresnelm, cython_special._modfresnelm_pywrap, ('d',), None),
(special.modfresnelp, cython_special._modfresnelp_pywrap, ('d',), None),
(special.modstruve, cython_special.modstruve, ('dd',), None),
(special.nbdtr, cython_special.nbdtr, ('lld', 'ddd'), None),
(special.nbdtrc, cython_special.nbdtrc, ('lld', 'ddd'), None),
(special.nbdtri, cython_special.nbdtri, ('lld', 'ddd'), None),
(special.nbdtrik, cython_special.nbdtrik, ('ddd',), None),
(special.nbdtrin, cython_special.nbdtrin, ('ddd',), None),
(special.ncfdtr, cython_special.ncfdtr, ('dddd',), None),
(special.ncfdtri, cython_special.ncfdtri, ('dddd',), None),
(special.ncfdtridfd, cython_special.ncfdtridfd, ('dddd',), None),
(special.ncfdtridfn, cython_special.ncfdtridfn, ('dddd',), None),
(special.ncfdtrinc, cython_special.ncfdtrinc, ('dddd',), None),
(special.nctdtr, cython_special.nctdtr, ('ddd',), None),
(special.nctdtridf, cython_special.nctdtridf, ('ddd',), None),
(special.nctdtrinc, cython_special.nctdtrinc, ('ddd',), None),
(special.nctdtrit, cython_special.nctdtrit, ('ddd',), None),
(special.ndtr, cython_special.ndtr, ('d', 'D'), None),
(special.ndtri, cython_special.ndtri, ('d',), None),
(special.nrdtrimn, cython_special.nrdtrimn, ('ddd',), None),
(special.nrdtrisd, cython_special.nrdtrisd, ('ddd',), None),
(special.obl_ang1, cython_special._obl_ang1_pywrap, ('dddd',), None),
(special.obl_ang1_cv, cython_special._obl_ang1_cv_pywrap, ('ddddd',), None),
(special.obl_cv, cython_special.obl_cv, ('ddd',), None),
(special.obl_rad1, cython_special._obl_rad1_pywrap, ('dddd',), "see gh-6211"),
(special.obl_rad1_cv, cython_special._obl_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.obl_rad2, cython_special._obl_rad2_pywrap, ('dddd',), "see gh-6211"),
(special.obl_rad2_cv, cython_special._obl_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pbdv, cython_special._pbdv_pywrap, ('dd',), None),
(special.pbvv, cython_special._pbvv_pywrap, ('dd',), None),
(special.pbwa, cython_special._pbwa_pywrap, ('dd',), None),
(special.pdtr, cython_special.pdtr, ('ld', 'dd'), None),
(special.pdtrc, cython_special.pdtrc, ('ld', 'dd'), None),
(special.pdtri, cython_special.pdtri, ('ld', 'dd'), None),
(special.pdtrik, cython_special.pdtrik, ('dd',), None),
(special.poch, cython_special.poch, ('dd',), None),
(special.pro_ang1, cython_special._pro_ang1_pywrap, ('dddd',), None),
(special.pro_ang1_cv, cython_special._pro_ang1_cv_pywrap, ('ddddd',), None),
(special.pro_cv, cython_special.pro_cv, ('ddd',), None),
(special.pro_rad1, cython_special._pro_rad1_pywrap, ('dddd',), "see gh-6211"),
(special.pro_rad1_cv, cython_special._pro_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pro_rad2, cython_special._pro_rad2_pywrap, ('dddd',), "see gh-6211"),
(special.pro_rad2_cv, cython_special._pro_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pseudo_huber, cython_special.pseudo_huber, ('dd',), None),
(special.psi, cython_special.psi, ('d', 'D'), None),
(special.radian, cython_special.radian, ('ddd',), None),
(special.rel_entr, cython_special.rel_entr, ('dd',), None),
(special.rgamma, cython_special.rgamma, ('d', 'D'), None),
(special.round, cython_special.round, ('d',), None),
(special.shichi, cython_special._shichi_pywrap, ('d', 'D'), None),
(special.sici, cython_special._sici_pywrap, ('d', 'D'), None),
(special.sindg, cython_special.sindg, ('d',), None),
(special.smirnov, cython_special.smirnov, ('ld', 'dd'), None),
(special.smirnovi, cython_special.smirnovi, ('ld', 'dd'), None),
(special.spence, cython_special.spence, ('d', 'D'), None),
(special.sph_harm, cython_special.sph_harm, ('lldd', 'dddd'), None),
(special.stdtr, cython_special.stdtr, ('dd',), None),
(special.stdtridf, cython_special.stdtridf, ('dd',), None),
(special.stdtrit, cython_special.stdtrit, ('dd',), None),
(special.struve, cython_special.struve, ('dd',), None),
(special.tandg, cython_special.tandg, ('d',), None),
(special.tklmbda, cython_special.tklmbda, ('dd',), None),
(special.wofz, cython_special.wofz, ('D',), None),
(special.wrightomega, cython_special.wrightomega, ('D',), None),
(special.xlog1py, cython_special.xlog1py, ('dd', 'DD'), None),
(special.xlogy, cython_special.xlogy, ('dd', 'DD'), None),
(special.y0, cython_special.y0, ('d',), None),
(special.y1, cython_special.y1, ('d',), None),
(special.yn, cython_special.yn, ('ld', 'dd'), None),
(special.yv, cython_special.yv, ('dd', 'dD'), None),
(special.yve, cython_special.yve, ('dd', 'dD'), None),
(special.zetac, cython_special.zetac, ('d',), None),
(special.owens_t, cython_special.owens_t, ('dd',), None)
]
IDS = [x[0].__name__ for x in PARAMS]
def _generate_test_points(typecodes):
axes = tuple(map(lambda x: TEST_POINTS[x], typecodes))
pts = list(product(*axes))
return pts
def test_cython_api_completeness():
# Check that everything is tested
skip = []
for name in dir(cython_special):
func = getattr(cython_special, name)
if callable(func) and not (name.startswith('_bench') or name in skip):
for _, cyfun, _, _ in PARAMS:
if cyfun is func:
break
else:
raise RuntimeError("{} missing from tests!".format(name))
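# (Added note, not in the original file) The loop above relies on Python's
# for/else: the else branch runs only when the loop finishes without hitting
# `break`, i.e. when no PARAMS entry wraps the cython_special function.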
@pytest.mark.parametrize("param", PARAMS, ids=IDS)
def test_cython_api(param):
pyfunc, cyfunc, specializations, knownfailure = param
if knownfailure:
pytest.xfail(reason=knownfailure)
# Check which parameters are expected to be fused types
values = [set() for code in specializations[0]]
for typecodes in specializations:
for j, v in enumerate(typecodes):
values[j].add(v)
seen = set()
is_fused_code = [False] * len(values)
for j, v in enumerate(values):
vv = tuple(sorted(v))
if vv in seen:
continue
is_fused_code[j] = (len(v) > 1)
seen.add(vv)
# Check results
for typecodes in specializations:
# Pick the correct specialized function
signature = []
for j, code in enumerate(typecodes):
if is_fused_code[j]:
signature.append(CYTHON_SIGNATURE_MAP[code])
if signature:
cy_spec_func = cyfunc[tuple(signature)]
else:
signature = None
cy_spec_func = cyfunc
# Test it
pts = _generate_test_points(typecodes)
for pt in pts:
pyval = pyfunc(*pt)
cyval = cy_spec_func(*pt)
assert_allclose(cyval, pyval, err_msg="{} {} {}".format(pt, typecodes, signature))
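# A minimal sketch (added here, not part of the original tests) of the
# fused-type indexing used in test_cython_api: a specialization can be
# selected by indexing a Cython fused function with a tuple of the type
# names from CYTHON_SIGNATURE_MAP before calling it.
def _example_fused_indexing():
    # Pick the complex ('D') specialization of erf explicitly, exactly as
    # test_cython_api does via cyfunc[tuple(signature)]:
    erf_complex = cython_special.erf[('double complex',)]
    return erf_complex(1 + 1j)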

# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_digamma.py

from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import pi, log, sqrt
from numpy.testing import assert_, assert_equal
from scipy.special._testutils import FuncData
import scipy.special as sc
# Euler-Mascheroni constant
euler = 0.57721566490153286
def test_consistency():
# Make sure the implementation of digamma for real arguments
# agrees with the implementation of digamma for complex arguments.
# It's all poles after -1e16
x = np.r_[-np.logspace(15, -30, 200), np.logspace(-30, 300, 200)]
dataset = np.vstack((x + 0j, sc.digamma(x))).T
FuncData(sc.digamma, dataset, 0, 1, rtol=5e-14, nan_ok=True).check()
def test_special_values():
# Test special values from Gauss's digamma theorem. See
#
# https://en.wikipedia.org/wiki/Digamma_function
dataset = [(1, -euler),
(0.5, -2*log(2) - euler),
(1/3, -pi/(2*sqrt(3)) - 3*log(3)/2 - euler),
(1/4, -pi/2 - 3*log(2) - euler),
(1/6, -pi*sqrt(3)/2 - 2*log(2) - 3*log(3)/2 - euler),
(1/8, -pi/2 - 4*log(2) - (pi + log(2 + sqrt(2)) - log(2 - sqrt(2)))/sqrt(2) - euler)]
dataset = np.asarray(dataset)
FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
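# For reference (added, not in the original file): the closed forms above
# follow from Gauss's digamma theorem, which states that for integers
# 0 < m < k,
#
#   psi(m/k) = -euler - log(2*k) - (pi/2)/tan(pi*m/k)
#              + 2*sum(cos(2*pi*n*m/k)*log(sin(pi*n/k))
#                      for n in range(1, (k - 1)//2 + 1))
#
# e.g. m = 1, k = 4 reproduces the (1/4, -pi/2 - 3*log(2) - euler) entry.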
def test_nonfinite():
pts = [0.0, -0.0, np.inf]
std = [-np.inf, np.inf, np.inf]
assert_equal(sc.digamma(pts), std)
assert_(all(np.isnan(sc.digamma([-np.inf, -1]))))

# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_cdflib.py

"""
Test cdflib functions versus mpmath, if available.
The following functions still need tests:
- ncfdtr
- ncfdtri
- ncfdtridfn
- ncfdtridfd
- ncfdtrinc
- nbdtrik
- nbdtrin
- nrdtrimn
- nrdtrisd
- pdtrik
- nctdtr
- nctdtrit
- nctdtridf
- nctdtrinc
"""
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import assert_equal
import pytest
import scipy.special as sp
from scipy._lib.six import with_metaclass
from scipy.special._testutils import (
MissingModule, check_version, FuncData)
from scipy.special._mptestutils import (
Arg, IntArg, get_args, mpf2float, assert_mpmath_equal)
try:
import mpmath
except ImportError:
mpmath = MissingModule('mpmath')
class ProbArg(object):
"""Generate a set of probabilities on [0, 1]."""
def __init__(self):
        # Include the endpoints for compatibility with Arg et al.
self.a = 0
self.b = 1
def values(self, n):
"""Return an array containing approximatively n numbers."""
m = max(1, n//3)
v1 = np.logspace(-30, np.log10(0.3), m)
v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
v3 = 1 - np.logspace(np.log10(0.3), -15, m)
v = np.r_[v1, v2, v3]
return np.unique(v)
class EndpointFilter(object):
def __init__(self, a, b, rtol, atol):
self.a = a
self.b = b
self.rtol = rtol
self.atol = atol
def __call__(self, x):
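        # (Added comment) Return True for points safely inside (a, b) and
        # False for points within tolerance of an endpoint, so that FuncData
        # skips checking those points.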
mask1 = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol
mask2 = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol
return np.where(mask1 | mask2, False, True)
class _CDFData(object):
def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True,
dps=20, n=5000, rtol=None, atol=None,
endpt_rtol=None, endpt_atol=None):
self.spfunc = spfunc
self.mpfunc = mpfunc
self.index = index
self.argspec = argspec
self.spfunc_first = spfunc_first
self.dps = dps
self.n = n
self.rtol = rtol
self.atol = atol
if not isinstance(argspec, list):
self.endpt_rtol = None
self.endpt_atol = None
elif endpt_rtol is not None or endpt_atol is not None:
if isinstance(endpt_rtol, list):
self.endpt_rtol = endpt_rtol
else:
self.endpt_rtol = [endpt_rtol]*len(self.argspec)
if isinstance(endpt_atol, list):
self.endpt_atol = endpt_atol
else:
self.endpt_atol = [endpt_atol]*len(self.argspec)
else:
self.endpt_rtol = None
self.endpt_atol = None
def idmap(self, *args):
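        # (Added comment) Round trip: evaluate one implementation, substitute
        # its result for argument number self.index, and feed the arguments
        # through the other implementation. If the two functions are mutual
        # inverses, the original value of that argument comes back.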
if self.spfunc_first:
res = self.spfunc(*args)
if np.isnan(res):
return np.nan
args = list(args)
args[self.index] = res
with mpmath.workdps(self.dps):
res = self.mpfunc(*tuple(args))
# Imaginary parts are spurious
res = mpf2float(res.real)
else:
with mpmath.workdps(self.dps):
res = self.mpfunc(*args)
res = mpf2float(res.real)
args = list(args)
args[self.index] = res
res = self.spfunc(*tuple(args))
return res
def get_param_filter(self):
if self.endpt_rtol is None and self.endpt_atol is None:
return None
filters = []
for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec):
if rtol is None and atol is None:
filters.append(None)
continue
elif rtol is None:
rtol = 0.0
elif atol is None:
atol = 0.0
filters.append(EndpointFilter(spec.a, spec.b, rtol, atol))
return filters
def check(self):
# Generate values for the arguments
args = get_args(self.argspec, self.n)
param_filter = self.get_param_filter()
param_columns = tuple(range(args.shape[1]))
result_columns = args.shape[1]
args = np.hstack((args, args[:,self.index].reshape(args.shape[0], 1)))
FuncData(self.idmap, args,
param_columns=param_columns, result_columns=result_columns,
rtol=self.rtol, atol=self.atol, vectorized=False,
param_filter=param_filter).check()
def _assert_inverts(*a, **kw):
d = _CDFData(*a, **kw)
d.check()
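# A minimal sketch (added, not part of the original suite) of the round trip
# that _assert_inverts exercises, written out for gdtrix: invert the gamma
# CDF with SciPy, push the result back through the mpmath CDF, and compare
# against the starting probability.
def _example_gdtrix_roundtrip(a=2.0, b=3.0, p=0.7, dps=30):
    x = sp.gdtrix(a, b, p)  # SciPy solves gdtr(a, b, x) = p for x
    with mpmath.workdps(dps):
        p_back = mpf2float(mpmath.gammainc(b, b=a*x, regularized=True))
    return p, p_back  # should agree to roughly the rtol used in the tests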
def _binomial_cdf(k, n, p):
k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p)
if k <= 0:
return mpmath.mpf(0)
elif k >= n:
return mpmath.mpf(1)
onemp = mpmath.fsub(1, p, exact=True)
return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True)
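# (Added note) _binomial_cdf uses the standard identity
#   P(X <= k) = I_{1 - p}(n - k, k + 1)   for X ~ Binomial(n, p),
# where I is the regularized incomplete beta function; fsub(1, p, exact=True)
# computes 1 - p without rounding so the tail stays accurate for small p.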
def _f_cdf(dfn, dfd, x):
if x < 0:
return mpmath.mpf(0)
dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x)
ub = dfn*x/(dfn*x + dfd)
res = mpmath.betainc(dfn/2, dfd/2, x2=ub, regularized=True)
return res
def _student_t_cdf(df, t, dps=None):
if dps is None:
dps = mpmath.mp.dps
with mpmath.workdps(dps):
df, t = mpmath.mpf(df), mpmath.mpf(t)
fac = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df)
fac *= t*mpmath.gamma(0.5*(df + 1))
fac /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df)
return 0.5 + fac
def _noncentral_chi_pdf(t, df, nc):
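    # (Added comment) Noncentral chi-square density:
    #   f(t) = (1/2) * exp(-(t + nc)/2) * (t/nc)**(df/4 - 1/2)
    #          * I_{df/2 - 1}(sqrt(nc*t)),
    # integrated numerically by _noncentral_chi_cdf below.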
res = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t))
res *= mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2
return res
def _noncentral_chi_cdf(x, df, nc, dps=None):
if dps is None:
dps = mpmath.mp.dps
x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc)
with mpmath.workdps(dps):
res = mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x])
return res
def _tukey_lmbda_quantile(p, lmbda):
# For lmbda != 0
return (p**lmbda - (1 - p)**lmbda)/lmbda
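# (Added note) As lmbda -> 0 this quantile tends to the logistic quantile
# log(p/(1 - p)); its inverse is the logistic CDF 1/(exp(-x) + 1), which is
# exactly what test_tklmbda_zero_shape compares against below.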
@pytest.mark.slow
@check_version(mpmath, '0.19')
class TestCDFlib(object):
@pytest.mark.xfail(run=False)
def test_bdtrik(self):
_assert_inverts(
sp.bdtrik,
_binomial_cdf,
0, [ProbArg(), IntArg(1, 1000), ProbArg()],
rtol=1e-4)
def test_bdtrin(self):
_assert_inverts(
sp.bdtrin,
_binomial_cdf,
1, [IntArg(1, 1000), ProbArg(), ProbArg()],
rtol=1e-4, endpt_atol=[None, None, 1e-6])
def test_btdtria(self):
_assert_inverts(
sp.btdtria,
lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
0, [ProbArg(), Arg(0, 1e2, inclusive_a=False),
Arg(0, 1, inclusive_a=False, inclusive_b=False)],
rtol=1e-6)
def test_btdtrib(self):
# Use small values of a or mpmath doesn't converge
_assert_inverts(
sp.btdtrib,
lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
Arg(0, 1, inclusive_a=False, inclusive_b=False)],
rtol=1e-7, endpt_atol=[None, 1e-18, 1e-15])
@pytest.mark.xfail(run=False)
def test_fdtridfd(self):
_assert_inverts(
sp.fdtridfd,
_f_cdf,
1, [IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)],
rtol=1e-7)
def test_gdtria(self):
_assert_inverts(
sp.gdtria,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
0, [ProbArg(), Arg(0, 1e3, inclusive_a=False),
Arg(0, 1e4, inclusive_a=False)], rtol=1e-7,
endpt_atol=[None, 1e-7, 1e-10])
def test_gdtrib(self):
# Use small values of a and x or mpmath doesn't converge
_assert_inverts(
sp.gdtrib,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
Arg(0, 1e3, inclusive_a=False)], rtol=1e-5)
def test_gdtrix(self):
_assert_inverts(
sp.gdtrix,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
2, [Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False),
ProbArg()], rtol=1e-7,
endpt_atol=[None, 1e-7, 1e-10])
def test_stdtr(self):
# Ideally the left endpoint for Arg() should be 0.
assert_mpmath_equal(
sp.stdtr,
_student_t_cdf,
[IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7)
@pytest.mark.xfail(run=False)
def test_stdtridf(self):
_assert_inverts(
sp.stdtridf,
_student_t_cdf,
0, [ProbArg(), Arg()], rtol=1e-7)
def test_stdtrit(self):
_assert_inverts(
sp.stdtrit,
_student_t_cdf,
1, [IntArg(1, 100), ProbArg()], rtol=1e-7,
endpt_atol=[None, 1e-10])
def test_chdtriv(self):
_assert_inverts(
sp.chdtriv,
lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True),
0, [ProbArg(), IntArg(1, 100)], rtol=1e-4)
@pytest.mark.xfail(run=False)
def test_chndtridf(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtridf,
_noncentral_chi_cdf,
1, [Arg(0, 100, inclusive_a=False), ProbArg(),
Arg(0, 100, inclusive_a=False)],
n=1000, rtol=1e-4, atol=1e-15)
@pytest.mark.xfail(run=False)
def test_chndtrinc(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtrinc,
_noncentral_chi_cdf,
2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()],
n=1000, rtol=1e-4, atol=1e-15)
def test_chndtrix(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtrix,
_noncentral_chi_cdf,
0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)],
n=1000, rtol=1e-4, atol=1e-15,
endpt_atol=[1e-6, None, None])
def test_tklmbda_zero_shape(self):
# When lmbda = 0 the CDF has a simple closed form
one = mpmath.mpf(1)
assert_mpmath_equal(
lambda x: sp.tklmbda(x, 0),
lambda x: one/(mpmath.exp(-x) + one),
[Arg()], rtol=1e-7)
def test_tklmbda_neg_shape(self):
_assert_inverts(
sp.tklmbda,
_tukey_lmbda_quantile,
0, [ProbArg(), Arg(-25, 0, inclusive_b=False)],
spfunc_first=False, rtol=1e-5,
endpt_atol=[1e-9, 1e-5])
@pytest.mark.xfail(run=False)
def test_tklmbda_pos_shape(self):
_assert_inverts(
sp.tklmbda,
_tukey_lmbda_quantile,
0, [ProbArg(), Arg(0, 100, inclusive_a=False)],
spfunc_first=False, rtol=1e-5)
def test_nonfinite():
funcs = [
("btdtria", 3),
("btdtrib", 3),
("bdtrik", 3),
("bdtrin", 3),
("chdtriv", 2),
("chndtr", 3),
("chndtrix", 3),
("chndtridf", 3),
("chndtrinc", 3),
("fdtridfd", 3),
("ncfdtr", 4),
("ncfdtri", 4),
("ncfdtridfn", 4),
("ncfdtridfd", 4),
("ncfdtrinc", 4),
("gdtrix", 3),
("gdtrib", 3),
("gdtria", 3),
("nbdtrik", 3),
("nbdtrin", 3),
("nrdtrimn", 3),
("nrdtrisd", 3),
("pdtrik", 2),
("stdtr", 2),
("stdtrit", 2),
("stdtridf", 2),
("nctdtr", 3),
("nctdtrit", 3),
("nctdtridf", 3),
("nctdtrinc", 3),
("tklmbda", 2),
]
np.random.seed(1)
for func, numargs in funcs:
func = getattr(sp, func)
args_choices = [(float(x), np.nan, np.inf, -np.inf) for x in
np.random.rand(numargs)]
for args in itertools.product(*args_choices):
res = func(*args)
if any(np.isnan(x) for x in args):
                # NaN inputs should result in a NaN output
assert_equal(res, np.nan)
else:
# All other inputs should return something (but not
# raise exceptions or cause hangs)
pass

# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_orthogonal.py

from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, sqrt
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_almost_equal, assert_allclose)
from pytest import raises as assert_raises
from scipy._lib.six import xrange
from scipy import integrate
import scipy.special as sc
from scipy.special import gamma
import scipy.special.orthogonal as orth
class TestCheby(object):
def test_chebyc(self):
C0 = orth.chebyc(0)
C1 = orth.chebyc(1)
olderr = np.seterr(all='ignore')
try:
C2 = orth.chebyc(2)
C3 = orth.chebyc(3)
C4 = orth.chebyc(4)
C5 = orth.chebyc(5)
finally:
np.seterr(**olderr)
assert_array_almost_equal(C0.c,[2],13)
assert_array_almost_equal(C1.c,[1,0],13)
assert_array_almost_equal(C2.c,[1,0,-2],13)
assert_array_almost_equal(C3.c,[1,0,-3,0],13)
assert_array_almost_equal(C4.c,[1,0,-4,0,2],13)
assert_array_almost_equal(C5.c,[1,0,-5,0,5,0],13)
def test_chebys(self):
S0 = orth.chebys(0)
S1 = orth.chebys(1)
S2 = orth.chebys(2)
S3 = orth.chebys(3)
S4 = orth.chebys(4)
S5 = orth.chebys(5)
assert_array_almost_equal(S0.c,[1],13)
assert_array_almost_equal(S1.c,[1,0],13)
assert_array_almost_equal(S2.c,[1,0,-1],13)
assert_array_almost_equal(S3.c,[1,0,-2,0],13)
assert_array_almost_equal(S4.c,[1,0,-3,0,1],13)
assert_array_almost_equal(S5.c,[1,0,-4,0,3,0],13)
def test_chebyt(self):
T0 = orth.chebyt(0)
T1 = orth.chebyt(1)
T2 = orth.chebyt(2)
T3 = orth.chebyt(3)
T4 = orth.chebyt(4)
T5 = orth.chebyt(5)
assert_array_almost_equal(T0.c,[1],13)
assert_array_almost_equal(T1.c,[1,0],13)
assert_array_almost_equal(T2.c,[2,0,-1],13)
assert_array_almost_equal(T3.c,[4,0,-3,0],13)
assert_array_almost_equal(T4.c,[8,0,-8,0,1],13)
assert_array_almost_equal(T5.c,[16,0,-20,0,5,0],13)
def test_chebyu(self):
U0 = orth.chebyu(0)
U1 = orth.chebyu(1)
U2 = orth.chebyu(2)
U3 = orth.chebyu(3)
U4 = orth.chebyu(4)
U5 = orth.chebyu(5)
assert_array_almost_equal(U0.c,[1],13)
assert_array_almost_equal(U1.c,[2,0],13)
assert_array_almost_equal(U2.c,[4,0,-1],13)
assert_array_almost_equal(U3.c,[8,0,-4,0],13)
assert_array_almost_equal(U4.c,[16,0,-12,0,1],13)
assert_array_almost_equal(U5.c,[32,0,-32,0,6,0],13)
class TestGegenbauer(object):
def test_gegenbauer(self):
a = 5*np.random.random() - 0.5
if np.any(a == 0):
a = -0.2
Ca0 = orth.gegenbauer(0,a)
Ca1 = orth.gegenbauer(1,a)
Ca2 = orth.gegenbauer(2,a)
Ca3 = orth.gegenbauer(3,a)
Ca4 = orth.gegenbauer(4,a)
Ca5 = orth.gegenbauer(5,a)
assert_array_almost_equal(Ca0.c,array([1]),13)
assert_array_almost_equal(Ca1.c,array([2*a,0]),13)
assert_array_almost_equal(Ca2.c,array([2*a*(a+1),0,-a]),13)
assert_array_almost_equal(Ca3.c,array([4*orth.poch(a,3),0,-6*a*(a+1),
0])/3.0,11)
assert_array_almost_equal(Ca4.c,array([4*orth.poch(a,4),0,-12*orth.poch(a,3),
0,3*a*(a+1)])/6.0,11)
assert_array_almost_equal(Ca5.c,array([4*orth.poch(a,5),0,-20*orth.poch(a,4),
0,15*orth.poch(a,3),0])/15.0,11)
class TestHermite(object):
def test_hermite(self):
H0 = orth.hermite(0)
H1 = orth.hermite(1)
H2 = orth.hermite(2)
H3 = orth.hermite(3)
H4 = orth.hermite(4)
H5 = orth.hermite(5)
assert_array_almost_equal(H0.c,[1],13)
assert_array_almost_equal(H1.c,[2,0],13)
assert_array_almost_equal(H2.c,[4,0,-2],13)
assert_array_almost_equal(H3.c,[8,0,-12,0],13)
assert_array_almost_equal(H4.c,[16,0,-48,0,12],12)
assert_array_almost_equal(H5.c,[32,0,-160,0,120,0],12)
def test_hermitenorm(self):
# He_n(x) = 2**(-n/2) H_n(x/sqrt(2))
psub = np.poly1d([1.0/sqrt(2),0])
H0 = orth.hermitenorm(0)
H1 = orth.hermitenorm(1)
H2 = orth.hermitenorm(2)
H3 = orth.hermitenorm(3)
H4 = orth.hermitenorm(4)
H5 = orth.hermitenorm(5)
he0 = orth.hermite(0)(psub)
he1 = orth.hermite(1)(psub) / sqrt(2)
he2 = orth.hermite(2)(psub) / 2.0
he3 = orth.hermite(3)(psub) / (2*sqrt(2))
he4 = orth.hermite(4)(psub) / 4.0
he5 = orth.hermite(5)(psub) / (4.0*sqrt(2))
assert_array_almost_equal(H0.c,he0.c,13)
assert_array_almost_equal(H1.c,he1.c,13)
assert_array_almost_equal(H2.c,he2.c,13)
assert_array_almost_equal(H3.c,he3.c,13)
assert_array_almost_equal(H4.c,he4.c,13)
assert_array_almost_equal(H5.c,he5.c,13)
class _test_sh_legendre(object):
def test_sh_legendre(self):
# P*_n(x) = P_n(2x-1)
psub = np.poly1d([2,-1])
Ps0 = orth.sh_legendre(0)
Ps1 = orth.sh_legendre(1)
Ps2 = orth.sh_legendre(2)
Ps3 = orth.sh_legendre(3)
Ps4 = orth.sh_legendre(4)
Ps5 = orth.sh_legendre(5)
pse0 = orth.legendre(0)(psub)
pse1 = orth.legendre(1)(psub)
pse2 = orth.legendre(2)(psub)
pse3 = orth.legendre(3)(psub)
pse4 = orth.legendre(4)(psub)
pse5 = orth.legendre(5)(psub)
assert_array_almost_equal(Ps0.c,pse0.c,13)
assert_array_almost_equal(Ps1.c,pse1.c,13)
assert_array_almost_equal(Ps2.c,pse2.c,13)
assert_array_almost_equal(Ps3.c,pse3.c,13)
assert_array_almost_equal(Ps4.c,pse4.c,12)
assert_array_almost_equal(Ps5.c,pse5.c,12)
class _test_sh_chebyt(object):
def test_sh_chebyt(self):
# T*_n(x) = T_n(2x-1)
psub = np.poly1d([2,-1])
Ts0 = orth.sh_chebyt(0)
Ts1 = orth.sh_chebyt(1)
Ts2 = orth.sh_chebyt(2)
Ts3 = orth.sh_chebyt(3)
Ts4 = orth.sh_chebyt(4)
Ts5 = orth.sh_chebyt(5)
tse0 = orth.chebyt(0)(psub)
tse1 = orth.chebyt(1)(psub)
tse2 = orth.chebyt(2)(psub)
tse3 = orth.chebyt(3)(psub)
tse4 = orth.chebyt(4)(psub)
tse5 = orth.chebyt(5)(psub)
assert_array_almost_equal(Ts0.c,tse0.c,13)
assert_array_almost_equal(Ts1.c,tse1.c,13)
assert_array_almost_equal(Ts2.c,tse2.c,13)
assert_array_almost_equal(Ts3.c,tse3.c,13)
assert_array_almost_equal(Ts4.c,tse4.c,12)
assert_array_almost_equal(Ts5.c,tse5.c,12)
class _test_sh_chebyu(object):
def test_sh_chebyu(self):
# U*_n(x) = U_n(2x-1)
psub = np.poly1d([2,-1])
Us0 = orth.sh_chebyu(0)
Us1 = orth.sh_chebyu(1)
Us2 = orth.sh_chebyu(2)
Us3 = orth.sh_chebyu(3)
Us4 = orth.sh_chebyu(4)
Us5 = orth.sh_chebyu(5)
use0 = orth.chebyu(0)(psub)
use1 = orth.chebyu(1)(psub)
use2 = orth.chebyu(2)(psub)
use3 = orth.chebyu(3)(psub)
use4 = orth.chebyu(4)(psub)
use5 = orth.chebyu(5)(psub)
assert_array_almost_equal(Us0.c,use0.c,13)
assert_array_almost_equal(Us1.c,use1.c,13)
assert_array_almost_equal(Us2.c,use2.c,13)
assert_array_almost_equal(Us3.c,use3.c,13)
assert_array_almost_equal(Us4.c,use4.c,12)
assert_array_almost_equal(Us5.c,use5.c,11)
class _test_sh_jacobi(object):
def test_sh_jacobi(self):
# G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1)
conv = lambda n,p: gamma(n+1)*gamma(n+p)/gamma(2*n+p)
psub = np.poly1d([2,-1])
q = 4 * np.random.random()
p = q-1 + 2*np.random.random()
# print("shifted jacobi p,q = ", p, q)
G0 = orth.sh_jacobi(0,p,q)
G1 = orth.sh_jacobi(1,p,q)
G2 = orth.sh_jacobi(2,p,q)
G3 = orth.sh_jacobi(3,p,q)
G4 = orth.sh_jacobi(4,p,q)
G5 = orth.sh_jacobi(5,p,q)
ge0 = orth.jacobi(0,p-q,q-1)(psub) * conv(0,p)
ge1 = orth.jacobi(1,p-q,q-1)(psub) * conv(1,p)
ge2 = orth.jacobi(2,p-q,q-1)(psub) * conv(2,p)
ge3 = orth.jacobi(3,p-q,q-1)(psub) * conv(3,p)
ge4 = orth.jacobi(4,p-q,q-1)(psub) * conv(4,p)
ge5 = orth.jacobi(5,p-q,q-1)(psub) * conv(5,p)
assert_array_almost_equal(G0.c,ge0.c,13)
assert_array_almost_equal(G1.c,ge1.c,13)
assert_array_almost_equal(G2.c,ge2.c,13)
assert_array_almost_equal(G3.c,ge3.c,13)
assert_array_almost_equal(G4.c,ge4.c,13)
assert_array_almost_equal(G5.c,ge5.c,13)
class TestCall(object):
def test_call(self):
poly = []
for n in xrange(5):
poly.extend([x.strip() for x in
("""
orth.jacobi(%(n)d,0.3,0.9)
orth.sh_jacobi(%(n)d,0.3,0.9)
orth.genlaguerre(%(n)d,0.3)
orth.laguerre(%(n)d)
orth.hermite(%(n)d)
orth.hermitenorm(%(n)d)
orth.gegenbauer(%(n)d,0.3)
orth.chebyt(%(n)d)
orth.chebyu(%(n)d)
orth.chebyc(%(n)d)
orth.chebys(%(n)d)
orth.sh_chebyt(%(n)d)
orth.sh_chebyu(%(n)d)
orth.legendre(%(n)d)
orth.sh_legendre(%(n)d)
""" % dict(n=n)).split()
])
olderr = np.seterr(all='ignore')
try:
for pstr in poly:
p = eval(pstr)
assert_almost_equal(p(0.315), np.poly1d(p.coef)(0.315),
err_msg=pstr)
finally:
np.seterr(**olderr)
class TestGenlaguerre(object):
def test_regression(self):
assert_equal(orth.genlaguerre(1, 1, monic=False)(0), 2.)
assert_equal(orth.genlaguerre(1, 1, monic=True)(0), -2.)
assert_equal(orth.genlaguerre(1, 1, monic=False), np.poly1d([-1, 2]))
assert_equal(orth.genlaguerre(1, 1, monic=True), np.poly1d([1, -2]))
def verify_gauss_quad(root_func, eval_func, weight_func, a, b, N,
rtol=1e-15, atol=1e-14):
# this test is copied from numpy's TestGauss in test_hermite.py
x, w, mu = root_func(N, True)
n = np.arange(N)
v = eval_func(n[:,np.newaxis], x)
vv = np.dot(v*w, v.T)
vd = 1 / np.sqrt(vv.diagonal())
vv = vd[:, np.newaxis] * vv * vd
assert_allclose(vv, np.eye(N), rtol, atol)
# check that the integral of 1 is correct
assert_allclose(w.sum(), mu, rtol, atol)
# compare the results of integrating a function with quad.
f = lambda x: x**3 - 3*x**2 + x - 2
resI = integrate.quad(lambda x: f(x)*weight_func(x), a, b)
resG = np.vdot(f(x), w)
rtol = 1e-6 if 1e-6 < resI[1] else resI[1] * 10
assert_allclose(resI[0], resG, rtol=rtol)
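# A small illustration (added, not part of the original file) of the property
# verify_gauss_quad checks: an N-point Gauss rule integrates polynomials of
# degree up to 2*N - 1 exactly against its weight function.
def _example_gauss_legendre_exactness():
    x, w = sc.roots_legendre(5)        # 5 nodes: exact through degree 9
    f = lambda t: t**8 - t**2 + 1.0
    approx = np.dot(w, f(x))
    exact = 2.0/9 - 2.0/3 + 2.0        # integral of f over [-1, 1]
    return approx, exact               # equal to within ~1e-14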
def test_roots_jacobi():
rf = lambda a, b: lambda n, mu: sc.roots_jacobi(n, a, b, mu)
ef = lambda a, b: lambda n, x: orth.eval_jacobi(n, a, b, x)
wf = lambda a, b: lambda x: (1 - x)**a * (1 + x)**b
vgq = verify_gauss_quad
vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., 5)
vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1.,
25, atol=1e-12)
vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1.,
100, atol=1e-11)
vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 5)
vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 25, atol=1.5e-13)
vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 100, atol=1e-12)
vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 5, atol=2e-13)
vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 25, atol=2e-13)
vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 100, atol=1e-12)
vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 5)
vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 25, atol=1e-13)
vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 100, atol=2e-13)
vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 5)
vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 25)
vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1.,
100, atol=1e-13)
vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 5, atol=1e-13)
vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 25, atol=2e-13)
vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1.,
100, atol=1e-11)
vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 5)
vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 25, atol=1e-13)
vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1.,
100, atol=1e-13)
# when alpha == beta == 0, P_n^{a,b}(x) == P_n(x)
xj, wj = sc.roots_jacobi(6, 0.0, 0.0)
xl, wl = sc.roots_legendre(6)
assert_allclose(xj, xl, 1e-14, 1e-14)
assert_allclose(wj, wl, 1e-14, 1e-14)
# when alpha == beta != 0, P_n^{a,b}(x) == C_n^{alpha+0.5}(x)
xj, wj = sc.roots_jacobi(6, 4.0, 4.0)
xc, wc = sc.roots_gegenbauer(6, 4.5)
assert_allclose(xj, xc, 1e-14, 1e-14)
assert_allclose(wj, wc, 1e-14, 1e-14)
x, w = sc.roots_jacobi(5, 2, 3, False)
y, v, m = sc.roots_jacobi(5, 2, 3, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(wf(2,3), -1, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_jacobi, 0, 1, 1)
assert_raises(ValueError, sc.roots_jacobi, 3.3, 1, 1)
assert_raises(ValueError, sc.roots_jacobi, 3, -2, 1)
assert_raises(ValueError, sc.roots_jacobi, 3, 1, -2)
assert_raises(ValueError, sc.roots_jacobi, 3, -2, -2)
def test_roots_sh_jacobi():
rf = lambda a, b: lambda n, mu: sc.roots_sh_jacobi(n, a, b, mu)
ef = lambda a, b: lambda n, x: orth.eval_sh_jacobi(n, a, b, x)
wf = lambda a, b: lambda x: (1. - x)**(a - b) * (x)**(b - 1.)
vgq = verify_gauss_quad
vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., 5)
vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1.,
25, atol=1e-12)
vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1.,
100, atol=1e-11)
vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 5)
vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 25, atol=1e-13)
vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 100, atol=1e-12)
vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 5)
vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 25, atol=1.5e-13)
vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 100, atol=1e-12)
vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 5)
vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 25, atol=1e-13)
vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 100, atol=1e-12)
vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 5)
vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 25)
vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1.,
100, atol=1e-13)
vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 5, atol=1e-12)
vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 25, atol=1e-11)
vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 100, atol=1e-10)
vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 5, atol=3.5e-14)
vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 25, atol=2e-13)
vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1.,
100, atol=1e-12)
x, w = sc.roots_sh_jacobi(5, 3, 2, False)
y, v, m = sc.roots_sh_jacobi(5, 3, 2, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(wf(3,2), 0, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_sh_jacobi, 0, 1, 1)
assert_raises(ValueError, sc.roots_sh_jacobi, 3.3, 1, 1)
assert_raises(ValueError, sc.roots_sh_jacobi, 3, 1, 2) # p - q <= -1
assert_raises(ValueError, sc.roots_sh_jacobi, 3, 2, -1) # q <= 0
assert_raises(ValueError, sc.roots_sh_jacobi, 3, -2, -1) # both
def test_roots_hermite():
rootf = sc.roots_hermite
evalf = orth.eval_hermite
weightf = orth.hermite(5).weight_func
verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)
# Golub-Welsch branch
x, w = sc.roots_hermite(5, False)
y, v, m = sc.roots_hermite(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
assert_allclose(m, muI, rtol=muI_err)
# Asymptotic branch (switch over at n >= 150)
x, w = sc.roots_hermite(200, False)
y, v, m = sc.roots_hermite(200, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
assert_allclose(sum(v), m, 1e-14, 1e-14)
assert_raises(ValueError, sc.roots_hermite, 0)
assert_raises(ValueError, sc.roots_hermite, 3.3)
def test_roots_hermite_asy():
# Recursion for Hermite functions
def hermite_recursion(n, nodes):
H = np.zeros((n, nodes.size))
H[0,:] = np.pi**(-0.25) * np.exp(-0.5*nodes**2)
if n > 1:
H[1,:] = sqrt(2.0) * nodes * H[0,:]
for k in xrange(2, n):
H[k,:] = sqrt(2.0/k) * nodes * H[k-1,:] - sqrt((k-1.0)/k) * H[k-2,:]
return H
# This tests only the nodes
def test(N, rtol=1e-15, atol=1e-14):
x, w = orth._roots_hermite_asy(N)
H = hermite_recursion(N+1, x)
assert_allclose(H[-1,:], np.zeros(N), rtol, atol)
assert_allclose(sum(w), sqrt(np.pi), rtol, atol)
test(150, atol=1e-12)
test(151, atol=1e-12)
test(300, atol=1e-12)
test(301, atol=1e-12)
test(500, atol=1e-12)
test(501, atol=1e-12)
test(999, atol=1e-12)
test(1000, atol=1e-12)
test(2000, atol=1e-12)
test(5000, atol=1e-12)
def test_roots_hermitenorm():
rootf = sc.roots_hermitenorm
evalf = orth.eval_hermitenorm
weightf = orth.hermitenorm(5).weight_func
verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)
x, w = sc.roots_hermitenorm(5, False)
y, v, m = sc.roots_hermitenorm(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_hermitenorm, 0)
assert_raises(ValueError, sc.roots_hermitenorm, 3.3)
def test_roots_gegenbauer():
rootf = lambda a: lambda n, mu: sc.roots_gegenbauer(n, a, mu)
evalf = lambda a: lambda n, x: orth.eval_gegenbauer(n, a, x)
weightf = lambda a: lambda x: (1 - x**2)**(a - 0.5)
vgq = verify_gauss_quad
vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 5)
vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 25, atol=1e-12)
vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 100, atol=1e-11)
vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 5)
vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 25, atol=1e-13)
vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 100, atol=1e-12)
vgq(rootf(1), evalf(1), weightf(1), -1., 1., 5)
vgq(rootf(1), evalf(1), weightf(1), -1., 1., 25, atol=1e-13)
vgq(rootf(1), evalf(1), weightf(1), -1., 1., 100, atol=1e-12)
vgq(rootf(10), evalf(10), weightf(10), -1., 1., 5)
vgq(rootf(10), evalf(10), weightf(10), -1., 1., 25, atol=1e-13)
vgq(rootf(10), evalf(10), weightf(10), -1., 1., 100, atol=1e-12)
vgq(rootf(50), evalf(50), weightf(50), -1., 1., 5, atol=1e-13)
vgq(rootf(50), evalf(50), weightf(50), -1., 1., 25, atol=1e-12)
vgq(rootf(50), evalf(50), weightf(50), -1., 1., 100, atol=1e-11)
    # this is a special case that the old code supported:
    # when alpha = 0 the Gegenbauer polynomial is identically zero, but in
    # the limit alpha -> 0 it becomes a scaled copy of T_n(x), and the roots
    # routine supports that limit.
vgq(rootf(0), orth.eval_chebyt, weightf(0), -1., 1., 5)
vgq(rootf(0), orth.eval_chebyt, weightf(0), -1., 1., 25)
vgq(rootf(0), orth.eval_chebyt, weightf(0), -1., 1., 100, atol=1e-12)
x, w = sc.roots_gegenbauer(5, 2, False)
y, v, m = sc.roots_gegenbauer(5, 2, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf(2), -1, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_gegenbauer, 0, 2)
assert_raises(ValueError, sc.roots_gegenbauer, 3.3, 2)
assert_raises(ValueError, sc.roots_gegenbauer, 3, -.75)
def test_roots_chebyt():
weightf = orth.chebyt(5).weight_func
verify_gauss_quad(sc.roots_chebyt, orth.eval_chebyt, weightf, -1., 1., 5)
verify_gauss_quad(sc.roots_chebyt, orth.eval_chebyt, weightf, -1., 1., 25)
verify_gauss_quad(sc.roots_chebyt, orth.eval_chebyt, weightf, -1., 1., 100, atol=1e-12)
x, w = sc.roots_chebyt(5, False)
y, v, m = sc.roots_chebyt(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, -1, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_chebyt, 0)
assert_raises(ValueError, sc.roots_chebyt, 3.3)
def test_chebyt_symmetry():
x, w = sc.roots_chebyt(21)
pos, neg = x[:10], x[11:]
assert_equal(neg, -pos[::-1])
assert_equal(x[10], 0)
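# (Added note) The nodes of T_n have the closed form
# x_k = cos((2*k + 1)*pi/(2*n)) for k = 0, ..., n - 1, so for n = 21 they are
# symmetric about the middle node x_10 = cos(pi/2) = 0, which is what
# test_chebyt_symmetry asserts.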
def test_roots_chebyu():
weightf = orth.chebyu(5).weight_func
verify_gauss_quad(sc.roots_chebyu, orth.eval_chebyu, weightf, -1., 1., 5)
verify_gauss_quad(sc.roots_chebyu, orth.eval_chebyu, weightf, -1., 1., 25)
verify_gauss_quad(sc.roots_chebyu, orth.eval_chebyu, weightf, -1., 1., 100)
x, w = sc.roots_chebyu(5, False)
y, v, m = sc.roots_chebyu(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, -1, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_chebyu, 0)
assert_raises(ValueError, sc.roots_chebyu, 3.3)
def test_roots_chebyc():
weightf = orth.chebyc(5).weight_func
verify_gauss_quad(sc.roots_chebyc, orth.eval_chebyc, weightf, -2., 2., 5)
verify_gauss_quad(sc.roots_chebyc, orth.eval_chebyc, weightf, -2., 2., 25)
verify_gauss_quad(sc.roots_chebyc, orth.eval_chebyc, weightf, -2., 2., 100, atol=1e-12)
x, w = sc.roots_chebyc(5, False)
y, v, m = sc.roots_chebyc(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, -2, 2)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_chebyc, 0)
assert_raises(ValueError, sc.roots_chebyc, 3.3)
def test_roots_chebys():
weightf = orth.chebys(5).weight_func
verify_gauss_quad(sc.roots_chebys, orth.eval_chebys, weightf, -2., 2., 5)
verify_gauss_quad(sc.roots_chebys, orth.eval_chebys, weightf, -2., 2., 25)
verify_gauss_quad(sc.roots_chebys, orth.eval_chebys, weightf, -2., 2., 100)
x, w = sc.roots_chebys(5, False)
y, v, m = sc.roots_chebys(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, -2, 2)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_chebys, 0)
assert_raises(ValueError, sc.roots_chebys, 3.3)
def test_roots_sh_chebyt():
weightf = orth.sh_chebyt(5).weight_func
verify_gauss_quad(sc.roots_sh_chebyt, orth.eval_sh_chebyt, weightf, 0., 1., 5)
verify_gauss_quad(sc.roots_sh_chebyt, orth.eval_sh_chebyt, weightf, 0., 1., 25)
verify_gauss_quad(sc.roots_sh_chebyt, orth.eval_sh_chebyt, weightf, 0., 1.,
100, atol=1e-13)
x, w = sc.roots_sh_chebyt(5, False)
y, v, m = sc.roots_sh_chebyt(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, 0, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_sh_chebyt, 0)
assert_raises(ValueError, sc.roots_sh_chebyt, 3.3)
def test_roots_sh_chebyu():
weightf = orth.sh_chebyu(5).weight_func
verify_gauss_quad(sc.roots_sh_chebyu, orth.eval_sh_chebyu, weightf, 0., 1., 5)
verify_gauss_quad(sc.roots_sh_chebyu, orth.eval_sh_chebyu, weightf, 0., 1., 25)
verify_gauss_quad(sc.roots_sh_chebyu, orth.eval_sh_chebyu, weightf, 0., 1.,
100, atol=1e-13)
x, w = sc.roots_sh_chebyu(5, False)
y, v, m = sc.roots_sh_chebyu(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, 0, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_sh_chebyu, 0)
assert_raises(ValueError, sc.roots_sh_chebyu, 3.3)
def test_roots_legendre():
weightf = orth.legendre(5).weight_func
verify_gauss_quad(sc.roots_legendre, orth.eval_legendre, weightf, -1., 1., 5)
verify_gauss_quad(sc.roots_legendre, orth.eval_legendre, weightf, -1., 1.,
25, atol=1e-13)
verify_gauss_quad(sc.roots_legendre, orth.eval_legendre, weightf, -1., 1.,
100, atol=1e-12)
x, w = sc.roots_legendre(5, False)
y, v, m = sc.roots_legendre(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, -1, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_legendre, 0)
assert_raises(ValueError, sc.roots_legendre, 3.3)
def test_roots_sh_legendre():
weightf = orth.sh_legendre(5).weight_func
verify_gauss_quad(sc.roots_sh_legendre, orth.eval_sh_legendre, weightf, 0., 1., 5)
verify_gauss_quad(sc.roots_sh_legendre, orth.eval_sh_legendre, weightf, 0., 1.,
25, atol=1e-13)
verify_gauss_quad(sc.roots_sh_legendre, orth.eval_sh_legendre, weightf, 0., 1.,
100, atol=1e-12)
x, w = sc.roots_sh_legendre(5, False)
y, v, m = sc.roots_sh_legendre(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, 0, 1)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_sh_legendre, 0)
assert_raises(ValueError, sc.roots_sh_legendre, 3.3)
def test_roots_laguerre():
weightf = orth.laguerre(5).weight_func
verify_gauss_quad(sc.roots_laguerre, orth.eval_laguerre, weightf, 0., np.inf, 5)
verify_gauss_quad(sc.roots_laguerre, orth.eval_laguerre, weightf, 0., np.inf,
25, atol=1e-13)
verify_gauss_quad(sc.roots_laguerre, orth.eval_laguerre, weightf, 0., np.inf,
100, atol=1e-12)
x, w = sc.roots_laguerre(5, False)
y, v, m = sc.roots_laguerre(5, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf, 0, np.inf)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_laguerre, 0)
assert_raises(ValueError, sc.roots_laguerre, 3.3)
def test_roots_genlaguerre():
rootf = lambda a: lambda n, mu: sc.roots_genlaguerre(n, a, mu)
evalf = lambda a: lambda n, x: orth.eval_genlaguerre(n, a, x)
weightf = lambda a: lambda x: x**a * np.exp(-x)
vgq = verify_gauss_quad
vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 5)
vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 25, atol=1e-13)
vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 100, atol=1e-12)
vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 5)
vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 25, atol=1e-13)
vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 100, atol=1e-13)
vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 5)
vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 25, atol=1e-13)
vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 100, atol=1e-13)
vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 5)
vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 25, atol=1e-13)
vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 100, atol=1e-12)
vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 5)
vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 25, atol=1e-13)
vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 100, rtol=1e-14, atol=2e-13)
x, w = sc.roots_genlaguerre(5, 2, False)
y, v, m = sc.roots_genlaguerre(5, 2, True)
assert_allclose(x, y, 1e-14, 1e-14)
assert_allclose(w, v, 1e-14, 1e-14)
muI, muI_err = integrate.quad(weightf(2.), 0., np.inf)
assert_allclose(m, muI, rtol=muI_err)
assert_raises(ValueError, sc.roots_genlaguerre, 0, 2)
assert_raises(ValueError, sc.roots_genlaguerre, 3.3, 2)
assert_raises(ValueError, sc.roots_genlaguerre, 3, -1.1)
def test_gh_6721():
    # Regression test for gh-6721. This should not raise.
sc.chebyt(65)(0.2)

# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_nan_inputs.py

"""Test how the ufuncs in special handle nan inputs.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_equal, assert_
import pytest
import scipy.special as sc
from scipy._lib._numpy_compat import suppress_warnings
KNOWNFAILURES = {}
POSTPROCESSING = {
sc.hyp2f0: lambda x, y: x # Second argument is an error estimate
}
def _get_ufuncs():
ufuncs = []
ufunc_names = []
for name in sorted(sc.__dict__):
obj = sc.__dict__[name]
if not isinstance(obj, np.ufunc):
continue
msg = KNOWNFAILURES.get(obj)
if msg is None:
ufuncs.append(obj)
ufunc_names.append(name)
else:
fail = pytest.mark.xfail(run=False, reason=msg)
ufuncs.append(pytest.param(obj, marks=fail))
ufunc_names.append(name)
return ufuncs, ufunc_names
UFUNCS, UFUNC_NAMES = _get_ufuncs()
@pytest.mark.parametrize("func", UFUNCS, ids=UFUNC_NAMES)
def test_nan_inputs(func):
args = (np.nan,)*func.nin
with suppress_warnings() as sup:
# Ignore warnings about unsafe casts from legacy wrappers
sup.filter(RuntimeWarning,
"floating point number truncated to an integer")
try:
res = func(*args)
except TypeError:
# One of the arguments doesn't take real inputs
return
if func in POSTPROCESSING:
res = POSTPROCESSING[func](*res)
msg = "got {} instead of nan".format(res)
assert_array_equal(np.isnan(res), True, err_msg=msg)
def test_legacy_cast():
with suppress_warnings() as sup:
sup.filter(RuntimeWarning,
"floating point number truncated to an integer")
res = sc.bdtrc(np.nan, 1, 0.5)
assert_(np.isnan(res))

# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_basic.py

# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
from __future__ import division, print_function, absolute_import
import sys
import platform
import itertools
import numpy as np
from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
import pytest
from pytest import raises as assert_raises
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_approx_equal,
assert_, assert_allclose,
assert_array_almost_equal_nulp)
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk, zeta
from scipy.special._testutils import with_special_errors, \
assert_func_equal, FuncData
from scipy._lib._numpy_compat import suppress_warnings
from scipy._lib._version import NumpyVersion
import math
class TestCephes(object):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
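            # Exact integer evaluation of C(n, k) as the product
            # prod_{i=1..k} (n - k + i) / i, converting to float only
            # at the end so no intermediate precision is lost.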
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_binom_nooverflow_8346(self):
        # Test that binom(n, k) doesn't overflow prematurely (gh-8346)
dataset = [
(1000, 500, 2.70288240945436551e+299),
(1002, 501, 1.08007396880791225e+300),
(1004, 502, 4.31599279169058121e+300),
(1006, 503, 1.72468101616263781e+301),
(1008, 504, 6.89188009236419153e+301),
(1010, 505, 2.75402257948335448e+302),
(1012, 506, 1.10052048531923757e+303),
(1014, 507, 4.39774063758732849e+303),
(1016, 508, 1.75736486108312519e+304),
(1018, 509, 7.02255427788423734e+304),
(1020, 510, 2.80626776829962255e+305),
(1022, 511, 1.12140876377061240e+306),
(1024, 512, 4.48125455209897109e+306),
(1026, 513, 1.79075474304149900e+307),
(1028, 514, 7.15605105487789676e+307)
]
dataset = np.asarray(dataset)
FuncData(cephes.binom, dataset, (0, 1), 2, rtol=1e-12).check()
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
# Each row holds (x, nu, lam, expected_value)
# These values were computed using Wolfram Alpha with
# CDF[NoncentralChiSquareDistribution[nu, lam], x]
values = np.array([
[25.00, 20.0, 400, 4.1210655112396197139e-57],
[25.00, 8.00, 250, 2.3988026526832425878e-29],
[0.001, 8.00, 40., 5.3761806201366039084e-24],
[0.010, 8.00, 40., 5.45396231055999457039e-20],
[20.00, 2.00, 107, 1.39390743555819597802e-9],
[22.50, 2.00, 107, 7.11803307138105870671e-9],
[25.00, 2.00, 107, 3.11041244829864897313e-8],
[3.000, 2.00, 1.0, 0.62064365321954362734],
[350.0, 300., 10., 0.93880128006276407710],
[100.0, 13.5, 10., 0.99999999650104210949],
[700.0, 20.0, 400, 0.99999999925680650105],
[150.0, 13.5, 10., 0.99999999999999983046],
[160.0, 13.5, 10., 0.99999999999999999518], # 1.0
])
cdf = cephes.chndtr(values[:, 0], values[:, 1], values[:, 2])
assert_allclose(cdf, values[:, 3], rtol=1e-12)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
assert_equal(cephes.expm1(np.inf), np.inf)
assert_equal(cephes.expm1(-np.inf), -1)
assert_equal(cephes.expm1(np.nan), np.nan)
    # Earlier numpy versions don't guarantee that npy_cexp conforms to C99.
@pytest.mark.skipif(NumpyVersion(np.__version__) < '1.9.0', reason='')
def test_expm1_complex(self):
expm1 = cephes.expm1
assert_equal(expm1(0 + 0j), 0 + 0j)
assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
@pytest.mark.xfail(reason='The real part of expm1(z) bad at these points')
def test_expm1_complex_hard(self):
# The real part of this function is difficult to evaluate when
# z.real = -log(cos(z.imag)).
y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
x = -np.log(np.cos(y))
z = x + 1j*y
# evaluate using mpmath.expm1 with dps=1000
expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
2.4289354732893695e-18+0.20271003550867248j,
4.5235500262585768e-17+0.30933624960962319j,
7.8234305217489006e-17-3.3805150062465863j,
-1.3685191953697676e-16-225.95084645419513j,
8.7175620481291045e-17+2.2371609442247422j])
found = cephes.expm1(z)
# this passes.
assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
# this fails.
assert_array_almost_equal_nulp(found.real, expected.real, 20)
def test_fdtr(self):
assert_equal(cephes.fdtr(1, 1, 0), 0.0)
# Computed using Wolfram Alpha: CDF[FRatioDistribution[1e-6, 5], 10]
assert_allclose(cephes.fdtr(1e-6, 5, 10), 0.9999940790193488,
rtol=1e-12)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1, 1, 0), 1.0)
# Computed using Wolfram Alpha:
# 1 - CDF[FRatioDistribution[2, 1/10], 1e10]
assert_allclose(cephes.fdtrc(2, 0.1, 1e10), 0.27223784621293512,
rtol=1e-12)
def test_fdtri(self):
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
# From Wolfram Alpha:
# CDF[FRatioDistribution[1/10, 1], 3] = 0.8756751669632105666874...
p = 0.8756751669632105666874
assert_allclose(cephes.fdtri(0.1, 1, p), 3, rtol=1e-12)
@pytest.mark.xfail(reason='Returns nan on i686.')
def test_fdtri_mysterious_failure(self):
assert_allclose(cephes.fdtri(1, 1, 0.5), 1)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtr_inf(self):
assert_equal(cephes.gdtr(1,1,np.inf),1.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
log1p = cephes.log1p
assert_equal(log1p(0), 0.0)
assert_equal(log1p(-1), -np.inf)
assert_equal(log1p(-2), np.nan)
assert_equal(log1p(np.inf), np.inf)
    # earlier numpy versions don't guarantee that npy_clog conforms to C99
@pytest.mark.skipif(NumpyVersion(np.__version__) < '1.9.0', reason='')
def test_log1p_complex(self):
log1p = cephes.log1p
c = complex
assert_equal(log1p(0 + 0j), 0 + 0j)
assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
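        # i.e. Mc2(m, q, -z) = -Mc2(m, q, z) - 2*fr*Mc1(m, q, z)
        # with fr = -Mc2(m, q, 0) / Mc1(m, q, 0).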
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
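        # i.e. Ms2(m, q, -z) = Ms2(m, q, z) - 2*fr*Ms1(m, q, z)
        # with fr = Ms2'(m, q, 0) / Ms1'(m, q, 0) (ratio of the derivatives).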
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
        # Values from ACM TOMS 804 (derivative obtained by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1, 1, 1, 0), 0.0)
f = [0.5, 1, 1.5]
p = cephes.ncfdtr(2, 3, 1.5, f)
assert_allclose(cephes.ncfdtri(2, 3, 1.5, p), f)
def test_ncfdtridfd(self):
dfd = [1, 2, 3]
p = cephes.ncfdtr(2, dfd, 0.25, 15)
assert_allclose(cephes.ncfdtridfd(2, p, 0.25, 15), dfd)
def test_ncfdtridfn(self):
dfn = [0.1, 1, 2, 3, 1e4]
p = cephes.ncfdtr(dfn, 2, 0.25, 15)
assert_allclose(cephes.ncfdtridfn(p, 2, 0.25, 15), dfn, rtol=1e-5)
def test_ncfdtrinc(self):
nc = [0.5, 1.5, 2.0]
p = cephes.ncfdtr(2, 3, nc, 15)
assert_allclose(cephes.ncfdtrinc(2, 3, p, 15), nc)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_allclose(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "floating point number truncated to an integer")
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
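        # The Poisson CDF satisfies pdtr(k, m) == gammaincc(k + 1, m), so
        # inverting in k and substituting back must recover the probability.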
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_wofz(self):
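        # wofz is the Faddeeva function w(z) = exp(-z**2) * erfc(-1j*z);
        # z and w below are matched pairs of high-precision reference values.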
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(object):
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
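        # airye returns exponentially scaled values: Ai, Ai' carry a factor
        # exp((2/3)*z**(3/2)) and Bi, Bi' a factor exp(-|Re((2/3)*z**(3/2))|);
        # apply the same factors to the unscaled airy() values for comparison.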
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),10)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre(object):
def test_assoc_laguerre(self):
a1 = special.genlaguerre(11,1)
a2 = special.assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = special.assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(object):
def test_besselpoly(self):
pass
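    def test_besselpoly_quadrature(self):
        # Hedged sketch (assumed definition): besselpoly(a, lmb, nu) computes
        # the integral of x**lmb * jv(nu, 2*a*x) over x in [0, 1]; check one
        # point against direct quadrature.
        from scipy.integrate import quad
        a, lmb, nu = 1.5, 2.0, 1.0
        expected, _ = quad(lambda x: x**lmb * special.jv(nu, 2*a*x), 0, 1)
        assert_allclose(special.besselpoly(a, lmb, nu), expected, rtol=1e-7)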
class TestKelvin(object):
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
# Abramowitz & Stegun, Table 9.12
bi = special.bei_zeros(5)
assert_array_almost_equal(bi,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),8)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(object):
def test_bernoulli(self):
brn = special.bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(object):
def test_beta(self):
bet = special.beta(2,4)
betg = (special.gamma(2)*special.gamma(4))/special.gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = special.betaln(2,4)
bet = log(abs(special.beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = special.betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = special.betaincinv(2,4,.5)
comp = special.betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCombinatorics(object):
def test_comb(self):
assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
assert_almost_equal(special.comb(10, 3), 120.)
assert_equal(special.comb(10, 3, exact=True), 120)
assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
special.comb(20, list(range(21))), atol=1e-15)
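        # C(n, n - 1) == n must hold even when n exceeds the native int range.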
ii = np.iinfo(int).max + 1
assert_equal(special.comb(ii, ii-1, exact=True), ii)
expected = 100891344545564193334812497256
assert_equal(special.comb(100, 50, exact=True), expected)
def test_comb_with_np_int64(self):
n = 70
k = 30
np_n = np.int64(n)
np_k = np.int64(k)
assert_equal(special.comb(np_n, np_k, exact=True),
special.comb(n, k, exact=True))
def test_comb_zeros(self):
assert_equal(special.comb(2, 3, exact=True), 0)
assert_equal(special.comb(-1, 3, exact=True), 0)
assert_equal(special.comb(2, -1, exact=True), 0)
assert_equal(special.comb(2, -1, exact=False), 0)
assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 120.])
def test_perm(self):
assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
assert_almost_equal(special.perm(10, 3), 720.)
assert_equal(special.perm(10, 3, exact=True), 720)
def test_perm_zeros(self):
assert_equal(special.perm(2, 3, exact=True), 0)
assert_equal(special.perm(-1, 3, exact=True), 0)
assert_equal(special.perm(2, -1, exact=True), 0)
assert_equal(special.perm(2, -1, exact=False), 0)
assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
[0., 0., 0., 720.])
class TestTrigonometric(object):
def test_cbrt(self):
cb = special.cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = special.cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = special.cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = special.cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = special.cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = special.cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.cotdg(45), 1.0, 14)
assert_almost_equal(special.cotdg(-45), -1.0, 14)
assert_almost_equal(special.cotdg(90), 0.0, 14)
assert_almost_equal(special.cotdg(-90), 0.0, 14)
assert_almost_equal(special.cotdg(135), -1.0, 14)
assert_almost_equal(special.cotdg(-135), 1.0, 14)
assert_almost_equal(special.cotdg(225), 1.0, 14)
assert_almost_equal(special.cotdg(-225), -1.0, 14)
assert_almost_equal(special.cotdg(270), 0.0, 14)
assert_almost_equal(special.cotdg(-270), 0.0, 14)
assert_almost_equal(special.cotdg(315), -1.0, 14)
assert_almost_equal(special.cotdg(-315), 1.0, 14)
assert_almost_equal(special.cotdg(765), 1.0, 14)
def test_sinc(self):
# the sinc implementation and more extensive sinc tests are in numpy
assert_array_equal(special.sinc([0]), 1)
assert_equal(special.sinc(0.0), 1.0)
def test_sindg(self):
sn = special.sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = special.sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = special.sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(object):
def test_tandg(self):
tn = special.tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = special.tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = special.tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(special.tandg(0), 0.0, 14)
assert_almost_equal(special.tandg(45), 1.0, 14)
assert_almost_equal(special.tandg(-45), -1.0, 14)
assert_almost_equal(special.tandg(135), -1.0, 14)
assert_almost_equal(special.tandg(-135), 1.0, 14)
assert_almost_equal(special.tandg(180), 0.0, 14)
assert_almost_equal(special.tandg(-180), 0.0, 14)
assert_almost_equal(special.tandg(225), 1.0, 14)
assert_almost_equal(special.tandg(-225), -1.0, 14)
assert_almost_equal(special.tandg(315), -1.0, 14)
assert_almost_equal(special.tandg(-315), 1.0, 14)
class TestEllip(object):
def test_ellipj_nan(self):
"""Regression test for #912."""
special.ellipj(0.5, np.nan)
def test_ellipj(self):
el = special.ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = special.ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
assert_equal(special.ellipkm1(0.0), np.inf)
assert_equal(special.ellipkm1(1.0), pi/2)
assert_equal(special.ellipkm1(np.inf), 0.0)
assert_equal(special.ellipkm1(np.nan), np.nan)
assert_equal(special.ellipkm1(-1), np.nan)
assert_allclose(special.ellipk(-10), 0.7908718902387385)
def test_ellipkinc(self):
elkinc = special.ellipkinc(pi/2,.2)
elk = special.ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = special.ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipkinc(pi/2, 2), np.nan)
assert_equal(special.ellipkinc(0, 0.5), 0.0)
assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
def test_ellipkinc_2(self):
# Regression test for gh-3550
# ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipkinc(phi, mvals)
assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipkinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
def test_ellipkinc_singular(self):
# ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
xlog = np.logspace(-300, -17, 25)
xlin = np.linspace(1e-17, 0.1, 25)
xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
def test_ellipe(self):
ele = special.ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
assert_equal(special.ellipe(0.0), pi/2)
assert_equal(special.ellipe(1.0), 1.0)
assert_equal(special.ellipe(-np.inf), np.inf)
assert_equal(special.ellipe(np.nan), np.nan)
assert_equal(special.ellipe(2), np.nan)
assert_allclose(special.ellipe(-10), 3.6391380384177689)
def test_ellipeinc(self):
eleinc = special.ellipeinc(pi/2,.2)
ele = special.ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = special.ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
assert_equal(special.ellipeinc(pi/2, 2), np.nan)
assert_equal(special.ellipeinc(0, 0.5), 0.0)
assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
def test_ellipeinc_2(self):
# Regression test for gh-3550
# ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
mbad = 0.68359375000000011
phi = 0.9272952180016123
m = np.nextafter(mbad, 0)
mvals = []
for j in range(10):
mvals.append(m)
m = np.nextafter(m, 1)
f = special.ellipeinc(phi, mvals)
assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
# this bug also appears at phi + n * pi for at least small n
f1 = special.ellipeinc(phi + pi, mvals)
assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(object):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def _check_variant_func(self, func, other_func, rtol, atol=0):
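        # Compare `func` against the mathematically equivalent `other_func`
        # on heavy-tailed random real and complex points, discarding points
        # where the reference itself fails to be finite.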
np.random.seed(1234)
n = 10000
x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
z = x + 1j*y
old_errors = np.seterr(all='ignore')
try:
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
finally:
np.seterr(**old_errors)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erfcinv(self):
i = special.erfcinv(1)
# Use assert_array_equal instead of assert_equal, so the comparison
# of -0.0 and 0.0 doesn't fail.
assert_array_equal(i, 0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`errprint` is deprecated!")
a = special.errprint()
            b = 1-a  # a is the current state; 1-a inverts it
c = special.errprint(b) # returns last state 'a'
d = special.errprint(a) # returns to original state
assert_equal(a,c)
assert_equal(d,b) # makes sure state was returned
def test_erf_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -1, 1]
assert_allclose(special.erf(vals), expected, rtol=1e-15)
def test_erfc_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, 2, 0]
assert_allclose(special.erfc(vals), expected, rtol=1e-15)
def test_erfcx_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, np.inf, 0]
assert_allclose(special.erfcx(vals), expected, rtol=1e-15)
def test_erfi_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -np.inf, np.inf]
assert_allclose(special.erfi(vals), expected, rtol=1e-15)
def test_dawsn_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -0.0, 0.0]
assert_allclose(special.dawsn(vals), expected, rtol=1e-15)
def test_wofz_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
assert_allclose(special.wofz(vals), expected, rtol=1e-15)
class TestEuler(object):
def test_euler(self):
eu0 = special.euler(0)
eu1 = special.euler(1)
eu2 = special.euler(2) # just checking segfaults
assert_allclose(eu0, [1], rtol=1e-15)
assert_allclose(eu1, [1, 0], rtol=1e-15)
assert_allclose(eu2, [1, 0, -1], rtol=1e-15)
eu24 = special.euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145,2404879675441,
370371188237525,69348874393137901,
15514534163557086905]
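        # E_n vanishes for odd n, and the tabulated magnitudes alternate in
        # sign: E_{2k} = (-1)**k * mathworld[k].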
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
olderr = np.seterr(all='ignore')
try:
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
finally:
np.seterr(**olderr)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(object):
def test_exp2(self):
ex = special.exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = special.exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = special.exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = special.exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (special.expm1(2),special.expm1(3),special.expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (special.expm1(2),special.expm1(2.1),special.expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFactorialFunctions(object):
def test_factorial(self):
# Some known values, float math
assert_array_almost_equal(special.factorial(0), 1)
assert_array_almost_equal(special.factorial(1), 1)
assert_array_almost_equal(special.factorial(2), 2)
assert_array_almost_equal([6., 24., 120.],
special.factorial([3, 4, 5], exact=False))
assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
[[120, 6], [24, 6]])
# Some known values, integer math
assert_equal(special.factorial(0, exact=True), 1)
assert_equal(special.factorial(1, exact=True), 1)
assert_equal(special.factorial(2, exact=True), 2)
assert_equal(special.factorial(5, exact=True), 120)
assert_equal(special.factorial(15, exact=True), 1307674368000)
# ndarray shape is maintained
assert_equal(special.factorial([7, 4, 15, 10], exact=True),
[5040, 24, 1307674368000, 3628800])
assert_equal(special.factorial([[5, 3], [4, 3]], True),
[[120, 6], [24, 6]])
# object arrays
assert_equal(special.factorial(np.arange(-3, 22), True),
special.factorial(np.arange(-3, 22), False))
# int64 array
assert_equal(special.factorial(np.arange(-3, 15), True),
special.factorial(np.arange(-3, 15), False))
# int32 array
assert_equal(special.factorial(np.arange(-3, 5), True),
special.factorial(np.arange(-3, 5), False))
# Consistent output for n < 0
for exact in (True, False):
assert_array_equal(0, special.factorial(-3, exact))
assert_array_equal([1, 2, 0, 0],
special.factorial([1, 2, -5, -4], exact))
for n in range(0, 22):
# Compare all with math.factorial
correct = math.factorial(n)
assert_array_equal(correct, special.factorial(n, True))
assert_array_equal(correct, special.factorial([n], True)[0])
assert_allclose(float(correct), special.factorial(n, False))
assert_allclose(float(correct), special.factorial([n], False)[0])
# Compare exact=True vs False, scalar vs array
assert_array_equal(special.factorial(n, True),
special.factorial(n, False))
assert_array_equal(special.factorial([n], True),
special.factorial([n], False))
def test_factorial2(self):
assert_array_almost_equal([105., 384., 945.],
special.factorial2([7, 8, 9], exact=False))
assert_equal(special.factorial2(7, exact=True), 105)
def test_factorialk(self):
assert_equal(special.factorialk(5, 1, exact=True), 120)
assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(object):
def test_fresnel(self):
frs = array(special.fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
def test_fresnel_inf1(self):
frs = special.fresnel(np.inf)
assert_equal(frs, (0.5, 0.5))
def test_fresnel_inf2(self):
frs = special.fresnel(-np.inf)
assert_equal(frs, (-0.5, -0.5))
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = special.fresnel_zeros(5)
assert_array_almost_equal(szo,
array([2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = special.fresnel(szo)[0]
vals2 = special.fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = special.fresnel_zeros(6)
frc = special.fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = special.fresnel_zeros(5)
frs = special.fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(object):
def test_gamma(self):
gam = special.gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = special.gammaln(3)
lngam = log(special.gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = special.gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincnan(self):
gama = special.gammainc(-1,1)
assert_(isnan(gama))
def test_gammainczero(self):
# bad arg but zero integration limit
gama = special.gammainc(-1,0)
assert_equal(gama,0.0)
def test_gammaincinf(self):
gama = special.gammainc(0.5, np.inf)
assert_equal(gama,1.0)
def test_gammaincc(self):
gicc = special.gammaincc(.5,.5)
greal = 1 - special.gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccnan(self):
gama = special.gammaincc(-1,1)
assert_(isnan(gama))
def test_gammainccinf(self):
gama = special.gammaincc(0.5,np.inf)
assert_equal(gama,0.0)
def test_gammainccinv(self):
gccinv = special.gammainccinv(.5,.5)
gcinv = special.gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
@with_special_errors
def test_gammaincinv(self):
y = special.gammaincinv(.4,.4)
x = special.gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = special.gammainc(10, 0.05)
x = special.gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
@with_special_errors
def test_975(self):
# Regression test for ticket #975 -- switch point in algorithm
# check that things work OK at the point, immediately next floats
# around it, and a bit further away
pts = [0.25,
np.nextafter(0.25, 0), 0.25 - 1e-12,
np.nextafter(0.25, 1), 0.25 + 1e-12]
for xp in pts:
y = special.gammaincinv(.4, xp)
x = special.gammainc(0.4, y)
assert_allclose(x, xp, rtol=1e-12)
def test_rgamma(self):
rgam = special.rgamma(8)
rlgam = 1/special.gamma(8)
assert_almost_equal(rgam,rlgam,8)
def test_infinity(self):
assert_(np.isinf(special.gamma(-1)))
assert_equal(special.rgamma(-1), 0)
class TestHankel(object):
def test_negv1(self):
assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
def test_hankel1(self):
hank1 = special.hankel1(1,.1)
hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
def test_negv1e(self):
assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = special.hankel1e(1,.1)
hankrle = special.hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
def test_negv2(self):
assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
def test_hankel2(self):
hank2 = special.hankel2(1,.1)
hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
def test_neg2e(self):
assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
    def test_hankel2e(self):
        hank2e = special.hankel2e(1,.1)
        hankrl2e = special.hankel2(1,.1)*exp(.1j)
        assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(object):
def test_h1vp(self):
h1 = special.h1vp(1,.1)
h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = special.h2vp(1,.1)
h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
# scalar input
assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
# float input, expected values match mpmath
x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
1.37789689539747, 1.60373685288480])
assert_allclose(x, expected, rtol=1e-12)
# complex input
x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
assert_allclose(x, expected.astype(complex), rtol=1e-12)
# test broadcasting
x1 = [0.5, 1.5, 2.5]
x2 = [0, 1, 0.5]
x = special.hyp0f1(x1, x2)
expected = [1.0, 1.8134302039235093, 1.21482702689997]
assert_allclose(x, expected, rtol=1e-12)
x = special.hyp0f1(np.row_stack([x1] * 2), x2)
assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
assert_raises(ValueError, special.hyp0f1,
np.row_stack([x1] * 3), [0, 1])
def test_hyp0f1_gh5764(self):
# Just checks the point that failed; there's a more systematic
# test in test_mpmath
res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
# The expected value was generated using mpmath
assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)
def test_hyp1f1(self):
hyp1 = special.hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = special.hyp1f1(a,b,c)
assert_(abs(expected - result)/expected < 1e-4)
def test_hyp1f1_gh2957(self):
hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
assert_almost_equal(hyp1, hyp2, 12)
def test_hyp1f1_gh2282(self):
hyp = special.hyp1f1(0.5, 1.5, -1000)
assert_almost_equal(hyp, 0.028024956081989643, 12)
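    def test_hyp1f1_kummer_transform(self):
        # Sanity sketch (not part of the original suite): Kummer's
        # transformation M(a, b, z) = exp(z)*M(b - a, b, -z) should hold
        # for generic parameters.
        a, b, z = 0.3, 1.7, 2.5
        assert_allclose(special.hyp1f1(a, b, z),
                        np.exp(z)*special.hyp1f1(b - a, b, -z), rtol=1e-10)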
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
# negative integer a or b, with c-a-b integer and x > 0.9
[-2,3,1,0.95,0.715],
[2,-3,1,0.95,-0.007],
[-6,3,1,0.95,0.0000810625],
[2,-5,1,0.95,-0.000029375],
# huge negative integers
(10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
(10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
]
for i, (a, b, c, x, v) in enumerate(values):
cv = special.hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
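    def test_hyp2f1_euler_transform(self):
        # Sanity sketch (not part of the AMS 55 table above): the Euler
        # transformation
        #     2F1(a, b; c; z) = (1 - z)**(c - a - b) * 2F1(c - a, c - b; c; z)
        # should hold inside the unit disk.
        a, b, c, z = 0.5, 1.25, 2.5, 0.3
        assert_allclose(special.hyp2f1(a, b, c, z),
                        (1 - z)**(c - a - b)*special.hyp2f1(c - a, c - b, c, z),
                        rtol=1e-10)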
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = special.hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = special.hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
(special.gamma(1+a-b)*special.gamma(b)) -
z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
/ (special.gamma(a)*special.gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
def test_hyperu_gh2287(self):
assert_almost_equal(special.hyperu(1, 1.5, 20.2),
0.048360918656699191, 12)
class TestBessel(object):
def test_itj0y0(self):
it0 = array(special.itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(special.it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
def test_negv_iv(self):
assert_equal(special.iv(3,2), special.iv(-3,2))
def test_j0(self):
oz = special.j0(.1)
ozr = special.jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = special.j1(.1)
o1r = special.jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = special.jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
def test_negv_jv(self):
assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = special.jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
def test_negv_jve(self):
assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)
def test_jve(self):
jvexp = special.jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = special.jve(1,.2+1j)
z = .2+1j
jvexpr = special.jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = special.jn_zeros(0,5)
jn1 = special.jn_zeros(1,5)
assert_array_almost_equal(jn0,array([2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = special.jn_zeros(102,5)
assert_allclose(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = special.jn_zeros(301,5)
assert_allclose(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
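        # cross-check (added sanity sketch, not an original assertion):
        # the returned abscissae should actually be zeros of J0 and J301
        assert_allclose(special.jn(0, jn0), 0, atol=1e-10)
        assert_allclose(special.jn(301, jn301), 0, atol=1e-10)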
def test_jn_zeros_slow(self):
jn0 = special.jn_zeros(0, 300)
assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = special.jn_zeros(10, 300)
assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = special.jn_zeros(3010,5)
assert_allclose(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
jn = special.jn
def jnp(n, x):
return (jn(n-1,x) - jn(n+1,x))/2
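        # (jnp uses the standard derivative recurrence
        #  Jn'(x) = (J_{n-1}(x) - J_{n+1}(x))/2)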
for nt in range(1, 30):
z, n, m, t = special.jnjnp_zeros(nt)
for zz, nn, tt in zip(z, n, t):
if tt == 0:
assert_allclose(jn(nn, zz), 0, atol=1e-6)
elif tt == 1:
assert_allclose(jnp(nn, zz), 0, atol=1e-6)
else:
raise AssertionError("Invalid t return for nt=%d" % nt)
def test_jnp_zeros(self):
jnp = special.jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = special.jnp_zeros(443,5)
assert_allclose(special.jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = special.jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = special.jvp(2,2)
jv0 = (special.jv(1,2)-special.jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
def test_k0(self):
ozk = special.k0(.1)
ozkr = special.kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = special.k0e(.1)
ozker = special.kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = special.k1(.1)
o1kr = special.kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = special.k1e(.1)
o1ker = special.kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*np.random.random() - 1
b = 5*np.random.random() - 1
P0 = special.jacobi(0,a,b)
P1 = special.jacobi(1,a,b)
P2 = special.jacobi(2,a,b)
P3 = special.jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = special.kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
def test_negv_kv(self):
assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))
def test_kv0(self):
kv0 = special.kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = special.kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = special.kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
def test_kn_largeorder(self):
assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)
def test_kv_largearg(self):
assert_equal(special.kv(0, 1e19), 0)
def test_negv_kve(self):
assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))
def test_kve(self):
kve1 = special.kve(0,.2)
kv1 = special.kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = special.kve(0,z)
kv2 = special.kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
x = special.kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) # this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
x = special.kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
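    def test_kvp_finite_difference(self):
        # Sanity sketch (not part of the original suite): the analytic
        # first derivative from kvp should match a central finite
        # difference of kv.
        v, z, h = 3., 2.2, 1e-6
        fd = (special.kv(v, z + h) - special.kv(v, z - h))/(2*h)
        assert_allclose(special.kvp(v, z, n=1), fd, rtol=1e-8)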
def test_y0(self):
oz = special.y0(.1)
ozr = special.yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = special.y1(.1)
o1r = special.yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = special.y0_zeros(2)
zo,zpo = special.y0_zeros(2,complex=1)
all = r_[yo,zo]
allval = r_[ypo,zpo]
assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)
def test_y1_zeros(self):
y1 = special.y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = special.y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = special.yn_zeros(4,2)
assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
an = special.yn_zeros(443,5)
assert_allclose(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = special.ynp_zeros(0,2)
assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
ao = special.ynp_zeros(43,5)
assert_allclose(special.yvp(43, ao), 0, atol=1e-15)
ao = special.ynp_zeros(443,5)
assert_allclose(special.yvp(443, ao), 0, atol=1e-9)
def test_ynp_zeros_large_order(self):
ao = special.ynp_zeros(443,5)
assert_allclose(special.yvp(443, ao), 0, atol=1e-14)
def test_yn(self):
yn2n = special.yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
def test_negv_yv(self):
assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)
def test_yv(self):
yv2 = special.yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
def test_negv_yve(self):
assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)
def test_yve(self):
yve2 = special.yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = special.yv(1,.2+1j)*exp(-1)
yve22 = special.yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
yvp1 = special.yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert_(np.abs(c2) >= 1e300, (v, z))
elif np.isnan(c1):
assert_(c2.imag != 0, (v, z))
else:
assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_allclose(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
olderr = np.seterr(all='ignore')
try:
self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
finally:
np.seterr(**olderr)
@pytest.mark.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
v[imsk] = v[imsk].astype(int)
old_err = np.seterr(all='ignore')
try:
c1 = special.iv(v, x)
c2 = special.iv(v, x+0j)
# deal with differences in the inf and zero cutoffs
c1[abs(c1) > 1e300] = np.inf
c2[abs(c2) > 1e300] = np.inf
c1[abs(c1) < 1e-300] = 0
c2[abs(c2) < 1e-300] = 0
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
finally:
np.seterr(**old_err)
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))
def test_kv_cephes_vs_amos(self):
self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_allclose(special.jv(3, 4), 0.43017147387562193)
assert_allclose(special.jv(301, 1300), 0.0183487151115275)
assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_allclose(special.jv(-1, 1), -0.4400505857449335)
assert_allclose(special.jv(-2, 1), 0.1149034849319005)
assert_allclose(special.yv(-1, 1), 0.7812128213002887)
assert_allclose(special.yv(-2, 1), -1.650682606816255)
assert_allclose(special.iv(-1, 1), 0.5651591039924851)
assert_allclose(special.iv(-2, 1), 0.1357476697670383)
assert_allclose(special.kv(-1, 1), 0.6019072301972347)
assert_allclose(special.kv(-2, 1), 1.624838898635178)
assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952)
assert_allclose(special.yv(-0.5, 1), 0.6713967071418031)
assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
assert_allclose(special.kv(-0.5, 1), 0.4610685044478945)
# amos
assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335)
assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005)
assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887)
assert_allclose(special.yv(-2, 1+0j), -1.650682606816255)
assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851)
assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383)
assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347)
assert_allclose(special.kv(-2, 1+0j), 1.624838898635178)
assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_allclose(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_allclose(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_allclose(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_gh_7909(self):
assert_(special.kv(1.5, 0) == np.inf)
assert_(special.kve(1.5, 0) == np.inf)
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_allclose(special.iv(1, 700), 1.528500390233901e302)
assert_allclose(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
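        # Direct power series sum_k (z/2)**(v + 2*k)/(k!*Gamma(v + k + 1)),
        # evaluated in log space to avoid premature overflow; `err` is a
        # crude bound combining roundoff and the truncated tail.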
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_allclose(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_allclose(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(object):
def test_laguerre(self):
lag0 = special.laguerre(0)
lag1 = special.laguerre(1)
lag2 = special.laguerre(2)
lag3 = special.laguerre(3)
lag4 = special.laguerre(4)
lag5 = special.laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*np.random.random() - 0.9
lag0 = special.genlaguerre(0,k)
lag1 = special.genlaguerre(1,k)
lag2 = special.genlaguerre(2,k)
lag3 = special.genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
    # Base polynomials come from Abramowitz and Stegun
class TestLegendre(object):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c, [1])
assert_equal(leg1.c, [1,0])
assert_almost_equal(leg2.c, array([3,0,-1])/2.0, decimal=13)
assert_almost_equal(leg3.c, array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c, array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c, array([63,0,-70,0,15,0])/8.0)
class TestLambda(object):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(object):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(object):
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = np.seterr(all='ignore')
try:
lp = special.lpmv(-1,-1,.001)
finally:
np.seterr(**olderr)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(object):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
        # No assertion: the reference values ("Q") are not defined and the
        # proper reporting order of the coefficients could not be determined.
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral(object):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(object):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(object):
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
        pbv = special.pbdv(1,.2)
        # recurrence: Dv'(x) = v*D_{v-1}(x) - (x/2)*Dv(x)
        derrl = special.pbdv(0,.2)[0] - 1/2*(.2)*special.pbdv(1,.2)[0]
        assert_almost_equal(pbv[1], derrl, 10)
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(object):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
# Test polygamma(0, x) == psi(x)
x = [2, 3, 1.1e14]
assert_almost_equal(special.polygamma(0, x), special.psi(x))
# Test broadcasting
n = [0, 1, 2]
x = [0.5, 1.5, 2.5]
expected = [-1.9635100260214238, 0.93480220054467933,
-0.23620405164172739]
assert_almost_equal(special.polygamma(n, x), expected)
expected = np.row_stack([expected]*2)
assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
expected)
assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
expected)
class TestProCvSeq(object):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(object):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(object):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(object):
def test_riccati_jn(self):
N, x = 2, 0.2
S = np.empty((N, N))
for n in range(N):
j = special.spherical_jn(n, x)
jp = special.spherical_jn(n, x, derivative=True)
S[0,n] = x*j
S[1,n] = x*jp + j
assert_array_almost_equal(S, special.riccati_jn(n, x), 8)
def test_riccati_yn(self):
N, x = 2, 0.2
C = np.empty((N, N))
for n in range(N):
y = special.spherical_yn(n, x)
yp = special.spherical_yn(n, x, derivative=True)
C[0,n] = x*y
C[1,n] = x*yp + y
assert_array_almost_equal(C, special.riccati_yn(n, x), 8)
class TestRound(object):
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
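    # note: in scipy's convention sph_harm(m, n, theta, phi) takes the
    # azimuthal angle theta first and the polar angle phi second
    # (opposite to the convention used in the Wikipedia tables)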
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
assert_array_almost_equal(sh(0,0,0,0),
0.5/sqrt(pi))
assert_array_almost_equal(sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
assert_array_almost_equal(sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
assert_array_almost_equal(sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
assert_array_almost_equal(sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
assert_array_almost_equal(sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
                assert_allclose(special.struve(v, z), value, rtol=0, atol=err,
                                err_msg=repr((v, z)))
def test_some_values(self):
assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_allclose(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_allclose(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_allclose(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
assert_almost_equal(special.chdtr(0.6,3), 0.957890536704110)
def test_ch2_inf():
assert_equal(special.chdtr(0.7,np.inf), 1.0)
def test_chi2c_smalldf():
assert_almost_equal(special.chdtrc(0.6,3), 1-0.957890536704110)
def test_chi2_inv_smalldf():
assert_almost_equal(special.chdtri(0.6,1-0.957890536704110), 3)
def test_agm_simple():
rtol = 1e-13
# Gauss's constant
assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186,
rtol=rtol)
# These values were computed using Wolfram Alpha, with the
# function ArithmeticGeometricMean[a, b].
agm13 = 1.863616783244897
agm15 = 2.604008190530940
agm35 = 3.936235503649555
assert_allclose(special.agm([[1], [3]], [1, 3, 5]),
[[1, agm13, agm15],
[agm13, 3, agm35]], rtol=rtol)
# Computed by the iteration formula using mpmath,
# with mpmath.mp.prec = 1000:
agm12 = 1.4567910310469068
assert_allclose(special.agm(1, 2), agm12, rtol=rtol)
assert_allclose(special.agm(2, 1), agm12, rtol=rtol)
assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol)
assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol)
assert_allclose(special.agm(13, 123456789.5), 11111458.498599306,
rtol=rtol)
assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol)
assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol)
assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178,
rtol=rtol)
assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177,
rtol=rtol)
assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152,
rtol=rtol)
fi = np.finfo(1.0)
assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305,
rtol=rtol)
assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308,
rtol=rtol)
assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308,
rtol=rtol)
# zero, nan and inf cases.
assert_equal(special.agm(0, 0), 0)
assert_equal(special.agm(99, 0), 0)
assert_equal(special.agm(-1, 10), np.nan)
assert_equal(special.agm(0, np.inf), np.nan)
assert_equal(special.agm(np.inf, 0), np.nan)
assert_equal(special.agm(0, -np.inf), np.nan)
assert_equal(special.agm(-np.inf, 0), np.nan)
assert_equal(special.agm(np.inf, -np.inf), np.nan)
assert_equal(special.agm(-np.inf, np.inf), np.nan)
assert_equal(special.agm(1, np.nan), np.nan)
assert_equal(special.agm(np.nan, -1), np.nan)
assert_equal(special.agm(1, np.inf), np.inf)
assert_equal(special.agm(np.inf, 1), np.inf)
assert_equal(special.agm(-1, -np.inf), -np.inf)
assert_equal(special.agm(-np.inf, -1), -np.inf)
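def test_agm_iteration_reference():
    # Minimal sketch (not part of the original suite): special.agm should
    # agree with a direct float implementation of the defining
    # arithmetic-geometric-mean iteration.
    a, b = 24.0, 6.0
    while abs(a - b) > 1e-15*abs(a):
        a, b = 0.5*(a + b), np.sqrt(a*b)
    assert_allclose(special.agm(24, 6), a, rtol=1e-13)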
def test_legacy():
# Legacy behavior: truncating arguments to integers
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "floating point number truncated to an integer")
assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
with np.errstate(invalid='ignore'):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
with np.errstate(invalid='ignore'):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
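def test_rel_entr_kl_div_relation():
    # Sanity sketch (not part of the original suite): on the interior of
    # the domain the two functions differ only by the linear term,
    # kl_div(x, y) = rel_entr(x, y) - x + y.
    x = np.array([0.5, 1.0, 2.0])
    y = np.array([0.25, 1.5, 4.0])
    assert_allclose(special.kl_div(x, y), special.rel_entr(x, y) - x + y,
                    rtol=1e-13)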
def test_huber():
assert_equal(special.huber(-1, 1.5), np.inf)
assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))
def xfunc(delta, r):
if delta < 0:
return np.inf
elif np.abs(r) < delta:
return 0.5 * np.square(r)
else:
return delta * (np.abs(r) - 0.5 * delta)
z = np.random.randn(10, 2)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
def xfunc(delta, r):
if delta < 0:
return np.inf
elif (not delta) or (not r):
return 0
else:
return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)
z = np.array(np.random.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_round.py
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from scipy.special import _test_round
@pytest.mark.skipif(not _test_round.have_fenv(), reason="no fenv()")
def test_add_round_up():
np.random.seed(1234)
_test_round.test_add_round(10**5, 'up')
@pytest.mark.skipif(not _test_round.have_fenv(), reason="no fenv()")
def test_add_round_down():
np.random.seed(1234)
_test_round.test_add_round(10**5, 'down')
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_ellip_harm.py
#
# Tests for the Ellipsoidal Harmonic Function,
# Distributed under the same license as SciPy itself.
#
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_)
from scipy._lib._numpy_compat import suppress_warnings
from scipy.special._testutils import assert_func_equal
from scipy.special import ellip_harm, ellip_harm_2, ellip_normal
from scipy.integrate import IntegrationWarning
from numpy import sqrt, pi
def test_ellip_potential():
def change_coefficient(lambda1, mu, nu, h2, k2):
x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2))
y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2)))
z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2)))
return x, y, z
def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2):
return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu)
* ellip_harm(h2, k2, n, p, nu))
def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2):
return (ellip_harm_2(h2, k2, n, p, lambda1)
* ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu))
def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
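        # Expansion of 1/|r1 - r2| in products of ellipsoidal harmonics of
        # the first and second kind, truncated once the newest partial sum
        # is negligible relative to the running total.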
tol = 1e-8
sum1 = 0
for n in range(20):
xsum = 0
for p in range(1, 2*n+2):
xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2)
* solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) /
(ellip_normal(h2, k2, n, p)*(2*n + 1)))
if abs(xsum) < 0.1*tol*abs(sum1):
break
sum1 += xsum
return sum1, xsum
def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2)
x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2)
res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
return 1/res
pts = [
(120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25),
(120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20),
]
with suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error")
sup.filter(IntegrationWarning, "The maximum number of subdivisions")
for p in pts:
err_msg = repr(p)
exact = potential(*p)
result, last_term = summation(*p)
assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg)
assert_(abs(result - exact) < 10*abs(last_term), err_msg)
def test_ellip_norm():
def G01(h2, k2):
return 4*pi
def G11(h2, k2):
return 4*pi*h2*k2/3
def G12(h2, k2):
return 4*pi*h2*(k2 - h2)/3
def G13(h2, k2):
return 4*pi*k2*(k2 - h2)/3
def G22(h2, k2):
res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 +
sqrt(h2**2 + k2**2 - h2*k2)*(-2*(h2**3 + k2**3) + 3*h2*k2*(h2 + k2)))
return 16*pi/405*res
def G21(h2, k2):
res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2
+ sqrt(h2**2 + k2**2 - h2*k2)*(2*(h2**3 + k2**3) - 3*h2*k2*(h2 + k2)))
return 16*pi/405*res
def G23(h2, k2):
return 4*pi*h2**2*k2*(k2 - h2)/15
def G24(h2, k2):
return 4*pi*h2*k2**2*(k2 - h2)/15
def G25(h2, k2):
return 4*pi*h2*k2*(k2 - h2)**2/15
def G32(h2, k2):
res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
+ sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(-8*(h2**3 + k2**3) +
11*h2*k2*(h2 + k2)))
return 16*pi/13125*k2*h2*res
def G31(h2, k2):
res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
+ sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(8*(h2**3 + k2**3) -
11*h2*k2*(h2 + k2)))
return 16*pi/13125*h2*k2*res
def G34(h2, k2):
res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(h2**2 + 4*k2**2 - h2*k2)*(-6*h2**3 - 8*k2**3 + 9*h2**2*k2 +
13*h2*k2**2))
return 16*pi/13125*h2*(k2 - h2)*res
def G33(h2, k2):
res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(h2**2 + 4*k2**2 - h2*k2)*(6*h2**3 + 8*k2**3 - 9*h2**2*k2 -
13*h2*k2**2))
return 16*pi/13125*h2*(k2 - h2)*res
def G36(h2, k2):
res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(4*h2**2 + k2**2 - h2*k2)*(-8*h2**3 - 6*k2**3 + 13*h2**2*k2 +
9*h2*k2**2))
return 16*pi/13125*k2*(k2 - h2)*res
def G35(h2, k2):
res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(4*h2**2 + k2**2 - h2*k2)*(8*h2**3 + 6*k2**3 - 13*h2**2*k2 -
9*h2*k2**2))
return 16*pi/13125*k2*(k2 - h2)*res
def G37(h2, k2):
return 4*pi*h2**2*k2**2*(k2 - h2)**2/105
known_funcs = {(0, 1): G01, (1, 1): G11, (1, 2): G12, (1, 3): G13,
(2, 1): G21, (2, 2): G22, (2, 3): G23, (2, 4): G24,
(2, 5): G25, (3, 1): G31, (3, 2): G32, (3, 3): G33,
(3, 4): G34, (3, 5): G35, (3, 6): G36, (3, 7): G37}
def _ellip_norm(n, p, h2, k2):
func = known_funcs[n, p]
return func(h2, k2)
_ellip_norm = np.vectorize(_ellip_norm)
def ellip_normal_known(h2, k2, n, p):
return _ellip_norm(n, p, h2, k2)
# generate both large and small h2 < k2 pairs
np.random.seed(1234)
h2 = np.random.pareto(0.5, size=1)
k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size))
points = []
for n in range(4):
for p in range(1, 2*n+2):
points.append((h2, k2, n*np.ones(h2.size), p*np.ones(h2.size)))
points = np.array(points)
with suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error")
assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12)
def test_ellip_harm_2():
def I1(h2, k2, s):
res = (ellip_harm_2(h2, k2, 1, 1, s)/(3 * ellip_harm(h2, k2, 1, 1, s))
+ ellip_harm_2(h2, k2, 1, 2, s)/(3 * ellip_harm(h2, k2, 1, 2, s)) +
ellip_harm_2(h2, k2, 1, 3, s)/(3 * ellip_harm(h2, k2, 1, 3, s)))
return res
with suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error")
assert_almost_equal(I1(5, 8, 10), 1/(10*sqrt((100-5)*(100-8))))
# Values produced by code from arXiv:1204.0267
assert_almost_equal(ellip_harm_2(5, 8, 2, 1, 10), 0.00108056853382)
assert_almost_equal(ellip_harm_2(5, 8, 2, 2, 10), 0.00105820513809)
assert_almost_equal(ellip_harm_2(5, 8, 2, 3, 10), 0.00106058384743)
assert_almost_equal(ellip_harm_2(5, 8, 2, 4, 10), 0.00106774492306)
assert_almost_equal(ellip_harm_2(5, 8, 2, 5, 10), 0.00107976356454)
def test_ellip_harm():
def E01(h2, k2, s):
return 1
def E11(h2, k2, s):
return s
def E12(h2, k2, s):
return sqrt(abs(s*s - h2))
def E13(h2, k2, s):
return sqrt(abs(s*s - k2))
def E21(h2, k2, s):
return s*s - 1/3*((h2 + k2) + sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
def E22(h2, k2, s):
return s*s - 1/3*((h2 + k2) - sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
def E23(h2, k2, s):
return s * sqrt(abs(s*s - h2))
def E24(h2, k2, s):
return s * sqrt(abs(s*s - k2))
def E25(h2, k2, s):
return sqrt(abs((s*s - h2)*(s*s - k2)))
def E31(h2, k2, s):
return s*s*s - (s/5)*(2*(h2 + k2) + sqrt(4*(h2 + k2)*(h2 + k2) -
15*h2*k2))
def E32(h2, k2, s):
return s*s*s - (s/5)*(2*(h2 + k2) - sqrt(4*(h2 + k2)*(h2 + k2) -
15*h2*k2))
def E33(h2, k2, s):
return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) + sqrt(abs((h2 +
2*k2)*(h2 + 2*k2) - 5*h2*k2))))
def E34(h2, k2, s):
return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) - sqrt(abs((h2 +
2*k2)*(h2 + 2*k2) - 5*h2*k2))))
def E35(h2, k2, s):
return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) + sqrt(abs((2*h2
+ k2)*(2*h2 + k2) - 5*h2*k2))))
def E36(h2, k2, s):
return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) - sqrt(abs((2*h2
+ k2)*(2*h2 + k2) - 5*h2*k2))))
def E37(h2, k2, s):
return s * sqrt(abs((s*s - h2)*(s*s - k2)))
assert_equal(ellip_harm(5, 8, 1, 2, 2.5, 1, 1),
ellip_harm(5, 8, 1, 2, 2.5))
known_funcs = {(0, 1): E01, (1, 1): E11, (1, 2): E12, (1, 3): E13,
(2, 1): E21, (2, 2): E22, (2, 3): E23, (2, 4): E24,
(2, 5): E25, (3, 1): E31, (3, 2): E32, (3, 3): E33,
(3, 4): E34, (3, 5): E35, (3, 6): E36, (3, 7): E37}
point_ref = []
def ellip_harm_known(h2, k2, n, p, s):
for i in range(h2.size):
func = known_funcs[(int(n[i]), int(p[i]))]
point_ref.append(func(h2[i], k2[i], s[i]))
return point_ref
np.random.seed(1234)
h2 = np.random.pareto(0.5, size=30)
k2 = h2*(1 + np.random.pareto(0.5, size=h2.size))
s = np.random.pareto(0.5, size=h2.size)
points = []
for i in range(h2.size):
for n in range(4):
for p in range(1, 2*n+2):
points.append((h2[i], k2[i], n, p, s[i]))
points = np.array(points)
assert_func_equal(ellip_harm, ellip_harm_known, points, rtol=1e-12)
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_wrightomega.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_equal
import scipy.special as sc
def test_wrightomega_nan():
pts = [complex(np.nan, 0),
complex(0, np.nan),
complex(np.nan, np.nan),
complex(np.nan, 1),
complex(1, np.nan)]
for p in pts:
res = sc.wrightomega(p)
assert_(np.isnan(res.real))
assert_(np.isnan(res.imag))
def test_wrightomega_inf_branch():
pts = [complex(-np.inf, np.pi/4),
complex(-np.inf, -np.pi/4),
complex(-np.inf, 3*np.pi/4),
complex(-np.inf, -3*np.pi/4)]
expected_results = [complex(0.0, 0.0),
complex(0.0, -0.0),
complex(-0.0, 0.0),
complex(-0.0, -0.0)]
for p, expected in zip(pts, expected_results):
res = sc.wrightomega(p)
# We can't use assert_equal(res, expected) because in older versions of
# numpy, assert_equal doesn't check the sign of the real and imaginary
# parts when comparing complex zeros. It does check the sign when the
# arguments are *real* scalars.
assert_equal(res.real, expected.real)
assert_equal(res.imag, expected.imag)
def test_wrightomega_inf():
pts = [complex(np.inf, 10),
complex(-np.inf, 10),
complex(10, np.inf),
complex(10, -np.inf)]
for p in pts:
assert_equal(sc.wrightomega(p), p)
def test_wrightomega_singular():
pts = [complex(-1.0, np.pi),
complex(-1.0, -np.pi)]
for p in pts:
res = sc.wrightomega(p)
assert_equal(res, -1.0)
        assert_(not np.signbit(res.imag))
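def test_wrightomega_defining_equation():
    # Sanity sketch (not part of the original suite): away from the
    # singular half-lines, omega = wrightomega(z) should satisfy the
    # defining relation omega + log(omega) = z on the principal branch.
    for z in [0.5, 2.0, complex(1.0, 1.0)]:
        w = sc.wrightomega(z)
        assert_(np.allclose(w + np.log(w), z, rtol=1e-13, atol=0))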
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_lambertw.py
#
# Tests for the lambertw function,
# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il
# Distributed under the same license as SciPy itself.
#
# [1] mpmath source code, Subversion revision 992
# http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_almost_equal
from scipy.special import lambertw
from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_
from scipy.special._testutils import FuncData
def test_values():
assert_(isnan(lambertw(nan)))
assert_equal(lambertw(inf,1).real, inf)
assert_equal(lambertw(inf,1).imag, 2*pi)
assert_equal(lambertw(-inf,1).real, inf)
assert_equal(lambertw(-inf,1).imag, 3*pi)
assert_equal(lambertw(1.), lambertw(1., 0))
data = [
(0,0, 0),
(0+0j,0, 0),
(inf,0, inf),
(0,-1, -inf),
(0,1, -inf),
(0,3, -inf),
(e,0, 1),
(1,0, 0.567143290409783873),
(-pi/2,0, 1j*pi/2),
(-log(2)/2,0, -log(2)),
(0.25,0, 0.203888354702240164),
(-0.25,0, -0.357402956181388903),
(-1./10000,0, -0.000100010001500266719),
(-0.25,-1, -2.15329236411034965),
(0.25,-1, -3.00899800997004620-4.07652978899159763j),
(-0.25,-1, -2.15329236411034965),
(0.25,1, -3.00899800997004620+4.07652978899159763j),
(-0.25,1, -3.48973228422959210+7.41405453009603664j),
(-4,0, 0.67881197132094523+1.91195078174339937j),
(-4,1, -0.66743107129800988+7.76827456802783084j),
(-4,-1, 0.67881197132094523-1.91195078174339937j),
(1000,0, 5.24960285240159623),
(1000,1, 4.91492239981054535+5.44652615979447070j),
(1000,-1, 4.91492239981054535-5.44652615979447070j),
(1000,5, 3.5010625305312892+29.9614548941181328j),
(3+4j,0, 1.281561806123775878+0.533095222020971071j),
(-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j),
(3+4j,1, -0.11691092896595324+5.61888039871282334j),
(3+4j,-1, 0.25856740686699742-3.85211668616143559j),
(-0.5,-1, -0.794023632344689368-0.770111750510379110j),
(-1./10000,1, -11.82350837248724344+6.80546081842002101j),
(-1./10000,-1, -11.6671145325663544),
(-1./10000,-2, -11.82350837248724344-6.80546081842002101j),
(-1./100000,4, -14.9186890769540539+26.1856750178782046j),
(-1./100000,5, -15.0931437726379218666+32.5525721210262290086j),
((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j),
((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j),
((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j),
((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j),
(-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j),
(-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j),
(-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j),
(-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j),
(pi,0, 1.073658194796149172092178407024821347547745350410314531),
        # Former bug in the generated branch:
(-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j),
(-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j),
(-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j),
(-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j),
]
data = array(data, dtype=complex_)
def w(x, y):
return lambertw(x, y.real.astype(int))
olderr = np.seterr(all='ignore')
try:
FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check()
finally:
np.seterr(**olderr)
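# Illustrative sketch (beyond the original tests): every branch of the
# Lambert W function satisfies w*exp(w) = z, so the defining equation gives
# a branch-independent roundtrip check. Uses only names imported above.
def test_defining_equation_sketch():
    for z in [0.5, 1 + 2j, -0.2 + 0.1j]:
        for k in [0, 1, -1]:
            w = lambertw(z, k)
            assert_array_almost_equal(w*np.exp(w), z)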
def test_ufunc():
assert_array_almost_equal(
lambertw(r_[0., e, 1.]), r_[0., 1., 0.567143290409783873])
def test_lambertw_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(lambertw(0, 0, 0).dtype, dt)
assert_equal(lambertw([0], 0, 0).dtype, dt)
assert_equal(lambertw(0, [0], 0).dtype, dt)
assert_equal(lambertw(0, 0, [0]).dtype, dt)
assert_equal(lambertw([0], [0], [0]).dtype, dt)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_owens_t.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import scipy.special as sc
def test_symmetries():
np.random.seed(1234)
a, h = np.random.rand(100), np.random.rand(100)
assert_equal(sc.owens_t(h, a), sc.owens_t(-h, a))
assert_equal(sc.owens_t(h, a), -sc.owens_t(h, -a))
def test_special_cases():
assert_equal(sc.owens_t(5, 0), 0)
assert_allclose(sc.owens_t(0, 5), 0.5*np.arctan(5)/np.pi,
rtol=5e-14)
# Target value is 0.5*Phi(5)*(1 - Phi(5)) for Phi the CDF of the
# standard normal distribution
assert_allclose(sc.owens_t(5, 1), 1.4332574485503512543e-07,
rtol=5e-14)
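# Illustrative sketch (an addition): for a = 1 Owen's T reduces to
# T(h, 1) = 0.5*Phi(h)*(1 - Phi(h)), the identity quoted above. Writing
# 1 - Phi(h) as Phi(-h) (via sc.ndtr) keeps the comparison accurate in the
# tail.
def test_a_equals_1_identity_sketch():
    h = 5.0
    assert_allclose(sc.owens_t(h, 1), 0.5*sc.ndtr(h)*sc.ndtr(-h),
                    rtol=1e-12)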
def test_nans():
assert_equal(sc.owens_t(20, np.nan), np.nan)
assert_equal(sc.owens_t(np.nan, 20), np.nan)
assert_equal(sc.owens_t(np.nan, np.nan), np.nan)
def test_infs():
h = 1
res = 0.5*sc.erfc(h/np.sqrt(2))
assert_allclose(sc.owens_t(h, np.inf), res, rtol=5e-14)
assert_allclose(sc.owens_t(h, -np.inf), -res, rtol=5e-14)
assert_equal(sc.owens_t(np.inf, 1), 0)
assert_equal(sc.owens_t(-np.inf, 1), 0)
assert_equal(sc.owens_t(np.inf, np.inf), 0)
assert_equal(sc.owens_t(-np.inf, np.inf), 0)
assert_equal(sc.owens_t(np.inf, -np.inf), -0.0)
assert_equal(sc.owens_t(-np.inf, -np.inf), -0.0)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_orthogonal_eval.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_allclose
import scipy.special.orthogonal as orth
from scipy.special._testutils import FuncData
def test_eval_chebyt():
n = np.arange(0, 10000, 7)
x = 2*np.random.rand() - 1
v1 = np.cos(n*np.arccos(x))
v2 = orth.eval_chebyt(n, x)
assert_(np.allclose(v1, v2, rtol=1e-15))
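# Illustrative sketch (not from the original file): one way to cross-check
# eval_chebyt at moderate degree is the three-term recurrence
# T_{n+1}(x) = 2*x*T_n(x) - T_{n-1}(x); whether the ufunc uses it
# internally is an implementation detail, but the values must agree.
def test_chebyt_recurrence_sketch():
    x = 0.35
    t_prev, t_cur = 1.0, x  # T_0(x), T_1(x)
    for n in range(2, 50):
        t_prev, t_cur = t_cur, 2*x*t_cur - t_prev
        assert_(np.allclose(orth.eval_chebyt(n, x), t_cur, rtol=1e-12))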
def test_eval_genlaguerre_restriction():
# check it returns nan for alpha <= -1
assert_(np.isnan(orth.eval_genlaguerre(0, -1, 0)))
assert_(np.isnan(orth.eval_genlaguerre(0.1, -1, 0)))
def test_warnings():
# ticket 1334
olderr = np.seterr(all='raise')
try:
# these should raise no fp warnings
orth.eval_legendre(1, 0)
orth.eval_laguerre(1, 1)
orth.eval_gegenbauer(1, 1, 0)
finally:
np.seterr(**olderr)
class TestPolys(object):
"""
Check that the eval_* functions agree with the constructed polynomials
"""
def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10,
nparam=10, nx=10, rtol=1e-8):
np.random.seed(1234)
dataset = []
for n in np.arange(nn):
params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
params = np.asarray(params).T
if not param_ranges:
params = [0]
for p in params:
if param_ranges:
p = (n,) + tuple(p)
else:
p = (n,)
x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
x[0] = x_range[0] # always include domain start point
x[1] = x_range[1] # always include domain end point
poly = np.poly1d(cls(*p).coef)
z = np.c_[np.tile(p, (nx,1)), x, poly(x)]
dataset.append(z)
dataset = np.concatenate(dataset, axis=0)
def polyfunc(*p):
p = (p[0].astype(int),) + p[1:]
return func(*p)
olderr = np.seterr(all='raise')
try:
ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
rtol=rtol)
ds.check()
finally:
np.seterr(**olderr)
def test_jacobi(self):
self.check_poly(orth.eval_jacobi, orth.jacobi,
param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1],
rtol=1e-5)
def test_sh_jacobi(self):
self.check_poly(orth.eval_sh_jacobi, orth.sh_jacobi,
param_ranges=[(1, 10), (0, 1)], x_range=[0, 1],
rtol=1e-5)
def test_gegenbauer(self):
self.check_poly(orth.eval_gegenbauer, orth.gegenbauer,
param_ranges=[(-0.499, 10)], x_range=[-1, 1],
rtol=1e-7)
def test_chebyt(self):
self.check_poly(orth.eval_chebyt, orth.chebyt,
param_ranges=[], x_range=[-1, 1])
def test_chebyu(self):
self.check_poly(orth.eval_chebyu, orth.chebyu,
param_ranges=[], x_range=[-1, 1])
def test_chebys(self):
self.check_poly(orth.eval_chebys, orth.chebys,
param_ranges=[], x_range=[-2, 2])
def test_chebyc(self):
self.check_poly(orth.eval_chebyc, orth.chebyc,
param_ranges=[], x_range=[-2, 2])
def test_sh_chebyt(self):
olderr = np.seterr(all='ignore')
try:
self.check_poly(orth.eval_sh_chebyt, orth.sh_chebyt,
param_ranges=[], x_range=[0, 1])
finally:
np.seterr(**olderr)
def test_sh_chebyu(self):
self.check_poly(orth.eval_sh_chebyu, orth.sh_chebyu,
param_ranges=[], x_range=[0, 1])
def test_legendre(self):
self.check_poly(orth.eval_legendre, orth.legendre,
param_ranges=[], x_range=[-1, 1])
def test_sh_legendre(self):
olderr = np.seterr(all='ignore')
try:
self.check_poly(orth.eval_sh_legendre, orth.sh_legendre,
param_ranges=[], x_range=[0, 1])
finally:
np.seterr(**olderr)
def test_genlaguerre(self):
self.check_poly(orth.eval_genlaguerre, orth.genlaguerre,
param_ranges=[(-0.99, 10)], x_range=[0, 100])
def test_laguerre(self):
self.check_poly(orth.eval_laguerre, orth.laguerre,
param_ranges=[], x_range=[0, 100])
def test_hermite(self):
self.check_poly(orth.eval_hermite, orth.hermite,
param_ranges=[], x_range=[-100, 100])
def test_hermitenorm(self):
self.check_poly(orth.eval_hermitenorm, orth.hermitenorm,
param_ranges=[], x_range=[-100, 100])
class TestRecurrence(object):
"""
Check that the eval_* functions sig='ld->d' and 'dd->d' agree.
"""
def check_poly(self, func, param_ranges=[], x_range=[], nn=10,
nparam=10, nx=10, rtol=1e-8):
np.random.seed(1234)
dataset = []
for n in np.arange(nn):
params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
params = np.asarray(params).T
if not param_ranges:
params = [0]
for p in params:
if param_ranges:
p = (n,) + tuple(p)
else:
p = (n,)
x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
x[0] = x_range[0] # always include domain start point
x[1] = x_range[1] # always include domain end point
kw = dict(sig=(len(p)+1)*'d'+'->d')
z = np.c_[np.tile(p, (nx,1)), x, func(*(p + (x,)), **kw)]
dataset.append(z)
dataset = np.concatenate(dataset, axis=0)
def polyfunc(*p):
p = (p[0].astype(int),) + p[1:]
kw = dict(sig='l'+(len(p)-1)*'d'+'->d')
return func(*p, **kw)
olderr = np.seterr(all='raise')
try:
ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
rtol=rtol)
ds.check()
finally:
np.seterr(**olderr)
def test_jacobi(self):
self.check_poly(orth.eval_jacobi,
param_ranges=[(-0.99, 10), (-0.99, 10)], x_range=[-1, 1])
def test_sh_jacobi(self):
self.check_poly(orth.eval_sh_jacobi,
param_ranges=[(1, 10), (0, 1)], x_range=[0, 1])
def test_gegenbauer(self):
self.check_poly(orth.eval_gegenbauer,
param_ranges=[(-0.499, 10)], x_range=[-1, 1])
def test_chebyt(self):
self.check_poly(orth.eval_chebyt,
param_ranges=[], x_range=[-1, 1])
def test_chebyu(self):
self.check_poly(orth.eval_chebyu,
param_ranges=[], x_range=[-1, 1])
def test_chebys(self):
self.check_poly(orth.eval_chebys,
param_ranges=[], x_range=[-2, 2])
def test_chebyc(self):
self.check_poly(orth.eval_chebyc,
param_ranges=[], x_range=[-2, 2])
def test_sh_chebyt(self):
self.check_poly(orth.eval_sh_chebyt,
param_ranges=[], x_range=[0, 1])
def test_sh_chebyu(self):
self.check_poly(orth.eval_sh_chebyu,
param_ranges=[], x_range=[0, 1])
def test_legendre(self):
self.check_poly(orth.eval_legendre,
param_ranges=[], x_range=[-1, 1])
def test_sh_legendre(self):
self.check_poly(orth.eval_sh_legendre,
param_ranges=[], x_range=[0, 1])
def test_genlaguerre(self):
self.check_poly(orth.eval_genlaguerre,
param_ranges=[(-0.99, 10)], x_range=[0, 100])
def test_laguerre(self):
self.check_poly(orth.eval_laguerre,
param_ranges=[], x_range=[0, 100])
def test_hermite(self):
v = orth.eval_hermite(70, 1.0)
a = -1.457076485701412e60
        assert_allclose(v, a)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_sph_harm.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose
import scipy.special as sc
def test_first_harmonics():
# Test against explicit representations of the first four
# spherical harmonics which use `theta` as the azimuthal angle,
# `phi` as the polar angle, and include the Condon-Shortley
# phase.
# Notation is Ymn
def Y00(theta, phi):
return 0.5*np.sqrt(1/np.pi)
def Yn11(theta, phi):
return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi)
def Y01(theta, phi):
return 0.5*np.sqrt(3/np.pi)*np.cos(phi)
def Y11(theta, phi):
return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi)
harms = [Y00, Yn11, Y01, Y11]
m = [0, -1, 0, 1]
n = [0, 1, 1, 1]
theta = np.linspace(0, 2*np.pi)
phi = np.linspace(0, np.pi)
theta, phi = np.meshgrid(theta, phi)
for harm, m, n in zip(harms, m, n):
assert_allclose(sc.sph_harm(m, n, theta, phi),
harm(theta, phi),
rtol=1e-15, atol=1e-15,
err_msg="Y^{}_{} incorrect".format(m, n))
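# Usage sketch (an addition, not scipy documentation text): note the
# argument order sph_harm(m, n, theta, phi) -- order m before degree n,
# azimuthal angle before polar angle. Y_0^0 is the constant 1/(2*sqrt(pi))
# everywhere on the sphere.
def test_y00_is_constant_sketch():
    theta, phi = 0.3, 1.1  # an arbitrary point on the sphere
    assert_allclose(sc.sph_harm(0, 0, theta, phi), 0.5*np.sqrt(1/np.pi),
                    rtol=1e-15, atol=1e-15)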
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_gammainc.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose
import scipy.special as sc
from scipy.special._testutils import FuncData
def test_line():
# Test on the line a = x where a simpler asymptotic expansion
# (analog of DLMF 8.12.15) is available.
def gammainc_line(x):
c = np.array([-1/3, -1/540, 25/6048, 101/155520,
-3184811/3695155200, -2745493/8151736420])
res = 0
xfac = 1
for ck in c:
res -= ck*xfac
xfac /= x
res /= np.sqrt(2*np.pi*x)
res += 0.5
return res
x = np.logspace(np.log10(25), 300, 500)
a = x.copy()
dataset = np.vstack((a, x, gammainc_line(x))).T
FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check()
def test_gammainc_roundtrip():
a = np.logspace(-5, 10, 100)
x = np.logspace(-5, 10, 100)
y = sc.gammaincinv(a, sc.gammainc(a, x))
assert_allclose(x, y, rtol=1e-10)
def test_gammaincc_roundtrip():
a = np.logspace(-5, 10, 100)
x = np.logspace(-5, 10, 100)
y = sc.gammainccinv(a, sc.gammaincc(a, x))
assert_allclose(x, y, rtol=1e-14)
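# Illustrative sketch (beyond the original tests): the regularized lower
# and upper incomplete gamma functions are complementary,
# gammainc(a, x) + gammaincc(a, x) = 1, which is why the two roundtrips
# above mirror each other.
def test_gammainc_complement_sketch():
    a = np.logspace(-3, 3, 25)
    x = np.logspace(-3, 3, 25)
    assert_allclose(sc.gammainc(a, x) + sc.gammaincc(a, x), 1, rtol=1e-13)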
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_sf_error.py
from __future__ import division, print_function, absolute_import
import warnings
from numpy.testing import assert_, assert_equal
from scipy._lib._numpy_compat import suppress_warnings
import pytest
from pytest import raises as assert_raises
import scipy.special as sc
from scipy.special._ufuncs import _sf_error_test_function
_sf_error_code_map = {
# skip 'ok'
'singular': 1,
'underflow': 2,
'overflow': 3,
'slow': 4,
'loss': 5,
'no_result': 6,
'domain': 7,
'arg': 8,
'other': 9
}
_sf_error_actions = [
'ignore',
'warn',
'raise'
]
def _check_action(fun, args, action):
if action == 'warn':
with pytest.warns(sc.SpecialFunctionWarning):
fun(*args)
elif action == 'raise':
with assert_raises(sc.SpecialFunctionError):
fun(*args)
else:
# action == 'ignore', make sure there are no warnings/exceptions
with warnings.catch_warnings():
warnings.simplefilter("error")
fun(*args)
def test_geterr():
err = sc.geterr()
for key, value in err.items():
assert_(key in _sf_error_code_map.keys())
assert_(value in _sf_error_actions)
def test_seterr():
entry_err = sc.geterr()
try:
for category in _sf_error_code_map.keys():
for action in _sf_error_actions:
geterr_olderr = sc.geterr()
seterr_olderr = sc.seterr(**{category: action})
assert_(geterr_olderr == seterr_olderr)
newerr = sc.geterr()
assert_(newerr[category] == action)
geterr_olderr.pop(category)
newerr.pop(category)
assert_(geterr_olderr == newerr)
_check_action(_sf_error_test_function,
(_sf_error_code_map[category],),
action)
finally:
sc.seterr(**entry_err)
def test_errstate_pyx_basic():
olderr = sc.geterr()
with sc.errstate(singular='raise'):
with assert_raises(sc.SpecialFunctionError):
sc.loggamma(0)
assert_equal(olderr, sc.geterr())
def test_errstate_c_basic():
olderr = sc.geterr()
with sc.errstate(domain='raise'):
with assert_raises(sc.SpecialFunctionError):
sc.spence(-1)
assert_equal(olderr, sc.geterr())
def test_errstate_cpp_basic():
olderr = sc.geterr()
with sc.errstate(underflow='raise'):
with assert_raises(sc.SpecialFunctionError):
sc.wrightomega(-1000)
assert_equal(olderr, sc.geterr())
def test_errstate():
for category in _sf_error_code_map.keys():
for action in _sf_error_actions:
olderr = sc.geterr()
with sc.errstate(**{category: action}):
_check_action(_sf_error_test_function,
(_sf_error_code_map[category],),
action)
assert_equal(olderr, sc.geterr())
def test_errstate_all_but_one():
olderr = sc.geterr()
with sc.errstate(all='raise', singular='ignore'):
sc.gammaln(0)
with assert_raises(sc.SpecialFunctionError):
sc.spence(-1.0)
assert_equal(olderr, sc.geterr())
def test_errprint():
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`errprint` is deprecated!")
flag = sc.errprint(True)
try:
assert_(isinstance(flag, bool))
with pytest.warns(sc.SpecialFunctionWarning):
sc.loggamma(0)
finally:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`errprint` is deprecated!")
sc.errprint(flag)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_sici.py
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.special as sc
from scipy.special._testutils import FuncData
def test_sici_consistency():
# Make sure the implementation of sici for real arguments agrees
# with the implementation of sici for complex arguments.
# On the negative real axis Cephes drops the imaginary part in ci
def sici(x):
si, ci = sc.sici(x + 0j)
return si.real, ci.real
x = np.r_[-np.logspace(8, -30, 200), 0, np.logspace(-30, 8, 200)]
si, ci = sc.sici(x)
dataset = np.column_stack((x, si, ci))
FuncData(sici, dataset, 0, (1, 2), rtol=1e-12).check()
def test_shichi_consistency():
# Make sure the implementation of shichi for real arguments agrees
# with the implementation of shichi for complex arguments.
# On the negative real axis Cephes drops the imaginary part in chi
def shichi(x):
shi, chi = sc.shichi(x + 0j)
return shi.real, chi.real
# Overflow happens quickly, so limit range
x = np.r_[-np.logspace(np.log10(700), -30, 200), 0,
np.logspace(-30, np.log10(700), 200)]
shi, chi = sc.shichi(x)
dataset = np.column_stack((x, shi, chi))
FuncData(shichi, dataset, 0, (1, 2), rtol=1e-14).check()
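# Illustrative sketch (an assumption, not part of the original file): the
# asymptotic values Si(x) -> pi/2 and Ci(x) -> 0 as x -> +inf pin down the
# conventions used by sc.sici; both tails decay like 1/x, hence the loose
# tolerance. Uses np.testing.assert_allclose since this module does not
# import it directly.
def test_sici_limits_sketch():
    si, ci = sc.sici(1e8)
    np.testing.assert_allclose(si, np.pi/2, atol=1e-7)
    np.testing.assert_allclose(ci, 0, atol=1e-7)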
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_logsumexp.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_array_almost_equal, assert_)
from scipy.special import logsumexp
def test_logsumexp():
    # Test that the logsumexp() function correctly handles large inputs.
a = np.arange(200)
desired = np.log(np.sum(np.exp(a)))
assert_almost_equal(logsumexp(a), desired)
# Now test with large numbers
b = [1000, 1000]
desired = 1000.0 + np.log(2.0)
assert_almost_equal(logsumexp(b), desired)
n = 1000
b = np.ones(n) * 10000
desired = 10000.0 + np.log(n)
assert_almost_equal(logsumexp(b), desired)
x = np.array([1e-40] * 1000000)
logx = np.log(x)
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum())
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
# Handling special values properly
assert_equal(logsumexp(np.inf), np.inf)
assert_equal(logsumexp(-np.inf), -np.inf)
assert_equal(logsumexp(np.nan), np.nan)
assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf)
# Handling an array with different magnitudes on the axes
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
[-1e10, -np.inf]], axis=-1),
[1e10, -1e10])
# Test keeping dimensions
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
[-1e10, -np.inf]],
axis=-1,
keepdims=True),
[[1e10], [-1e10]])
# Test multiple axes
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
[-1e10, -np.inf]],
axis=(-1,-2)),
1e10)
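# Illustrative sketch (not from the original suite): logsumexp avoids
# overflow through the shift identity
# log(sum(exp(a))) = m + log(sum(exp(a - m))), m = max(a),
# and the shifted naive formula reproduces it whenever it is well defined.
def test_logsumexp_shift_identity_sketch():
    a = np.array([1.0, 2.0, 3.0, 700.0])  # naive exp(700) would overflow
    m = a.max()
    assert_almost_equal(logsumexp(a), m + np.log(np.sum(np.exp(a - m))))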
def test_logsumexp_b():
a = np.arange(200)
b = np.arange(200, 0, -1)
desired = np.log(np.sum(b*np.exp(a)))
assert_almost_equal(logsumexp(a, b=b), desired)
a = [1000, 1000]
b = [1.2, 1.2]
desired = 1000 + np.log(2 * 1.2)
assert_almost_equal(logsumexp(a, b=b), desired)
x = np.array([1e-40] * 100000)
b = np.linspace(1, 1000, 100000)
logx = np.log(x)
X = np.vstack((x, x))
logX = np.vstack((logx, logx))
B = np.vstack((b, b))
assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
(B * X).sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
(B * X).sum(axis=1))
def test_logsumexp_sign():
a = [1,1,1]
b = [1,-1,-1]
r, s = logsumexp(a, b=b, return_sign=True)
assert_almost_equal(r,1)
assert_equal(s,-1)
def test_logsumexp_sign_zero():
a = [1,1]
b = [1,-1]
r, s = logsumexp(a, b=b, return_sign=True)
assert_(not np.isfinite(r))
assert_(not np.isnan(r))
assert_(r < 0)
assert_equal(s,0)
def test_logsumexp_sign_shape():
a = np.ones((1,2,3,4))
b = np.ones_like(a)
r, s = logsumexp(a, axis=2, b=b, return_sign=True)
assert_equal(r.shape, s.shape)
assert_equal(r.shape, (1,2,4))
r, s = logsumexp(a, axis=(1,3), b=b, return_sign=True)
assert_equal(r.shape, s.shape)
assert_equal(r.shape, (1,3))
def test_logsumexp_shape():
a = np.ones((1, 2, 3, 4))
b = np.ones_like(a)
r = logsumexp(a, axis=2, b=b)
assert_equal(r.shape, (1, 2, 4))
r = logsumexp(a, axis=(1, 3), b=b)
assert_equal(r.shape, (1, 3))
def test_logsumexp_b_zero():
a = [1,10000]
b = [1,0]
assert_almost_equal(logsumexp(a, b=b), 1)
def test_logsumexp_b_shape():
a = np.zeros((4,1,2,1))
b = np.ones((3,1,5))
logsumexp(a, b=b)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_boxcox.py
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, assert_allclose
from scipy.special import boxcox, boxcox1p, inv_boxcox, inv_boxcox1p
# There are more tests of boxcox and boxcox1p in test_mpmath.py.
def test_boxcox_basic():
x = np.array([0.5, 1, 2, 4])
# lambda = 0 => y = log(x)
y = boxcox(x, 0)
assert_almost_equal(y, np.log(x))
# lambda = 1 => y = x - 1
y = boxcox(x, 1)
assert_almost_equal(y, x - 1)
# lambda = 2 => y = 0.5*(x**2 - 1)
y = boxcox(x, 2)
assert_almost_equal(y, 0.5*(x**2 - 1))
# x = 0 and lambda > 0 => y = -1 / lambda
lam = np.array([0.5, 1, 2])
y = boxcox(0, lam)
assert_almost_equal(y, -1.0 / lam)
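# Illustrative sketch (an addition): away from lmbda = 0 the transform is
# simply its textbook formula y = (x**lmbda - 1)/lmbda; the dedicated
# ufunc mainly buys accuracy near lmbda = 0 (see test_boxcox_underflow
# below).
def test_boxcox_matches_naive_formula_sketch():
    x = np.array([0.5, 1, 2, 4])
    for lmbda in [0.3, 1.7, -2.5]:
        assert_allclose(boxcox(x, lmbda), (x**lmbda - 1)/lmbda, rtol=1e-13)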
def test_boxcox_underflow():
x = 1 + 1e-15
lmbda = 1e-306
y = boxcox(x, lmbda)
assert_allclose(y, np.log(x), rtol=1e-14)
def test_boxcox_nonfinite():
# x < 0 => y = nan
x = np.array([-1, -1, -0.5])
y = boxcox(x, [0.5, 2.0, -1.5])
assert_equal(y, np.array([np.nan, np.nan, np.nan]))
# x = 0 and lambda <= 0 => y = -inf
x = 0
y = boxcox(x, [-2.5, 0])
assert_equal(y, np.array([-np.inf, -np.inf]))
def test_boxcox1p_basic():
x = np.array([-0.25, -1e-20, 0, 1e-20, 0.25, 1, 3])
# lambda = 0 => y = log(1+x)
y = boxcox1p(x, 0)
assert_almost_equal(y, np.log1p(x))
# lambda = 1 => y = x
y = boxcox1p(x, 1)
assert_almost_equal(y, x)
# lambda = 2 => y = 0.5*((1+x)**2 - 1) = 0.5*x*(2 + x)
y = boxcox1p(x, 2)
assert_almost_equal(y, 0.5*x*(2 + x))
# x = -1 and lambda > 0 => y = -1 / lambda
lam = np.array([0.5, 1, 2])
y = boxcox1p(-1, lam)
assert_almost_equal(y, -1.0 / lam)
def test_boxcox1p_underflow():
x = np.array([1e-15, 1e-306])
lmbda = np.array([1e-306, 1e-18])
y = boxcox1p(x, lmbda)
assert_allclose(y, np.log1p(x), rtol=1e-14)
def test_boxcox1p_nonfinite():
# x < -1 => y = nan
x = np.array([-2, -2, -1.5])
y = boxcox1p(x, [0.5, 2.0, -1.5])
assert_equal(y, np.array([np.nan, np.nan, np.nan]))
# x = -1 and lambda <= 0 => y = -inf
x = -1
y = boxcox1p(x, [-2.5, 0])
assert_equal(y, np.array([-np.inf, -np.inf]))
def test_inv_boxcox():
x = np.array([0., 1., 2.])
lam = np.array([0., 1., 2.])
y = boxcox(x, lam)
x2 = inv_boxcox(y, lam)
assert_almost_equal(x, x2)
x = np.array([0., 1., 2.])
lam = np.array([0., 1., 2.])
y = boxcox1p(x, lam)
x2 = inv_boxcox1p(y, lam)
assert_almost_equal(x, x2)
def test_inv_boxcox1p_underflow():
x = 1e-15
lam = 1e-306
y = inv_boxcox1p(x, lam)
assert_allclose(y, x, rtol=1e-14)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_precompute_utils.py
from __future__ import division, print_function, absolute_import
import numpy as np
import pytest
from scipy.special._testutils import MissingModule, check_version
from scipy.special._mptestutils import mp_assert_allclose
from scipy.special._precompute.utils import lagrange_inversion
try:
import sympy
except ImportError:
sympy = MissingModule('sympy')
try:
import mpmath as mp
except ImportError:
mp = MissingModule('mpmath')
_is_32bit_platform = np.intp(0).itemsize < 8
@pytest.mark.slow
@check_version(sympy, '0.7')
@check_version(mp, '0.19')
class TestInversion(object):
@pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 2e-9, see gh-6938")
def test_log(self):
with mp.workdps(30):
logcoeffs = mp.taylor(lambda x: mp.log(1 + x), 0, 10)
expcoeffs = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10)
invlogcoeffs = lagrange_inversion(logcoeffs)
mp_assert_allclose(invlogcoeffs, expcoeffs)
@pytest.mark.xfail(condition=_is_32bit_platform, reason="rtol only 1e-15, see gh-6938")
def test_sin(self):
with mp.workdps(30):
sincoeffs = mp.taylor(mp.sin, 0, 10)
asincoeffs = mp.taylor(mp.asin, 0, 10)
invsincoeffs = lagrange_inversion(sincoeffs)
mp_assert_allclose(invsincoeffs, asincoeffs, atol=1e-30)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/__init__.py  (empty file)
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_trig.py
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pytest
from scipy.special._ufuncs import _sinpi as sinpi
from scipy.special._ufuncs import _cospi as cospi
def test_integer_real_part():
x = np.arange(-100, 101)
y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
x, y = np.meshgrid(x, y)
z = x + 1j*y
# In the following we should be *exactly* right
res = sinpi(z)
assert_equal(res.real, 0.0)
res = cospi(z)
assert_equal(res.imag, 0.0)
def test_half_integer_real_part():
x = np.arange(-100, 101) + 0.5
y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
x, y = np.meshgrid(x, y)
z = x + 1j*y
# In the following we should be *exactly* right
res = sinpi(z)
assert_equal(res.imag, 0.0)
res = cospi(z)
assert_equal(res.real, 0.0)
def test_intermediate_overflow():
# Make sure we avoid overflow in situations where cosh/sinh would
# overflow but the product with sin/cos would not
sinpi_pts = [complex(1 + 1e-14, 227),
complex(1e-35, 250),
complex(1e-301, 445)]
# Data generated with mpmath
sinpi_std = [complex(-8.113438309924894e+295, -np.inf),
complex(1.9507801934611995e+306, np.inf),
complex(2.205958493464539e+306, np.inf)]
for p, std in zip(sinpi_pts, sinpi_std):
assert_allclose(sinpi(p), std)
# Test for cosine, less interesting because cos(0) = 1.
p = complex(0.5 + 1e-14, 227)
std = complex(-8.113438309924894e+295, -np.inf)
assert_allclose(cospi(p), std)
@pytest.mark.xfail('win32' in sys.platform
and np.intp(0).itemsize < 8
and sys.version_info < (3, 5),
reason="fails on 32-bit Windows with old MSVC")
def test_zero_sign():
y = sinpi(-0.0)
assert y == 0.0
assert np.signbit(y)
y = sinpi(0.0)
assert y == 0.0
assert not np.signbit(y)
y = cospi(0.5)
assert y == 0.0
assert not np.signbit(y)
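# Illustrative sketch (an assumption about intent, not original code): a
# dedicated sinpi exists because np.sin(np.pi*x) rounds pi*x first, so at
# large integers it is only approximately zero, while sinpi reduces the
# argument exactly and returns zero.
def test_sinpi_large_integer_sketch():
    x = 1e8  # exactly representable integer
    assert sinpi(x) == 0.0
    assert np.sin(np.pi*x) != 0.0  # rounding leaves a ~1e-8 residue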
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_kolmogorov.py
from __future__ import division, print_function, absolute_import
import itertools
import numpy as np
from numpy.testing import assert_
from scipy.special._testutils import FuncData
import pytest
from scipy.special import smirnov, smirnovi, kolmogorov, kolmogi
_rtol = 1e-10
class TestSmirnov(object):
def test_nan(self):
assert_(np.isnan(smirnov(1, np.nan)))
def test_basic(self):
dataset = [(1, 0.1, 0.9),
(1, 0.875, 0.125),
(2, 0.875, 0.125 * 0.125),
(3, 0.875, 0.125 * 0.125 * 0.125)]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check()
def test_x_equals_0(self):
dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check()
def test_x_equals_1(self):
dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check()
def test_x_equals_0point5(self):
dataset = [(1, 0.5, 0.5),
(2, 0.5, 0.25),
(3, 0.5, 0.166666666667),
(4, 0.5, 0.09375),
(5, 0.5, 0.056),
(6, 0.5, 0.0327932098765),
(7, 0.5, 0.0191958707681),
(8, 0.5, 0.0112953186035),
(9, 0.5, 0.00661933257355),
(10, 0.5, 0.003888705)]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check()
def test_n_equals_1(self):
x = np.linspace(0, 1, 101, endpoint=True)
dataset = np.column_stack([[1]*len(x), x, 1-x])
# dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check()
def test_n_equals_2(self):
x = np.linspace(0.5, 1, 101, endpoint=True)
p = np.power(1-x, 2)
n = np.array([2] * len(x))
dataset = np.column_stack([n, x, p])
# dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check()
def test_n_equals_3(self):
x = np.linspace(0.7, 1, 31, endpoint=True)
p = np.power(1-x, 3)
n = np.array([3] * len(x))
dataset = np.column_stack([n, x, p])
# dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check()
def test_n_large(self):
# test for large values of n
# Probabilities should go down as n goes up
x = 0.4
pvals = np.array([smirnov(n, x) for n in range(400, 1100, 20)])
dfs = np.diff(pvals)
assert_(np.all(dfs <= 0), msg='Not all diffs negative %s' % dfs)
dataset = [(1000, 1 - 1.0/2000, np.power(2000.0, -1000))]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check()
# Check asymptotic behaviour
dataset = [(n, 1.0 / np.sqrt(n), np.exp(-2)) for n in range(1000, 5000, 1000)]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=.05).check()
class TestSmirnovi(object):
def test_nan(self):
assert_(np.isnan(smirnovi(1, np.nan)))
@pytest.mark.xfail(reason="test fails; smirnovi() is not always accurate")
def test_basic(self):
dataset = [(1, 0.4, 0.6),
(1, 0.6, 0.4),
(1, 0.99, 0.01),
(1, 0.01, 0.99),
(2, 0.125 * 0.125, 0.875),
(3, 0.125 * 0.125 * 0.125, 0.875),
(10, 1.0 / 16 ** 10, 1 - 1.0 / 16)]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check()
@pytest.mark.xfail(reason="test fails; smirnovi(_,0) is not accurate")
def test_x_equals_0(self):
dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check()
def test_x_equals_1(self):
dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check()
@pytest.mark.xfail(reason="test fails; smirnovi(1,) is not accurate")
def test_n_equals_1(self):
pp = np.linspace(0, 1, 101, endpoint=True)
dataset = [(1, p, 1-p) for p in pp]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check()
@pytest.mark.xfail(reason="test fails; smirnovi(2,_) is not accurate")
def test_n_equals_2(self):
x = np.linspace(0.5, 1, 101, endpoint=True)
p = np.power(1-x, 2)
n = np.array([2] * len(x))
dataset = np.column_stack([n, p, x])
# dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check()
@pytest.mark.xfail(reason="test fails; smirnovi(3,_) is not accurate")
def test_n_equals_3(self):
x = np.linspace(0.7, 1, 31, endpoint=True)
p = np.power(1-x, 3)
n = np.array([3] * len(x))
dataset = np.column_stack([n, p, x])
# dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check()
@pytest.mark.xfail(reason="test fails; smirnovi(_,_) is not accurate")
def test_round_trip(self):
def _sm_smi(n, p):
return smirnov(n, smirnovi(n, p))
dataset = [(1, 0.4, 0.4),
(1, 0.6, 0.6),
(2, 0.875, 0.875),
(3, 0.875, 0.875),
(3, 0.125, 0.125),
(10, 0.999, 0.999),
(10, 0.0001, 0.0001)]
dataset = np.asarray(dataset)
FuncData(_sm_smi, dataset, (0, 1), 2, rtol=_rtol).check()
def test_x_equals_0point5(self):
dataset = [(1, 0.5, 0.5),
(2, 0.5, 0.366025403784),
(2, 0.25, 0.5),
(3, 0.5, 0.297156508177),
(4, 0.5, 0.255520481121),
(5, 0.5, 0.234559536069),
(6, 0.5, 0.21715965898),
(7, 0.5, 0.202722580034),
(8, 0.5, 0.190621765256),
(9, 0.5, 0.180363501362),
(10, 0.5, 0.17157867006)]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check()
class TestKolmogorov(object):
def test_nan(self):
assert_(np.isnan(kolmogorov(np.nan)))
def test_basic(self):
dataset = [(0, 1.0),
(0.5, 0.96394524366487511),
(1, 0.26999967167735456),
(2, 0.00067092525577969533)]
dataset = np.asarray(dataset)
FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()
def test_smallx(self):
epsilon = 0.1 ** np.arange(1, 14)
x = np.array([0.571173265106, 0.441027698518, 0.374219690278, 0.331392659217,
0.300820537459, 0.277539353999, 0.259023494805, 0.243829561254,
0.231063086389, 0.220135543236, 0.210641372041, 0.202290283658,
0.19487060742])
dataset = np.column_stack([x, 1-epsilon])
FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()
@pytest.mark.xfail(reason="test fails; kolmogi() is not accurate for small p")
def test_round_trip(self):
def _ki_k(_x):
return kolmogi(kolmogorov(_x))
x = np.linspace(0.0, 2.0, 21, endpoint=True)
dataset = np.column_stack([x, x])
FuncData(_ki_k, dataset, (0,), 1, rtol=_rtol).check()
class TestKolmogi(object):
def test_nan(self):
assert_(np.isnan(kolmogi(np.nan)))
@pytest.mark.xfail(reason="test fails; kolmogi() is not accurate for small p")
def test_basic(self):
dataset = [(1.0, 0),
(0.96394524366487511, 0.5),
(0.26999967167735456, 1),
(0.00067092525577969533, 2)]
dataset = np.asarray(dataset)
FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
@pytest.mark.xfail(reason="test fails; kolmogi() is not accurate for small p")
def test_smallp(self):
epsilon = 0.1 ** np.arange(1, 14)
x = np.array([0.571173265106, 0.441027698518, 0.374219690278, 0.331392659217,
0.300820537459, 0.277539353999, 0.259023494805, 0.243829561254,
0.231063086389, 0.220135543236, 0.210641372041, 0.202290283658,
0.19487060742])
dataset = np.column_stack([1-epsilon, x])
FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
def test_round_trip(self):
def _k_ki(_p):
return kolmogorov(kolmogi(_p))
p = np.linspace(0.1, 1.0, 10, endpoint=True)
dataset = np.column_stack([p, p])
FuncData(_k_ki, dataset, (0,), 1, rtol=_rtol).check()
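# Illustrative sketch (beyond the original tests): the Kolmogorov survival
# function is the alternating series
# K(x) = 2*sum_{k>=1} (-1)**(k-1)*exp(-2*k**2*x**2),
# which converges rapidly for moderate x and cross-checks kolmogorov().
def test_kolmogorov_series_sketch():
    for x in [0.5, 1.0, 2.0]:
        k = np.arange(1, 40)
        series = 2*np.sum((-1)**(k - 1)*np.exp(-2*k**2*x**2))
        assert_(np.allclose(kolmogorov(x), series, rtol=1e-13))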
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_mpmath.py
"""
Test Scipy functions versus mpmath, if available.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_, assert_allclose
from numpy import pi
import pytest
import itertools
from distutils.version import LooseVersion
import scipy.special as sc
from scipy._lib.six import with_metaclass
from scipy.special._testutils import (
MissingModule, check_version, FuncData,
assert_func_equal)
from scipy.special._mptestutils import (
Arg, FixedArg, ComplexArg, IntArg, assert_mpmath_equal,
nonfunctional_tooslow, trace_args, time_limited, exception_to_nan,
inf_to_nan)
from scipy.special._ufuncs import (
_sinpi, _cospi, _lgam1p, _lanczos_sum_expg_scaled, _log1pmx,
_igam_fac)
try:
import mpmath
except ImportError:
mpmath = MissingModule('mpmath')
_is_32bit_platform = np.intp(0).itemsize < 8
# ------------------------------------------------------------------------------
# expi
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.10')
def test_expi_complex():
dataset = []
for r in np.logspace(-99, 2, 10):
for p in np.linspace(0, 2*np.pi, 30):
z = r*np.exp(1j*p)
dataset.append((z, complex(mpmath.ei(z))))
dataset = np.array(dataset, dtype=np.complex_)
FuncData(sc.expi, dataset, 0, 1).check()
# ------------------------------------------------------------------------------
# expn
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_expn_large_n():
# Test the transition to the asymptotic regime of n.
dataset = []
for n in [50, 51]:
for x in np.logspace(0, 4, 200):
with mpmath.workdps(100):
dataset.append((n, x, float(mpmath.expint(n, x))))
dataset = np.asarray(dataset)
FuncData(sc.expn, dataset, (0, 1), 2, rtol=1e-13).check()
# ------------------------------------------------------------------------------
# hyp0f1
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_hyp0f1_gh5764():
# Do a small and somewhat systematic test that runs quickly
dataset = []
axis = [-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]
for v in axis:
for x in axis:
for y in axis:
z = x + 1j*y
# mpmath computes the answer correctly at dps ~ 17 but
# fails for 20 < dps < 120 (uses a different method);
# set the dps high enough that this isn't an issue
with mpmath.workdps(120):
res = complex(mpmath.hyp0f1(v, z))
dataset.append((v, z, res))
dataset = np.array(dataset)
FuncData(lambda v, z: sc.hyp0f1(v.real, z), dataset, (0, 1), 2,
rtol=1e-13).check()
@check_version(mpmath, '0.19')
def test_hyp0f1_gh_1609():
# this is a regression test for gh-1609
vv = np.linspace(150, 180, 21)
af = sc.hyp0f1(vv, 0.5)
mf = np.array([mpmath.hyp0f1(v, 0.5) for v in vv])
assert_allclose(af, mf.astype(float), rtol=1e-12)
# ------------------------------------------------------------------------------
# hyp2f1
# ------------------------------------------------------------------------------
@check_version(mpmath, '1.0.0')
def test_hyp2f1_strange_points():
pts = [
(2, -1, -1, 0.7), # expected: 2.4
(2, -2, -2, 0.7), # expected: 3.87
]
pts += list(itertools.product([2, 1, -0.7, -1000], repeat=4))
pts = [
(a, b, c, x) for a, b, c, x in pts
if b == c and round(b) == b and b < 0 and b != -1000
]
kw = dict(eliminate=True)
dataset = [p + (float(mpmath.hyp2f1(*p, **kw)),) for p in pts]
dataset = np.array(dataset, dtype=np.float_)
FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
@check_version(mpmath, '0.13')
def test_hyp2f1_real_some_points():
pts = [
(1, 2, 3, 0),
(1./3, 2./3, 5./6, 27./32),
(1./4, 1./2, 3./4, 80./81),
(2,-2, -3, 3),
(2, -3, -2, 3),
(2, -1.5, -1.5, 3),
(1, 2, 3, 0),
(0.7235, -1, -5, 0.3),
(0.25, 1./3, 2, 0.999),
(0.25, 1./3, 2, -1),
(2, 3, 5, 0.99),
(3./2, -0.5, 3, 0.99),
(2, 2.5, -3.25, 0.999),
(-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001),
(-10, 900, -10.5, 0.99),
(-10, 900, 10.5, 0.99),
(-1, 2, 1, 1.0),
(-1, 2, 1, -1.0),
(-3, 13, 5, 1.0),
(-3, 13, 5, -1.0),
(0.5, 1 - 270.5, 1.5, 0.999**2), # from issue 1561
]
dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts]
dataset = np.array(dataset, dtype=np.float_)
olderr = np.seterr(invalid='ignore')
try:
FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
finally:
np.seterr(**olderr)
@check_version(mpmath, '0.14')
def test_hyp2f1_some_points_2():
# Taken from mpmath unit tests -- this point failed for mpmath 0.13 but
# was fixed in their SVN since then
pts = [
(112, (51,10), (-9,10), -0.99999),
(10,-900,10.5,0.99),
(10,-900,-10.5,0.99),
]
def fev(x):
if isinstance(x, tuple):
return float(x[0]) / x[1]
else:
return x
dataset = [tuple(map(fev, p)) + (float(mpmath.hyp2f1(*p)),) for p in pts]
dataset = np.array(dataset, dtype=np.float_)
FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
@check_version(mpmath, '0.13')
def test_hyp2f1_real_some():
dataset = []
for a in [-10, -5, -1.8, 1.8, 5, 10]:
for b in [-2.5, -1, 1, 7.4]:
for c in [-9, -1.8, 5, 20.4]:
for z in [-10, -1.01, -0.99, 0, 0.6, 0.95, 1.5, 10]:
try:
v = float(mpmath.hyp2f1(a, b, c, z))
                    except Exception:
continue
dataset.append((a, b, c, z, v))
dataset = np.array(dataset, dtype=np.float_)
olderr = np.seterr(invalid='ignore')
try:
FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9,
ignore_inf_sign=True).check()
finally:
np.seterr(**olderr)
@check_version(mpmath, '0.12')
@pytest.mark.slow
def test_hyp2f1_real_random():
npoints = 500
dataset = np.zeros((npoints, 5), np.float_)
np.random.seed(1234)
dataset[:, 0] = np.random.pareto(1.5, npoints)
dataset[:, 1] = np.random.pareto(1.5, npoints)
dataset[:, 2] = np.random.pareto(1.5, npoints)
dataset[:, 3] = 2*np.random.rand(npoints) - 1
    dataset[:, 0] *= (-1)**np.random.randint(2, size=npoints)
    dataset[:, 1] *= (-1)**np.random.randint(2, size=npoints)
    dataset[:, 2] *= (-1)**np.random.randint(2, size=npoints)
for ds in dataset:
if mpmath.__version__ < '0.14':
# mpmath < 0.14 fails for c too much smaller than a, b
if abs(ds[:2]).max() > abs(ds[2]):
ds[2] = abs(ds[:2]).max()
ds[4] = float(mpmath.hyp2f1(*tuple(ds[:4])))
FuncData(sc.hyp2f1, dataset, (0, 1, 2, 3), 4, rtol=1e-9).check()
# ------------------------------------------------------------------------------
# erf (complex)
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.14')
def test_erf_complex():
# need to increase mpmath precision for this test
old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
try:
mpmath.mp.dps = 70
x1, y1 = np.meshgrid(np.linspace(-10, 1, 31), np.linspace(-10, 1, 11))
x2, y2 = np.meshgrid(np.logspace(-80, .8, 31), np.logspace(-80, .8, 11))
points = np.r_[x1.ravel(),x2.ravel()] + 1j*np.r_[y1.ravel(), y2.ravel()]
assert_func_equal(sc.erf, lambda x: complex(mpmath.erf(x)), points,
vectorized=False, rtol=1e-13)
assert_func_equal(sc.erfc, lambda x: complex(mpmath.erfc(x)), points,
vectorized=False, rtol=1e-13)
finally:
mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
# ------------------------------------------------------------------------------
# lpmv
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.15')
def test_lpmv():
pts = []
for x in [-0.99, -0.557, 1e-6, 0.132, 1]:
pts.extend([
(1, 1, x),
(1, -1, x),
(-1, 1, x),
(-1, -2, x),
(1, 1.7, x),
(1, -1.7, x),
(-1, 1.7, x),
(-1, -2.7, x),
(1, 10, x),
(1, 11, x),
(3, 8, x),
(5, 11, x),
(-3, 8, x),
(-5, 11, x),
(3, -8, x),
(5, -11, x),
(-3, -8, x),
(-5, -11, x),
(3, 8.3, x),
(5, 11.3, x),
(-3, 8.3, x),
(-5, 11.3, x),
(3, -8.3, x),
(5, -11.3, x),
(-3, -8.3, x),
(-5, -11.3, x),
])
def mplegenp(nu, mu, x):
if mu == int(mu) and x == 1:
# mpmath 0.17 gets this wrong
if mu == 0:
return 1
else:
return 0
return mpmath.legenp(nu, mu, x)
dataset = [p + (mplegenp(p[1], p[0], p[2]),) for p in pts]
dataset = np.array(dataset, dtype=np.float_)
def evf(mu, nu, x):
return sc.lpmv(mu.astype(int), nu, x)
olderr = np.seterr(invalid='ignore')
try:
FuncData(evf, dataset, (0,1,2), 3, rtol=1e-10, atol=1e-14).check()
finally:
np.seterr(**olderr)
# ------------------------------------------------------------------------------
# beta
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.15')
def test_beta():
np.random.seed(1234)
b = np.r_[np.logspace(-200, 200, 4),
np.logspace(-10, 10, 4),
np.logspace(-1, 1, 4),
np.arange(-10, 11, 1),
np.arange(-10, 11, 1) + 0.5,
-1, -2.3, -3, -100.3, -10003.4]
a = b
ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T
old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
try:
mpmath.mp.dps = 400
assert_func_equal(sc.beta,
lambda a, b: float(mpmath.beta(a, b)),
ab,
vectorized=False,
rtol=1e-10,
ignore_inf_sign=True)
assert_func_equal(
sc.betaln,
lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))),
ab,
vectorized=False,
rtol=1e-10)
finally:
mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
# ------------------------------------------------------------------------------
# loggamma
# ------------------------------------------------------------------------------
LOGGAMMA_TAYLOR_RADIUS = 0.2
@check_version(mpmath, '0.19')
def test_loggamma_taylor_transition():
# Make sure there isn't a big jump in accuracy when we move from
# using the Taylor series to using the recurrence relation.
r = LOGGAMMA_TAYLOR_RADIUS + np.array([-0.1, -0.01, 0, 0.01, 0.1])
theta = np.linspace(0, 2*np.pi, 20)
r, theta = np.meshgrid(r, theta)
dz = r*np.exp(1j*theta)
z = np.r_[1 + dz, 2 + dz].flatten()
dataset = []
for z0 in z:
dataset.append((z0, complex(mpmath.loggamma(z0))))
dataset = np.array(dataset)
FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
@check_version(mpmath, '0.19')
def test_loggamma_taylor():
# Test around the zeros at z = 1, 2.
r = np.logspace(-16, np.log10(LOGGAMMA_TAYLOR_RADIUS), 10)
theta = np.linspace(0, 2*np.pi, 20)
r, theta = np.meshgrid(r, theta)
dz = r*np.exp(1j*theta)
z = np.r_[1 + dz, 2 + dz].flatten()
dataset = []
for z0 in z:
dataset.append((z0, complex(mpmath.loggamma(z0))))
dataset = np.array(dataset)
FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
# ------------------------------------------------------------------------------
# rgamma
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
@pytest.mark.slow
def test_rgamma_zeros():
# Test around the zeros at z = 0, -1, -2, ..., -169. (After -169 we
# get values that are out of floating point range even when we're
# within 0.1 of the zero.)
# Can't use too many points here or the test takes forever.
dx = np.r_[-np.logspace(-1, -13, 3), 0, np.logspace(-13, -1, 3)]
dy = dx.copy()
dx, dy = np.meshgrid(dx, dy)
dz = dx + 1j*dy
zeros = np.arange(0, -170, -1).reshape(1, 1, -1)
z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
dataset = []
with mpmath.workdps(100):
for z0 in z:
dataset.append((z0, complex(mpmath.rgamma(z0))))
dataset = np.array(dataset)
FuncData(sc.rgamma, dataset, 0, 1, rtol=1e-12).check()
# ------------------------------------------------------------------------------
# digamma
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
@pytest.mark.slow
def test_digamma_roots():
# Test the special-cased roots for digamma.
root = mpmath.findroot(mpmath.digamma, 1.5)
roots = [float(root)]
root = mpmath.findroot(mpmath.digamma, -0.5)
roots.append(float(root))
roots = np.array(roots)
# If we test beyond a radius of 0.24 mpmath will take forever.
dx = np.r_[-0.24, -np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10), 0.24]
dy = dx.copy()
dx, dy = np.meshgrid(dx, dy)
dz = dx + 1j*dy
z = (roots + np.dstack((dz,)*roots.size)).flatten()
dataset = []
with mpmath.workdps(30):
for z0 in z:
dataset.append((z0, complex(mpmath.digamma(z0))))
dataset = np.array(dataset)
FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
@check_version(mpmath, '0.19')
def test_digamma_negreal():
# Test digamma around the negative real axis. Don't do this in
# TestSystematic because the points need some jiggering so that
# mpmath doesn't take forever.
digamma = exception_to_nan(mpmath.digamma)
x = -np.logspace(300, -30, 100)
y = np.r_[-np.logspace(0, -3, 5), 0, np.logspace(-3, 0, 5)]
x, y = np.meshgrid(x, y)
z = (x + 1j*y).flatten()
dataset = []
with mpmath.workdps(40):
for z0 in z:
res = digamma(z0)
dataset.append((z0, complex(res)))
dataset = np.asarray(dataset)
FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check()
@check_version(mpmath, '0.19')
def test_digamma_boundary():
# Check that there isn't a jump in accuracy when we switch from
# using the asymptotic series to the reflection formula.
x = -np.logspace(300, -30, 100)
y = np.array([-6.1, -5.9, 5.9, 6.1])
x, y = np.meshgrid(x, y)
z = (x + 1j*y).flatten()
dataset = []
with mpmath.workdps(30):
for z0 in z:
res = mpmath.digamma(z0)
dataset.append((z0, complex(res)))
dataset = np.asarray(dataset)
FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check()
# ------------------------------------------------------------------------------
# gammainc
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
@pytest.mark.slow
def test_gammainc_boundary():
# Test the transition to the asymptotic series.
small = 20
a = np.linspace(0.5*small, 2*small, 50)
x = a.copy()
a, x = np.meshgrid(a, x)
a, x = a.flatten(), x.flatten()
dataset = []
with mpmath.workdps(100):
for a0, x0 in zip(a, x):
dataset.append((a0, x0, float(mpmath.gammainc(a0, b=x0, regularized=True))))
dataset = np.array(dataset)
FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-12).check()
# ------------------------------------------------------------------------------
# spence
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
@pytest.mark.slow
def test_spence_circle():
# The trickiest region for spence is around the circle |z - 1| = 1,
# so test that region carefully.
def spence(z):
return complex(mpmath.polylog(2, 1 - z))
r = np.linspace(0.5, 1.5)
theta = np.linspace(0, 2*pi)
z = (1 + np.outer(r, np.exp(1j*theta))).flatten()
dataset = []
for z0 in z:
dataset.append((z0, spence(z0)))
dataset = np.array(dataset)
FuncData(sc.spence, dataset, 0, 1, rtol=1e-14).check()
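# Illustrative sketch (an addition): scipy's spence follows the convention
# spence(z) = Li_2(1 - z), as encoded in the mpmath reference above, so
# spence(1) = Li_2(0) = 0 and spence(0) = Li_2(1) = pi**2/6.
def test_spence_special_values_sketch():
    assert_allclose(sc.spence(1.0), 0.0, atol=1e-15)
    assert_allclose(sc.spence(0.0), np.pi**2/6, rtol=1e-14)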
# ------------------------------------------------------------------------------
# sinpi and cospi
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_sinpi_zeros():
eps = np.finfo(float).eps
dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)]
dy = dx.copy()
dx, dy = np.meshgrid(dx, dy)
dz = dx + 1j*dy
zeros = np.arange(-100, 100, 1).reshape(1, 1, -1)
z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
dataset = []
for z0 in z:
dataset.append((z0, complex(mpmath.sinpi(z0))))
dataset = np.array(dataset)
FuncData(_sinpi, dataset, 0, 1, rtol=2*eps).check()
@check_version(mpmath, '0.19')
def test_cospi_zeros():
eps = np.finfo(float).eps
dx = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)]
dy = dx.copy()
dx, dy = np.meshgrid(dx, dy)
dz = dx + 1j*dy
zeros = (np.arange(-100, 100, 1) + 0.5).reshape(1, 1, -1)
z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
dataset = []
for z0 in z:
dataset.append((z0, complex(mpmath.cospi(z0))))
dataset = np.array(dataset)
FuncData(_cospi, dataset, 0, 1, rtol=2*eps).check()
# ------------------------------------------------------------------------------
# ellipj
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_dn_quarter_period():
def dn(u, m):
return sc.ellipj(u, m)[2]
def mpmath_dn(u, m):
return float(mpmath.ellipfun("dn", u=u, m=m))
m = np.linspace(0, 1, 20)
du = np.r_[-np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10)]
dataset = []
for m0 in m:
u0 = float(mpmath.ellipk(m0))
for du0 in du:
p = u0 + du0
dataset.append((p, m0, mpmath_dn(p, m0)))
dataset = np.asarray(dataset)
FuncData(dn, dataset, (0, 1), 2, rtol=1e-10).check()
# ------------------------------------------------------------------------------
# Wright Omega
# ------------------------------------------------------------------------------
def _mpmath_wrightomega(z, dps):
with mpmath.workdps(dps):
z = mpmath.mpc(z)
unwind = mpmath.ceil((z.imag - mpmath.pi)/(2*mpmath.pi))
res = mpmath.lambertw(mpmath.exp(z), unwind)
return res
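# Sketch of the identity behind the reference implementation above (an
# annotation, not original code): Wright omega is a reparametrized Lambert
# W, omega(z) = W_K(exp(z)) with unwinding number
# K = ceil((Im z - pi)/(2*pi)); on the real line K = 0, so scipy's own
# functions should agree with each other there.
def test_wrightomega_vs_lambertw_sketch():
    x = np.linspace(-5, 5, 11)
    assert_allclose(sc.wrightomega(x), sc.lambertw(np.exp(x)).real,
                    rtol=1e-12, atol=1e-14)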
@pytest.mark.slow
@check_version(mpmath, '0.19')
def test_wrightomega_branch():
x = -np.logspace(10, 0, 25)
picut_above = [np.nextafter(np.pi, np.inf)]
picut_below = [np.nextafter(np.pi, -np.inf)]
npicut_above = [np.nextafter(-np.pi, np.inf)]
npicut_below = [np.nextafter(-np.pi, -np.inf)]
for i in range(50):
picut_above.append(np.nextafter(picut_above[-1], np.inf))
picut_below.append(np.nextafter(picut_below[-1], -np.inf))
npicut_above.append(np.nextafter(npicut_above[-1], np.inf))
npicut_below.append(np.nextafter(npicut_below[-1], -np.inf))
y = np.hstack((picut_above, picut_below, npicut_above, npicut_below))
x, y = np.meshgrid(x, y)
z = (x + 1j*y).flatten()
dataset = []
for z0 in z:
dataset.append((z0, complex(_mpmath_wrightomega(z0, 25))))
dataset = np.asarray(dataset)
FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-8).check()
@pytest.mark.slow
@check_version(mpmath, '0.19')
def test_wrightomega_region1():
# This region gets less coverage in the TestSystematic test
x = np.linspace(-2, 1)
y = np.linspace(1, 2*np.pi)
x, y = np.meshgrid(x, y)
z = (x + 1j*y).flatten()
dataset = []
for z0 in z:
dataset.append((z0, complex(_mpmath_wrightomega(z0, 25))))
dataset = np.asarray(dataset)
FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check()
@pytest.mark.slow
@check_version(mpmath, '0.19')
def test_wrightomega_region2():
# This region gets less coverage in the TestSystematic test
x = np.linspace(-2, 1)
y = np.linspace(-2*np.pi, -1)
x, y = np.meshgrid(x, y)
z = (x + 1j*y).flatten()
dataset = []
for z0 in z:
dataset.append((z0, complex(_mpmath_wrightomega(z0, 25))))
dataset = np.asarray(dataset)
FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check()
# ------------------------------------------------------------------------------
# lambertw
# ------------------------------------------------------------------------------
@pytest.mark.slow
@check_version(mpmath, '0.19')
def test_lambertw_smallz():
x, y = np.linspace(-1, 1, 25), np.linspace(-1, 1, 25)
x, y = np.meshgrid(x, y)
z = (x + 1j*y).flatten()
dataset = []
for z0 in z:
dataset.append((z0, complex(mpmath.lambertw(z0))))
dataset = np.asarray(dataset)
FuncData(sc.lambertw, dataset, 0, 1, rtol=1e-13).check()
# ------------------------------------------------------------------------------
# Systematic tests
# ------------------------------------------------------------------------------
HYPERKW = dict(maxprec=200, maxterms=200)
@pytest.mark.slow
@check_version(mpmath, '0.17')
class TestSystematic(object):
def test_airyai(self):
# oscillating function, limit range
assert_mpmath_equal(lambda z: sc.airy(z)[0],
mpmath.airyai,
[Arg(-1e8, 1e8)],
rtol=1e-5)
assert_mpmath_equal(lambda z: sc.airy(z)[0],
mpmath.airyai,
[Arg(-1e3, 1e3)])
def test_airyai_complex(self):
assert_mpmath_equal(lambda z: sc.airy(z)[0],
mpmath.airyai,
[ComplexArg()])
def test_airyai_prime(self):
# oscillating function, limit range
assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
mpmath.airyai(z, derivative=1),
[Arg(-1e8, 1e8)],
rtol=1e-5)
assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
mpmath.airyai(z, derivative=1),
[Arg(-1e3, 1e3)])
def test_airyai_prime_complex(self):
assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
mpmath.airyai(z, derivative=1),
[ComplexArg()])
def test_airybi(self):
# oscillating function, limit range
assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
mpmath.airybi(z),
[Arg(-1e8, 1e8)],
rtol=1e-5)
assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
mpmath.airybi(z),
[Arg(-1e3, 1e3)])
def test_airybi_complex(self):
assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
mpmath.airybi(z),
[ComplexArg()])
def test_airybi_prime(self):
# oscillating function, limit range
assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
mpmath.airybi(z, derivative=1),
[Arg(-1e8, 1e8)],
rtol=1e-5)
assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
mpmath.airybi(z, derivative=1),
[Arg(-1e3, 1e3)])
def test_airybi_prime_complex(self):
assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
mpmath.airybi(z, derivative=1),
[ComplexArg()])
def test_bei(self):
assert_mpmath_equal(sc.bei,
exception_to_nan(lambda z: mpmath.bei(0, z, **HYPERKW)),
[Arg(-1e3, 1e3)])
def test_ber(self):
assert_mpmath_equal(sc.ber,
exception_to_nan(lambda z: mpmath.ber(0, z, **HYPERKW)),
[Arg(-1e3, 1e3)])
def test_bernoulli(self):
assert_mpmath_equal(lambda n: sc.bernoulli(int(n))[int(n)],
lambda n: float(mpmath.bernoulli(int(n))),
[IntArg(0, 13000)],
rtol=1e-9, n=13000)
def test_besseli(self):
assert_mpmath_equal(sc.iv,
exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), Arg()],
atol=1e-270)
def test_besseli_complex(self):
assert_mpmath_equal(lambda v, z: sc.iv(v.real, z),
exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), ComplexArg()])
def test_besselj(self):
assert_mpmath_equal(sc.jv,
exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), Arg(-1e3, 1e3)],
ignore_inf_sign=True)
# loss of precision at large arguments due to oscillation
assert_mpmath_equal(sc.jv,
exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), Arg(-1e8, 1e8)],
ignore_inf_sign=True,
rtol=1e-5)
def test_besselj_complex(self):
assert_mpmath_equal(lambda v, z: sc.jv(v.real, z),
exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
[Arg(), ComplexArg()])
def test_besselk(self):
assert_mpmath_equal(sc.kv,
mpmath.besselk,
[Arg(-200, 200), Arg(0, np.inf)],
nan_ok=False, rtol=1e-12)
def test_besselk_int(self):
assert_mpmath_equal(sc.kn,
mpmath.besselk,
[IntArg(-200, 200), Arg(0, np.inf)],
nan_ok=False, rtol=1e-12)
def test_besselk_complex(self):
assert_mpmath_equal(lambda v, z: sc.kv(v.real, z),
exception_to_nan(lambda v, z: mpmath.besselk(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), ComplexArg()])
def test_bessely(self):
def mpbessely(v, x):
r = float(mpmath.bessely(v, x, **HYPERKW))
if abs(r) > 1e305:
# overflowing to inf a bit earlier is OK
r = np.inf * np.sign(r)
if abs(r) == 0 and x == 0:
# invalid result from mpmath, point x=0 is a divergence
return np.nan
return r
assert_mpmath_equal(sc.yv,
exception_to_nan(mpbessely),
[Arg(-1e100, 1e100), Arg(-1e8, 1e8)],
n=5000)
def test_bessely_complex(self):
def mpbessely(v, x):
r = complex(mpmath.bessely(v, x, **HYPERKW))
if abs(r) > 1e305:
# overflowing to inf a bit earlier is OK
olderr = np.seterr(invalid='ignore')
try:
r = np.inf * np.sign(r)
finally:
np.seterr(**olderr)
return r
assert_mpmath_equal(lambda v, z: sc.yv(v.real, z),
exception_to_nan(mpbessely),
[Arg(), ComplexArg()],
n=15000)
def test_bessely_int(self):
def mpbessely(v, x):
r = float(mpmath.bessely(v, x))
if abs(r) == 0 and x == 0:
# invalid result from mpmath, point x=0 is a divergence
return np.nan
return r
assert_mpmath_equal(lambda v, z: sc.yn(int(v), z),
exception_to_nan(mpbessely),
[IntArg(-1000, 1000), Arg(-1e8, 1e8)])
def test_beta(self):
bad_points = []
def beta(a, b, nonzero=False):
if a < -1e12 or b < -1e12:
# Function is defined here only at integers, but due
# to loss of precision this is numerically
# ill-defined. Don't compare values here.
return np.nan
if (a < 0 or b < 0) and (abs(float(a + b)) % 1) == 0:
# close to a zero of the function: mpmath and scipy
# will not round here the same, so the test needs to be
# run with an absolute tolerance
if nonzero:
bad_points.append((float(a), float(b)))
return np.nan
return mpmath.beta(a, b)
assert_mpmath_equal(sc.beta,
lambda a, b: beta(a, b, nonzero=True),
[Arg(), Arg()],
dps=400,
ignore_inf_sign=True)
assert_mpmath_equal(sc.beta,
beta,
np.array(bad_points),
dps=400,
ignore_inf_sign=True,
atol=1e-11)
def test_betainc(self):
assert_mpmath_equal(sc.betainc,
time_limited()(exception_to_nan(lambda a, b, x: mpmath.betainc(a, b, 0, x, regularized=True))),
[Arg(), Arg(), Arg()])
def test_binom(self):
bad_points = []
def binomial(n, k, nonzero=False):
if abs(k) > 1e8*(abs(n) + 1):
# The binomial is rapidly oscillating in this region,
# and the function is numerically ill-defined. Don't
# compare values here.
return np.nan
if n < k and abs(float(n-k) - np.round(float(n-k))) < 1e-15:
# close to a zero of the function: mpmath and scipy
# will not round here the same, so the test needs to be
# run with an absolute tolerance
if nonzero:
bad_points.append((float(n), float(k)))
return np.nan
return mpmath.binomial(n, k)
assert_mpmath_equal(sc.binom,
lambda n, k: binomial(n, k, nonzero=True),
[Arg(), Arg()],
dps=400)
assert_mpmath_equal(sc.binom,
binomial,
np.array(bad_points),
dps=400,
atol=1e-14)
def test_chebyt_int(self):
assert_mpmath_equal(lambda n, x: sc.eval_chebyt(int(n), x),
exception_to_nan(lambda n, x: mpmath.chebyt(n, x, **HYPERKW)),
[IntArg(), Arg()], dps=50)
@pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate")
def test_chebyt(self):
assert_mpmath_equal(sc.eval_chebyt,
lambda n, x: time_limited()(exception_to_nan(mpmath.chebyt))(n, x, **HYPERKW),
[Arg(-101, 101), Arg()], n=10000)
def test_chebyu_int(self):
assert_mpmath_equal(lambda n, x: sc.eval_chebyu(int(n), x),
exception_to_nan(lambda n, x: mpmath.chebyu(n, x, **HYPERKW)),
[IntArg(), Arg()], dps=50)
@pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate")
def test_chebyu(self):
assert_mpmath_equal(sc.eval_chebyu,
lambda n, x: time_limited()(exception_to_nan(mpmath.chebyu))(n, x, **HYPERKW),
[Arg(-101, 101), Arg()])
def test_chi(self):
def chi(x):
return sc.shichi(x)[1]
assert_mpmath_equal(chi, mpmath.chi, [Arg()])
# check asymptotic series cross-over
assert_mpmath_equal(chi, mpmath.chi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])])
def test_chi_complex(self):
def chi(z):
return sc.shichi(z)[1]
# chi oscillates as Im[z] -> +- inf, so limit range
assert_mpmath_equal(chi,
mpmath.chi,
[ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
rtol=1e-12)
def test_ci(self):
def ci(x):
return sc.sici(x)[1]
# oscillating function: limit range
assert_mpmath_equal(ci,
mpmath.ci,
[Arg(-1e8, 1e8)])
def test_ci_complex(self):
def ci(z):
return sc.sici(z)[1]
# ci oscillates as Re[z] -> +- inf, so limit range
assert_mpmath_equal(ci,
mpmath.ci,
[ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))],
rtol=1e-8)
def test_cospi(self):
eps = np.finfo(float).eps
assert_mpmath_equal(_cospi,
mpmath.cospi,
[Arg()], nan_ok=False, rtol=eps)
def test_cospi_complex(self):
assert_mpmath_equal(_cospi,
mpmath.cospi,
[ComplexArg()], nan_ok=False, rtol=1e-13)
def test_digamma(self):
assert_mpmath_equal(sc.digamma,
exception_to_nan(mpmath.digamma),
[Arg()], rtol=1e-12, dps=50)
def test_digamma_complex(self):
# Test on a cut plane because mpmath will hang. See
# test_digamma_negreal for tests on the negative real axis.
def param_filter(z):
return np.where((z.real < 0) & (np.abs(z.imag) < 1.12), False, True)
assert_mpmath_equal(sc.digamma,
exception_to_nan(mpmath.digamma),
[ComplexArg()], rtol=1e-13, dps=40,
param_filter=param_filter)
def test_e1(self):
assert_mpmath_equal(sc.exp1,
mpmath.e1,
[Arg()], rtol=1e-14)
def test_e1_complex(self):
# E_1 oscillates as Im[z] -> +- inf, so limit range
assert_mpmath_equal(sc.exp1,
mpmath.e1,
[ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
rtol=1e-11)
# Check cross-over region
assert_mpmath_equal(sc.exp1,
mpmath.e1,
(np.linspace(-50, 50, 171)[:, None] +
np.r_[0, np.logspace(-3, 2, 61),
-np.logspace(-3, 2, 11)]*1j).ravel(),
rtol=1e-11)
assert_mpmath_equal(sc.exp1,
mpmath.e1,
(np.linspace(-50, -35, 10000) + 0j),
rtol=1e-11)
def test_exprel(self):
assert_mpmath_equal(sc.exprel,
lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'),
[Arg(a=-np.log(np.finfo(np.double).max), b=np.log(np.finfo(np.double).max))])
assert_mpmath_equal(sc.exprel,
lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'),
np.array([1e-12, 1e-24, 0, 1e12, 1e24, np.inf]), rtol=1e-11)
assert_(np.isinf(sc.exprel(np.inf)))
assert_(sc.exprel(-np.inf) == 0)
def test_expm1_complex(self):
# Oscillates as a function of Im[z], so limit range to avoid loss of precision
assert_mpmath_equal(sc.expm1,
mpmath.expm1,
[ComplexArg(complex(-np.inf, -1e7), complex(np.inf, 1e7))])
def test_log1p_complex(self):
assert_mpmath_equal(sc.log1p,
lambda x: mpmath.log(x+1),
[ComplexArg()], dps=60)
def test_log1pmx(self):
assert_mpmath_equal(_log1pmx,
lambda x: mpmath.log(x + 1) - x,
[Arg()], dps=60, rtol=1e-14)
def test_ei(self):
assert_mpmath_equal(sc.expi,
mpmath.ei,
[Arg()],
rtol=1e-11)
def test_ei_complex(self):
# Ei oscillates as Im[z] -> +- inf, so limit range
assert_mpmath_equal(sc.expi,
mpmath.ei,
[ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
rtol=1e-9)
def test_ellipe(self):
assert_mpmath_equal(sc.ellipe,
mpmath.ellipe,
[Arg(b=1.0)])
def test_ellipeinc(self):
assert_mpmath_equal(sc.ellipeinc,
mpmath.ellipe,
[Arg(-1e3, 1e3), Arg(b=1.0)])
def test_ellipeinc_largephi(self):
assert_mpmath_equal(sc.ellipeinc,
mpmath.ellipe,
[Arg(), Arg()])
def test_ellipf(self):
assert_mpmath_equal(sc.ellipkinc,
mpmath.ellipf,
[Arg(-1e3, 1e3), Arg()])
def test_ellipf_largephi(self):
assert_mpmath_equal(sc.ellipkinc,
mpmath.ellipf,
[Arg(), Arg()])
def test_ellipk(self):
assert_mpmath_equal(sc.ellipk,
mpmath.ellipk,
[Arg(b=1.0)])
assert_mpmath_equal(sc.ellipkm1,
lambda m: mpmath.ellipk(1 - m),
[Arg(a=0.0)],
dps=400)
def test_ellipkinc(self):
def ellipkinc(phi, m):
return mpmath.ellippi(0, phi, m)
assert_mpmath_equal(sc.ellipkinc,
ellipkinc,
[Arg(-1e3, 1e3), Arg(b=1.0)],
ignore_inf_sign=True)
def test_ellipkinc_largephi(self):
def ellipkinc(phi, m):
return mpmath.ellippi(0, phi, m)
assert_mpmath_equal(sc.ellipkinc,
ellipkinc,
[Arg(), Arg(b=1.0)],
ignore_inf_sign=True)
def test_ellipfun_sn(self):
def sn(u, m):
# mpmath doesn't get the zero at u = 0--fix that
if u == 0:
return 0
else:
return mpmath.ellipfun("sn", u=u, m=m)
# Oscillating function --- limit range of first argument; the
# loss of precision there is an expected numerical feature
# rather than an actual bug
assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[0],
sn,
[Arg(-1e6, 1e6), Arg(a=0, b=1)],
rtol=1e-8)
def test_ellipfun_cn(self):
# see comment in ellipfun_sn
assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[1],
lambda u, m: mpmath.ellipfun("cn", u=u, m=m),
[Arg(-1e6, 1e6), Arg(a=0, b=1)],
rtol=1e-8)
def test_ellipfun_dn(self):
# see comment in ellipfun_sn
assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[2],
lambda u, m: mpmath.ellipfun("dn", u=u, m=m),
[Arg(-1e6, 1e6), Arg(a=0, b=1)],
rtol=1e-8)
def test_erf(self):
assert_mpmath_equal(sc.erf,
lambda z: mpmath.erf(z),
[Arg()])
def test_erf_complex(self):
assert_mpmath_equal(sc.erf,
lambda z: mpmath.erf(z),
[ComplexArg()], n=200)
def test_erfc(self):
assert_mpmath_equal(sc.erfc,
exception_to_nan(lambda z: mpmath.erfc(z)),
[Arg()], rtol=1e-13)
def test_erfc_complex(self):
assert_mpmath_equal(sc.erfc,
exception_to_nan(lambda z: mpmath.erfc(z)),
[ComplexArg()], n=200)
def test_erfi(self):
assert_mpmath_equal(sc.erfi,
mpmath.erfi,
[Arg()], n=200)
def test_erfi_complex(self):
assert_mpmath_equal(sc.erfi,
mpmath.erfi,
[ComplexArg()], n=200)
def test_ndtr(self):
assert_mpmath_equal(sc.ndtr,
exception_to_nan(lambda z: mpmath.ncdf(z)),
[Arg()], n=200)
def test_ndtr_complex(self):
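        # the normal CDF in terms of the complementary error function:
        # ndtr(z) = erfc(-z/sqrt(2))/2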
assert_mpmath_equal(sc.ndtr,
lambda z: mpmath.erfc(-z/np.sqrt(2.))/2.,
[ComplexArg(a=complex(-10000, -10000), b=complex(10000, 10000))], n=400)
def test_log_ndtr(self):
assert_mpmath_equal(sc.log_ndtr,
exception_to_nan(lambda z: mpmath.log(mpmath.ncdf(z))),
[Arg()], n=600, dps=300)
def test_log_ndtr_complex(self):
assert_mpmath_equal(sc.log_ndtr,
exception_to_nan(lambda z: mpmath.log(mpmath.erfc(-z/np.sqrt(2.))/2.)),
[ComplexArg(a=complex(-10000, -100),
b=complex(10000, 100))], n=200, dps=300)
def test_eulernum(self):
assert_mpmath_equal(lambda n: sc.euler(n)[-1],
mpmath.eulernum,
[IntArg(1, 10000)], n=10000)
def test_expint(self):
assert_mpmath_equal(sc.expn,
mpmath.expint,
[IntArg(0, 200), Arg(0, np.inf)],
rtol=1e-13, dps=160)
def test_fresnels(self):
def fresnels(x):
return sc.fresnel(x)[0]
assert_mpmath_equal(fresnels,
mpmath.fresnels,
[Arg()])
def test_fresnelc(self):
def fresnelc(x):
return sc.fresnel(x)[1]
assert_mpmath_equal(fresnelc,
mpmath.fresnelc,
[Arg()])
def test_gamma(self):
assert_mpmath_equal(sc.gamma,
exception_to_nan(mpmath.gamma),
[Arg()])
def test_gamma_complex(self):
assert_mpmath_equal(sc.gamma,
exception_to_nan(mpmath.gamma),
[ComplexArg()], rtol=5e-13)
def test_gammainc(self):
# Larger arguments are tested in test_data.py:test_local
assert_mpmath_equal(sc.gammainc,
lambda z, b: mpmath.gammainc(z, b=b, regularized=True),
[Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)],
nan_ok=False, rtol=1e-11)
def test_gammaincc(self):
# Larger arguments are tested in test_data.py:test_local
assert_mpmath_equal(sc.gammaincc,
lambda z, a: mpmath.gammainc(z, a=a, regularized=True),
[Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)],
nan_ok=False, rtol=1e-11)
def test_gammaln(self):
# The real part of loggamma is log(|gamma(z)|).
def f(z):
return mpmath.loggamma(z).real
assert_mpmath_equal(sc.gammaln, exception_to_nan(f), [Arg()])
@pytest.mark.xfail(run=False)
def test_gegenbauer(self):
assert_mpmath_equal(sc.eval_gegenbauer,
exception_to_nan(mpmath.gegenbauer),
[Arg(-1e3, 1e3), Arg(), Arg()])
def test_gegenbauer_int(self):
# Redefine functions to deal with numerical + mpmath issues
def gegenbauer(n, a, x):
# Avoid overflow at large `a` (mpmath would need an even larger
# dps to handle this correctly, so just skip this region)
if abs(a) > 1e100:
return np.nan
            # Deal with n=0, n=1 correctly; mpmath 0.17 doesn't always
            # handle these correctly
if n == 0:
r = 1.0
elif n == 1:
r = 2*a*x
else:
r = mpmath.gegenbauer(n, a, x)
            # Mpmath 0.17 gives wrong results (spurious zero) in some cases, so
            # recompute the value with a slightly perturbed argument
if float(r) == 0 and a < -1 and float(a) == int(float(a)):
r = mpmath.gegenbauer(n, a + mpmath.mpf('1e-50'), x)
if abs(r) < mpmath.mpf('1e-50'):
r = mpmath.mpf('0.0')
# Differing overflow thresholds in scipy vs. mpmath
if abs(r) > 1e270:
return np.inf
return r
def sc_gegenbauer(n, a, x):
r = sc.eval_gegenbauer(int(n), a, x)
# Differing overflow thresholds in scipy vs. mpmath
if abs(r) > 1e270:
return np.inf
return r
assert_mpmath_equal(sc_gegenbauer,
exception_to_nan(gegenbauer),
[IntArg(0, 100), Arg(-1e9, 1e9), Arg()],
n=40000, dps=100,
ignore_inf_sign=True, rtol=1e-6)
# Check the small-x expansion
assert_mpmath_equal(sc_gegenbauer,
exception_to_nan(gegenbauer),
[IntArg(0, 100), Arg(), FixedArg(np.logspace(-30, -4, 30))],
dps=100,
ignore_inf_sign=True)
@pytest.mark.xfail(run=False)
def test_gegenbauer_complex(self):
assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(int(n), a.real, x),
exception_to_nan(mpmath.gegenbauer),
[IntArg(0, 100), Arg(), ComplexArg()])
@nonfunctional_tooslow
def test_gegenbauer_complex_general(self):
assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(n.real, a.real, x),
exception_to_nan(mpmath.gegenbauer),
[Arg(-1e3, 1e3), Arg(), ComplexArg()])
def test_hankel1(self):
assert_mpmath_equal(sc.hankel1,
exception_to_nan(lambda v, x: mpmath.hankel1(v, x,
**HYPERKW)),
[Arg(-1e20, 1e20), Arg()])
def test_hankel2(self):
assert_mpmath_equal(sc.hankel2,
exception_to_nan(lambda v, x: mpmath.hankel2(v, x, **HYPERKW)),
[Arg(-1e20, 1e20), Arg()])
@pytest.mark.xfail(run=False, reason="issues at intermediately large orders")
def test_hermite(self):
assert_mpmath_equal(lambda n, x: sc.eval_hermite(int(n), x),
exception_to_nan(mpmath.hermite),
[IntArg(0, 10000), Arg()])
# hurwitz: same as zeta
def test_hyp0f1(self):
# mpmath reports no convergence unless maxterms is large enough
KW = dict(maxprec=400, maxterms=1500)
# n=500 (non-xslow default) fails for one bad point
assert_mpmath_equal(sc.hyp0f1,
lambda a, x: mpmath.hyp0f1(a, x, **KW),
[Arg(-1e7, 1e7), Arg(0, 1e5)],
n=5000)
    # NB: The range of the second parameter ("z") is limited from below
    # because of an overflow in the intermediate calculations. The way to
    # fix it is to implement an asymptotic expansion for Bessel J
    # (similar to what is implemented for Bessel I here).
def test_hyp0f1_complex(self):
assert_mpmath_equal(lambda a, z: sc.hyp0f1(a.real, z),
exception_to_nan(lambda a, x: mpmath.hyp0f1(a, x, **HYPERKW)),
[Arg(-10, 10), ComplexArg(complex(-120, -120), complex(120, 120))])
    # NB: The range of the first parameter ("v") is limited by an overflow
    # in the intermediate calculations. This can be fixed by implementing an
    # asymptotic expansion for Bessel functions at large order.
@pytest.mark.xfail(run=False)
def test_hyp1f1(self):
assert_mpmath_equal(inf_to_nan(sc.hyp1f1),
exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)),
[Arg(-1e5, 1e5), Arg(-1e5, 1e5), Arg()],
n=2000)
@pytest.mark.xfail(run=False)
def test_hyp1f1_complex(self):
assert_mpmath_equal(inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)),
exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)),
[Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()],
n=2000)
@pytest.mark.xfail(run=False)
def test_hyp1f2(self):
def hyp1f2(a, b, c, x):
v, err = sc.hyp1f2(a, b, c, x)
if abs(err) > max(1, abs(v)) * 1e-7:
return np.nan
return v
assert_mpmath_equal(hyp1f2,
exception_to_nan(lambda a, b, c, x: mpmath.hyp1f2(a, b, c, x, **HYPERKW)),
[Arg(), Arg(), Arg(), Arg()],
n=20000)
@pytest.mark.xfail(run=False)
def test_hyp2f0(self):
def hyp2f0(a, b, x):
v, err = sc.hyp2f0(a, b, x, 1)
if abs(err) > max(1, abs(v)) * 1e-7:
return np.nan
return v
assert_mpmath_equal(hyp2f0,
lambda a, b, x: time_limited(0.1)(exception_to_nan(trace_args(mpmath.hyp2f0)))(
a, b, x, **HYPERKW),
[Arg(), Arg(), Arg()])
@pytest.mark.xfail(run=False, reason="spurious inf (or inf with wrong sign) for some argument values")
def test_hyp2f1(self):
assert_mpmath_equal(sc.hyp2f1,
exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)),
[Arg(), Arg(), Arg(), Arg()])
@nonfunctional_tooslow
def test_hyp2f1_complex(self):
# Scipy's hyp2f1 seems to have performance and accuracy problems
assert_mpmath_equal(lambda a, b, c, x: sc.hyp2f1(a.real, b.real, c.real, x),
exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)),
[Arg(-1e2, 1e2), Arg(-1e2, 1e2), Arg(-1e2, 1e2), ComplexArg()],
n=10)
@pytest.mark.xfail(run=False)
def test_hyperu(self):
assert_mpmath_equal(sc.hyperu,
exception_to_nan(lambda a, b, x: mpmath.hyperu(a, b, x, **HYPERKW)),
[Arg(), Arg(), Arg()])
@pytest.mark.xfail(condition=_is_32bit_platform,
reason="mpmath issue gh-342: unsupported operand mpz, long for pow")
def test_igam_fac(self):
def mp_igam_fac(a, x):
return mpmath.power(x, a)*mpmath.exp(-x)/mpmath.gamma(a)
assert_mpmath_equal(_igam_fac,
mp_igam_fac,
[Arg(0, 1e14, inclusive_a=False), Arg(0, 1e14)],
rtol=1e-10)
def test_j0(self):
        # At large arguments j0(x) ~ cos(x + phi)/sqrt(x), and the phase
        # of the cosine loses precision there.
#
# This is numerically expected behavior, so we compare only up to
# 1e8 = 1e15 * 1e-7
assert_mpmath_equal(sc.j0,
mpmath.j0,
[Arg(-1e3, 1e3)])
assert_mpmath_equal(sc.j0,
mpmath.j0,
[Arg(-1e8, 1e8)],
rtol=1e-5)
def test_j1(self):
# See comment in test_j0
assert_mpmath_equal(sc.j1,
mpmath.j1,
[Arg(-1e3, 1e3)])
assert_mpmath_equal(sc.j1,
mpmath.j1,
[Arg(-1e8, 1e8)],
rtol=1e-5)
@pytest.mark.xfail(run=False)
def test_jacobi(self):
assert_mpmath_equal(sc.eval_jacobi,
exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)),
[Arg(), Arg(), Arg(), Arg()])
assert_mpmath_equal(lambda n, b, c, x: sc.eval_jacobi(int(n), b, c, x),
exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)),
[IntArg(), Arg(), Arg(), Arg()])
def test_jacobi_int(self):
# Redefine functions to deal with numerical + mpmath issues
def jacobi(n, a, b, x):
# Mpmath does not handle n=0 case always correctly
if n == 0:
return 1.0
return mpmath.jacobi(n, a, b, x)
assert_mpmath_equal(lambda n, a, b, x: sc.eval_jacobi(int(n), a, b, x),
lambda n, a, b, x: exception_to_nan(jacobi)(n, a, b, x, **HYPERKW),
[IntArg(), Arg(), Arg(), Arg()],
n=20000, dps=50)
def test_kei(self):
def kei(x):
if x == 0:
# work around mpmath issue at x=0
return -pi/4
return exception_to_nan(mpmath.kei)(0, x, **HYPERKW)
assert_mpmath_equal(sc.kei,
kei,
[Arg(-1e30, 1e30)], n=1000)
def test_ker(self):
assert_mpmath_equal(sc.ker,
exception_to_nan(lambda x: mpmath.ker(0, x, **HYPERKW)),
[Arg(-1e30, 1e30)], n=1000)
@nonfunctional_tooslow
def test_laguerre(self):
assert_mpmath_equal(trace_args(sc.eval_laguerre),
lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW),
[Arg(), Arg()])
def test_laguerre_int(self):
assert_mpmath_equal(lambda n, x: sc.eval_laguerre(int(n), x),
lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW),
[IntArg(), Arg()], n=20000)
@pytest.mark.xfail(condition=_is_32bit_platform, reason="see gh-3551 for bad points")
def test_lambertw_real(self):
assert_mpmath_equal(lambda x, k: sc.lambertw(x, int(k.real)),
lambda x, k: mpmath.lambertw(x, int(k.real)),
[ComplexArg(-np.inf, np.inf), IntArg(0, 10)],
rtol=1e-13, nan_ok=False)
def test_lanczos_sum_expg_scaled(self):
maxgamma = 171.624376956302725
e = np.exp(1)
g = 6.024680040776729583740234375
def gamma(x):
with np.errstate(over='ignore'):
fac = ((x + g - 0.5)/e)**(x - 0.5)
if fac != np.inf:
res = fac*_lanczos_sum_expg_scaled(x)
else:
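                    # the full power overflowed; take its square root and
                    # multiply it in twice below instead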
fac = ((x + g - 0.5)/e)**(0.5*(x - 0.5))
res = fac*_lanczos_sum_expg_scaled(x)
res *= fac
return res
assert_mpmath_equal(gamma,
mpmath.gamma,
[Arg(0, maxgamma, inclusive_a=False)],
rtol=1e-13)
@nonfunctional_tooslow
def test_legendre(self):
assert_mpmath_equal(sc.eval_legendre,
mpmath.legendre,
[Arg(), Arg()])
def test_legendre_int(self):
assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x),
lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW),
[IntArg(), Arg()],
n=20000)
# Check the small-x expansion
assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x),
lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW),
[IntArg(), FixedArg(np.logspace(-30, -4, 20))])
def test_legenp(self):
def lpnm(n, m, z):
try:
v = sc.lpmn(m, n, z)[0][-1,-1]
except ValueError:
return np.nan
if abs(v) > 1e306:
# harmonize overflow to inf
v = np.inf * np.sign(v.real)
return v
def lpnm_2(n, m, z):
v = sc.lpmv(m, n, z)
if abs(v) > 1e306:
# harmonize overflow to inf
v = np.inf * np.sign(v.real)
return v
def legenp(n, m, z):
if (z == 1 or z == -1) and int(n) == n:
# Special case (mpmath may give inf, we take the limit by
# continuity)
if m == 0:
if n < 0:
n = -n - 1
return mpmath.power(mpmath.sign(z), n)
else:
return 0
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
typ = 2 if abs(z) < 1 else 3
v = exception_to_nan(mpmath.legenp)(n, m, z, type=typ)
if abs(v) > 1e306:
# harmonize overflow to inf
v = mpmath.inf * mpmath.sign(v.real)
return v
assert_mpmath_equal(lpnm,
legenp,
[IntArg(-100, 100), IntArg(-100, 100), Arg()])
assert_mpmath_equal(lpnm_2,
legenp,
[IntArg(-100, 100), Arg(-100, 100), Arg(-1, 1)],
atol=1e-10)
def test_legenp_complex_2(self):
def clpnm(n, m, z):
try:
return sc.clpmn(m.real, n.real, z, type=2)[0][-1,-1]
except ValueError:
return np.nan
def legenp(n, m, z):
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=2)
# mpmath is quite slow here
x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3])
y = np.array([-1e3, -0.5, 0.5, 1.3])
z = (x[:,None] + 1j*y[None,:]).ravel()
assert_mpmath_equal(clpnm,
legenp,
[FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)],
rtol=1e-6,
n=500)
def test_legenp_complex_3(self):
def clpnm(n, m, z):
try:
return sc.clpmn(m.real, n.real, z, type=3)[0][-1,-1]
except ValueError:
return np.nan
def legenp(n, m, z):
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=3)
# mpmath is quite slow here
x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3])
y = np.array([-1e3, -0.5, 0.5, 1.3])
z = (x[:,None] + 1j*y[None,:]).ravel()
assert_mpmath_equal(clpnm,
legenp,
[FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)],
rtol=1e-6,
n=500)
@pytest.mark.xfail(run=False, reason="apparently picks wrong function at |z| > 1")
def test_legenq(self):
def lqnm(n, m, z):
return sc.lqmn(m, n, z)[0][-1,-1]
def legenq(n, m, z):
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
return exception_to_nan(mpmath.legenq)(n, m, z, type=2)
assert_mpmath_equal(lqnm,
legenq,
[IntArg(0, 100), IntArg(0, 100), Arg()])
@nonfunctional_tooslow
def test_legenq_complex(self):
def lqnm(n, m, z):
return sc.lqmn(int(m.real), int(n.real), z)[0][-1,-1]
def legenq(n, m, z):
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
return exception_to_nan(mpmath.legenq)(int(n.real), int(m.real), z, type=2)
assert_mpmath_equal(lqnm,
legenq,
[IntArg(0, 100), IntArg(0, 100), ComplexArg()],
n=100)
def test_lgam1p(self):
def param_filter(x):
# Filter the poles
return np.where((np.floor(x) == x) & (x <= 0), False, True)
def mp_lgam1p(z):
# The real part of loggamma is log(|gamma(z)|)
return mpmath.loggamma(1 + z).real
assert_mpmath_equal(_lgam1p,
mp_lgam1p,
[Arg()], rtol=1e-13, dps=100,
param_filter=param_filter)
def test_loggamma(self):
def mpmath_loggamma(z):
try:
res = mpmath.loggamma(z)
except ValueError:
res = complex(np.nan, np.nan)
return res
assert_mpmath_equal(sc.loggamma,
mpmath_loggamma,
[ComplexArg()], nan_ok=False,
distinguish_nan_and_inf=False, rtol=5e-14)
@pytest.mark.xfail(run=False)
def test_pcfd(self):
def pcfd(v, x):
return sc.pbdv(v, x)[0]
assert_mpmath_equal(pcfd,
exception_to_nan(lambda v, x: mpmath.pcfd(v, x, **HYPERKW)),
[Arg(), Arg()])
@pytest.mark.xfail(run=False, reason="it's not the same as the mpmath function --- maybe different definition?")
def test_pcfv(self):
def pcfv(v, x):
return sc.pbvv(v, x)[0]
assert_mpmath_equal(pcfv,
lambda v, x: time_limited()(exception_to_nan(mpmath.pcfv))(v, x, **HYPERKW),
[Arg(), Arg()], n=1000)
def test_pcfw(self):
def pcfw(a, x):
return sc.pbwa(a, x)[0]
def dpcfw(a, x):
return sc.pbwa(a, x)[1]
def mpmath_dpcfw(a, x):
return mpmath.diff(mpmath.pcfw, (a, x), (0, 1))
# The Zhang and Jin implementation only uses Taylor series and
# is thus accurate in only a very small range.
assert_mpmath_equal(pcfw,
mpmath.pcfw,
[Arg(-5, 5), Arg(-5, 5)], rtol=1e-8, n=100)
assert_mpmath_equal(dpcfw,
mpmath_dpcfw,
[Arg(-5, 5), Arg(-5, 5)], rtol=1e-9, n=100)
@pytest.mark.xfail(run=False, reason="issues at large arguments (atol OK, rtol not) and <eps-close to z=0")
def test_polygamma(self):
assert_mpmath_equal(sc.polygamma,
time_limited()(exception_to_nan(mpmath.polygamma)),
[IntArg(0, 1000), Arg()])
def test_rgamma(self):
def rgamma(x):
if x < -8000:
return np.inf
else:
v = mpmath.rgamma(x)
return v
# n=500 (non-xslow default) fails for one bad point
assert_mpmath_equal(sc.rgamma,
rgamma,
[Arg()],
n=5000,
ignore_inf_sign=True)
def test_rgamma_complex(self):
assert_mpmath_equal(sc.rgamma,
exception_to_nan(mpmath.rgamma),
[ComplexArg()], rtol=5e-13)
@pytest.mark.xfail(reason=("see gh-3551 for bad points on 32 bit "
"systems and gh-8095 for another bad "
"point"))
def test_rf(self):
if LooseVersion(mpmath.__version__) >= LooseVersion("1.0.0"):
# no workarounds needed
mppoch = mpmath.rf
else:
def mppoch(a, m):
# deal with cases where the result in double precision
# hits exactly a non-positive integer, but the
# corresponding extended-precision mpf floats don't
if float(a + m) == int(a + m) and float(a + m) <= 0:
a = mpmath.mpf(a)
m = int(a + m) - a
return mpmath.rf(a, m)
assert_mpmath_equal(sc.poch,
mppoch,
[Arg(), Arg()],
dps=400)
def test_sinpi(self):
eps = np.finfo(float).eps
assert_mpmath_equal(_sinpi, mpmath.sinpi,
[Arg()], nan_ok=False, rtol=eps)
def test_sinpi_complex(self):
assert_mpmath_equal(_sinpi, mpmath.sinpi,
[ComplexArg()], nan_ok=False, rtol=2e-14)
def test_shi(self):
def shi(x):
return sc.shichi(x)[0]
assert_mpmath_equal(shi, mpmath.shi, [Arg()])
# check asymptotic series cross-over
assert_mpmath_equal(shi, mpmath.shi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])])
def test_shi_complex(self):
def shi(z):
return sc.shichi(z)[0]
# shi oscillates as Im[z] -> +- inf, so limit range
assert_mpmath_equal(shi,
mpmath.shi,
[ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
rtol=1e-12)
def test_si(self):
def si(x):
return sc.sici(x)[0]
assert_mpmath_equal(si, mpmath.si, [Arg()])
def test_si_complex(self):
def si(z):
return sc.sici(z)[0]
# si oscillates as Re[z] -> +- inf, so limit range
assert_mpmath_equal(si,
mpmath.si,
[ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))],
rtol=1e-12)
def test_spence(self):
# mpmath uses a different convention for the dilogarithm
def dilog(x):
return mpmath.polylog(2, 1 - x)
# Spence has a branch cut on the negative real axis
assert_mpmath_equal(sc.spence,
exception_to_nan(dilog),
[Arg(0, np.inf)], rtol=1e-14)
def test_spence_complex(self):
def dilog(z):
return mpmath.polylog(2, 1 - z)
assert_mpmath_equal(sc.spence,
exception_to_nan(dilog),
[ComplexArg()], rtol=1e-14)
def test_spherharm(self):
def spherharm(l, m, theta, phi):
if m > l:
return np.nan
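            # scipy's sph_harm and mpmath's spherharm use different
            # argument orders and angle conventions; the swapped call
            # below accounts for this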
return sc.sph_harm(m, l, phi, theta)
assert_mpmath_equal(spherharm,
mpmath.spherharm,
[IntArg(0, 100), IntArg(0, 100),
Arg(a=0, b=pi), Arg(a=0, b=2*pi)],
atol=1e-8, n=6000,
dps=150)
def test_struveh(self):
assert_mpmath_equal(sc.struve,
exception_to_nan(mpmath.struveh),
[Arg(-1e4, 1e4), Arg(0, 1e4)],
rtol=5e-10)
def test_struvel(self):
def mp_struvel(v, z):
if v < 0 and z < -v and abs(v) > 1000:
# larger DPS needed for correct results
old_dps = mpmath.mp.dps
try:
mpmath.mp.dps = 300
return mpmath.struvel(v, z)
finally:
mpmath.mp.dps = old_dps
return mpmath.struvel(v, z)
assert_mpmath_equal(sc.modstruve,
exception_to_nan(mp_struvel),
[Arg(-1e4, 1e4), Arg(0, 1e4)],
rtol=5e-10,
ignore_inf_sign=True)
def test_wrightomega(self):
assert_mpmath_equal(sc.wrightomega,
lambda z: _mpmath_wrightomega(z, 25),
[ComplexArg()], rtol=1e-14, nan_ok=False)
def test_zeta(self):
assert_mpmath_equal(sc.zeta,
exception_to_nan(mpmath.zeta),
[Arg(a=1, b=1e10, inclusive_a=False),
Arg(a=0, inclusive_a=False)])
def test_zetac(self):
assert_mpmath_equal(sc.zetac,
lambda x: mpmath.zeta(x) - 1,
[Arg(-100, 100)],
nan_ok=False, dps=45, rtol=1e-13)
def test_boxcox(self):
def mp_boxcox(x, lmbda):
x = mpmath.mp.mpf(x)
lmbda = mpmath.mp.mpf(lmbda)
if lmbda == 0:
return mpmath.mp.log(x)
else:
return mpmath.mp.powm1(x, lmbda) / lmbda
assert_mpmath_equal(sc.boxcox,
exception_to_nan(mp_boxcox),
[Arg(a=0, inclusive_a=False), Arg()],
n=200,
dps=60,
rtol=1e-13)
def test_boxcox1p(self):
def mp_boxcox1p(x, lmbda):
x = mpmath.mp.mpf(x)
lmbda = mpmath.mp.mpf(lmbda)
one = mpmath.mp.mpf(1)
if lmbda == 0:
return mpmath.mp.log(one + x)
else:
return mpmath.mp.powm1(one + x, lmbda) / lmbda
assert_mpmath_equal(sc.boxcox1p,
exception_to_nan(mp_boxcox1p),
[Arg(a=-1, inclusive_a=False), Arg()],
n=200,
dps=60,
rtol=1e-13)
def test_spherical_jn(self):
def mp_spherical_jn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n), z),
exception_to_nan(mp_spherical_jn),
[IntArg(0, 200), Arg(-1e8, 1e8)],
dps=300)
def test_spherical_jn_complex(self):
def mp_spherical_jn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n.real), z),
exception_to_nan(mp_spherical_jn),
[IntArg(0, 200), ComplexArg()])
def test_spherical_yn(self):
def mp_spherical_yn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n), z),
exception_to_nan(mp_spherical_yn),
[IntArg(0, 200), Arg(-1e10, 1e10)],
dps=100)
def test_spherical_yn_complex(self):
def mp_spherical_yn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n.real), z),
exception_to_nan(mp_spherical_yn),
[IntArg(0, 200), ComplexArg()])
def test_spherical_in(self):
def mp_spherical_in(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n), z),
exception_to_nan(mp_spherical_in),
[IntArg(0, 200), Arg()],
dps=200, atol=10**(-278))
def test_spherical_in_complex(self):
def mp_spherical_in(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n.real), z),
exception_to_nan(mp_spherical_in),
[IntArg(0, 200), ComplexArg()])
def test_spherical_kn(self):
def mp_spherical_kn(n, z):
out = (mpmath.besselk(n + mpmath.mpf(1)/2, z) *
mpmath.sqrt(mpmath.pi/(2*mpmath.mpmathify(z))))
if mpmath.mpmathify(z).imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n), z),
exception_to_nan(mp_spherical_kn),
[IntArg(0, 150), Arg()],
dps=100)
@pytest.mark.xfail(run=False, reason="Accuracy issues near z = -1 inherited from kv.")
def test_spherical_kn_complex(self):
def mp_spherical_kn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besselk(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n.real), z),
exception_to_nan(mp_spherical_kn),
[IntArg(0, 200), ComplexArg()],
dps=200)
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_spfun_stats.py ----
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_array_equal,
assert_array_almost_equal_nulp, assert_almost_equal)
from pytest import raises as assert_raises
from scipy.special import gammaln, multigammaln
class TestMultiGammaLn(object):
def test1(self):
# A test of the identity
# Gamma_1(a) = Gamma(a)
np.random.seed(1234)
a = np.abs(np.random.randn())
assert_array_equal(multigammaln(a, 1), gammaln(a))
def test2(self):
# A test of the identity
# Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5)
a = np.array([2.5, 10.0])
result = multigammaln(a, 2)
expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5)
assert_almost_equal(result, expected)
def test_bararg(self):
assert_raises(ValueError, multigammaln, 0.5, 1.2)
def _check_multigammaln_array_result(a, d):
# Test that the shape of the array returned by multigammaln
# matches the input shape, and that all the values match
# the value computed when multigammaln is called with a scalar.
result = multigammaln(a, d)
assert_array_equal(a.shape, result.shape)
a1 = a.ravel()
result1 = result.ravel()
for i in range(a.size):
assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d))
def test_multigammaln_array_arg():
# Check that the array returned by multigammaln has the correct
# shape and contains the correct values. The cases have arrays
    # with several different shapes.
# The cases include a regression test for ticket #1849
# (a = np.array([2.0]), an array with a single element).
np.random.seed(1234)
cases = [
# a, d
(np.abs(np.random.randn(3, 2)) + 5, 5),
(np.abs(np.random.randn(1, 2)) + 5, 5),
(np.arange(10.0, 18.0).reshape(2, 2, 2), 3),
(np.array([2.0]), 3),
(np.float64(2.0), 3),
]
for a, d in cases:
_check_multigammaln_array_result(a, d)
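# A minimal standalone sketch of the d = 2 identity exercised in test2
# above (not part of the original suite; a = 7.5 is an arbitrary point):
def _demo_multigammaln_identity():
    a = 7.5
    expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5)
    assert_almost_equal(multigammaln(a, 2), expected)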
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_loggamma.py ----
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_
from scipy.special._testutils import FuncData
from scipy.special import gamma, gammaln, loggamma
def test_identities1():
# test the identity exp(loggamma(z)) = gamma(z)
x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
y = x.copy()
x, y = np.meshgrid(x, y)
z = (x + 1J*y).flatten()
dataset = np.vstack((z, gamma(z))).T
def f(z):
return np.exp(loggamma(z))
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_identities2():
# test the identity loggamma(z + 1) = log(z) + loggamma(z)
x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
y = x.copy()
x, y = np.meshgrid(x, y)
z = (x + 1J*y).flatten()
dataset = np.vstack((z, np.log(z) + loggamma(z))).T
def f(z):
return loggamma(z + 1)
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_complex_dispatch_realpart():
# Test that the real parts of loggamma and gammaln agree on the
# real axis.
x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5
dataset = np.vstack((x, gammaln(x))).T
def f(z):
z = np.array(z, dtype='complex128')
return loggamma(z).real
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_real_dispatch():
x = np.logspace(-10, 10) + 0.5
dataset = np.vstack((x, gammaln(x))).T
FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
assert_(loggamma(0) == np.inf)
assert_(np.isnan(loggamma(-1)))
def test_gh_6536():
z = loggamma(complex(-3.4, +0.0))
zbar = loggamma(complex(-3.4, -0.0))
assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
def test_branch_cut():
# Make sure negative zero is treated correctly
x = -np.logspace(300, -30, 100)
z = np.asarray([complex(x0, 0.0) for x0 in x])
zbar = np.asarray([complex(x0, -0.0) for x0 in x])
assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
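# Why loggamma rather than log(gamma(z)): gamma overflows double precision
# long before its logarithm does. A hedged sketch (the point z = 200 is an
# arbitrary illustration, not part of the original suite):
def _demo_loggamma_avoids_overflow():
    assert_(np.isinf(gamma(200.0)))        # |gamma(200)| ~ 4e372 overflows
    assert_(np.isfinite(loggamma(200.0)))  # its log, ~857.9, is representable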
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_spence.py ----
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import sqrt, log, pi
from scipy.special._testutils import FuncData
from scipy.special import spence
def test_consistency():
# Make sure the implementation of spence for real arguments
# agrees with the implementation of spence for imaginary arguments.
x = np.logspace(-30, 300, 200)
dataset = np.vstack((x + 0j, spence(x))).T
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
def test_special_points():
# Check against known values of Spence's function.
phi = (1 + sqrt(5))/2
dataset = [(1, 0),
(2, -pi**2/12),
(0.5, pi**2/12 - log(2)**2/2),
(0, pi**2/6),
(-1, pi**2/4 - 1j*pi*log(2)),
((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2),
((3 - sqrt(5))/2, pi**2/10 - log(phi)**2),
(phi, -pi**2/15 + log(phi)**2/2),
# Corrected from Zagier, "The Dilogarithm Function"
((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)]
dataset = np.asarray(dataset)
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
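# The convention note above in code: scipy's spence(z) equals the classical
# dilogarithm Li_2 evaluated at 1 - z. A hedged sketch assuming mpmath is
# available (z = 0.25 is an arbitrary point):
def _demo_spence_convention():
    import mpmath
    z = 0.25
    assert np.allclose(spence(z), float(mpmath.polylog(2, 1 - z)), rtol=1e-14)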
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_precompute_expn_asy.py ----
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_equal
from scipy.special._testutils import check_version, MissingModule
from scipy.special._precompute.expn_asy import generate_A
try:
import sympy
from sympy import Poly
except ImportError:
sympy = MissingModule("sympy")
@check_version(sympy, "1.0")
def test_generate_A():
# Data from DLMF 8.20.5
x = sympy.symbols('x')
Astd = [Poly(1, x),
Poly(1, x),
Poly(1 - 2*x),
Poly(1 - 8*x + 6*x**2)]
Ares = generate_A(len(Astd))
for p, q in zip(Astd, Ares):
assert_equal(p, q)
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_pcf.py ----
"""Tests for parabolic cylinder functions.
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import scipy.special as sc
def test_pbwa_segfault():
# Regression test for https://github.com/scipy/scipy/issues/6208.
#
# Data generated by mpmath.
#
w = 1.02276567211316867161
wp = -0.48887053372346189882
assert_allclose(sc.pbwa(0, 0), (w, wp), rtol=1e-13, atol=0)
def test_pbwa_nan():
# Check that NaN's are returned outside of the range in which the
# implementation is accurate.
pts = [(-6, -6), (-6, 6), (6, -6), (6, 6)]
for p in pts:
assert_equal(sc.pbwa(*p), (np.nan, np.nan))
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_zeta.py ----
from __future__ import division, print_function, absolute_import
import scipy.special as sc
import numpy as np
from numpy.testing import assert_, assert_equal, assert_allclose
def test_zeta():
assert_allclose(sc.zeta(2,2), np.pi**2/6 - 1, rtol=1e-12)
def test_zeta_1arg():
assert_allclose(sc.zeta(2), np.pi**2/6, rtol=1e-12)
assert_allclose(sc.zeta(4), np.pi**4/90, rtol=1e-12)
def test_zetac():
assert_equal(sc.zetac(0), -1.5)
assert_equal(sc.zetac(1.0), np.inf)
# Expected values in the following were computed using
# Wolfram Alpha `Zeta[x] - 1`:
rtol = 1e-12
assert_allclose(sc.zetac(-2.1), -0.9972705002153750, rtol=rtol)
assert_allclose(sc.zetac(0.8), -5.437538415895550, rtol=rtol)
assert_allclose(sc.zetac(0.9999), -10000.42279161673, rtol=rtol)
assert_allclose(sc.zetac(9), 0.002008392826082214, rtol=rtol)
assert_allclose(sc.zetac(50), 8.881784210930816e-16, rtol=rtol)
assert_allclose(sc.zetac(75), 2.646977960169853e-23, rtol=rtol)
def test_zetac_negative_even():
pts = [-2, -50, -100]
for p in pts:
assert_equal(sc.zetac(p), -1)
def test_zetac_inf():
assert_equal(sc.zetac(np.inf), 0.0)
assert_(np.isnan(sc.zetac(-np.inf)))
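# Why zetac exists: for large x, zeta(x) = 1 + 2**-x + ..., and the tiny
# tail is lost when zeta(x) - 1 is formed in double precision, while
# zetac(x) retains it. A hedged sketch (x = 60 is an arbitrary choice):
def _demo_zetac_precision():
    assert_equal(sc.zeta(60) - 1, 0.0)  # the ~8.7e-19 correction rounds away
    assert_(0 < sc.zetac(60) < 1e-18)   # zetac keeps it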
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/tests/test_spherical_bessel.py ----
#
# Tests of spherical Bessel functions.
#
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose,
assert_array_almost_equal)
import pytest
from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi
from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn
from scipy.integrate import quad
class TestSphericalJn:
def test_spherical_jn_exact(self):
# http://dlmf.nist.gov/10.49.E3
# Note: exact expression is numerically stable only for small
# n or z >> n.
x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
assert_allclose(spherical_jn(2, x),
(-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x))
def test_spherical_jn_recurrence_complex(self):
# http://dlmf.nist.gov/10.51.E1
n = np.array([1, 2, 3, 7, 12])
x = 1.1 + 1.5j
assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x),
(2*n + 1)/x*spherical_jn(n, x))
def test_spherical_jn_recurrence_real(self):
# http://dlmf.nist.gov/10.51.E1
n = np.array([1, 2, 3, 7, 12])
x = 0.12
assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1,x),
(2*n + 1)/x*spherical_jn(n, x))
def test_spherical_jn_inf_real(self):
# http://dlmf.nist.gov/10.52.E3
n = 6
x = np.array([-inf, inf])
assert_allclose(spherical_jn(n, x), np.array([0, 0]))
def test_spherical_jn_inf_complex(self):
# http://dlmf.nist.gov/10.52.E3
n = 7
x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)]))
def test_spherical_jn_large_arg_1(self):
# https://github.com/scipy/scipy/issues/2165
# Reference value computed using mpmath, via
# besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747)
def test_spherical_jn_large_arg_2(self):
# https://github.com/scipy/scipy/issues/1641
# Reference value computed using mpmath, via
# besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05)
def test_spherical_jn_at_zero(self):
# http://dlmf.nist.gov/10.52.E1
# But note that n = 0 is a special case: j0 = sin(x)/x -> 1
n = np.array([0, 1, 2, 5, 10, 100])
x = 0
assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0]))
class TestSphericalYn:
def test_spherical_yn_exact(self):
# http://dlmf.nist.gov/10.49.E5
# Note: exact expression is numerically stable only for small
# n or z >> n.
x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
assert_allclose(spherical_yn(2, x),
(1/x - 3/x**3)*cos(x) - 3/x**2*sin(x))
def test_spherical_yn_recurrence_real(self):
# http://dlmf.nist.gov/10.51.E1
n = np.array([1, 2, 3, 7, 12])
x = 0.12
assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x),
(2*n + 1)/x*spherical_yn(n, x))
def test_spherical_yn_recurrence_complex(self):
# http://dlmf.nist.gov/10.51.E1
n = np.array([1, 2, 3, 7, 12])
x = 1.1 + 1.5j
assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x),
(2*n + 1)/x*spherical_yn(n, x))
def test_spherical_yn_inf_real(self):
# http://dlmf.nist.gov/10.52.E3
n = 6
x = np.array([-inf, inf])
assert_allclose(spherical_yn(n, x), np.array([0, 0]))
def test_spherical_yn_inf_complex(self):
# http://dlmf.nist.gov/10.52.E3
n = 7
x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)]))
def test_spherical_yn_at_zero(self):
# http://dlmf.nist.gov/10.52.E2
n = np.array([0, 1, 2, 5, 10, 100])
x = 0
assert_allclose(spherical_yn(n, x), -inf*np.ones(shape=n.shape))
def test_spherical_yn_at_zero_complex(self):
        # Consistent with numpy:
# >>> -np.cos(0)/0
# -inf
# >>> -np.cos(0+0j)/(0+0j)
# (-inf + nan*j)
n = np.array([0, 1, 2, 5, 10, 100])
x = 0 + 0j
assert_allclose(spherical_yn(n, x), nan*np.ones(shape=n.shape))
class TestSphericalJnYnCrossProduct:
def test_spherical_jn_yn_cross_product_1(self):
# http://dlmf.nist.gov/10.50.E3
n = np.array([1, 5, 8])
x = np.array([0.1, 1, 10])
left = (spherical_jn(n + 1, x) * spherical_yn(n, x) -
spherical_jn(n, x) * spherical_yn(n + 1, x))
right = 1/x**2
assert_allclose(left, right)
def test_spherical_jn_yn_cross_product_2(self):
# http://dlmf.nist.gov/10.50.E3
n = np.array([1, 5, 8])
x = np.array([0.1, 1, 10])
left = (spherical_jn(n + 2, x) * spherical_yn(n, x) -
spherical_jn(n, x) * spherical_yn(n + 2, x))
right = (2*n + 3)/x**3
assert_allclose(left, right)
class TestSphericalIn:
def test_spherical_in_exact(self):
# http://dlmf.nist.gov/10.49.E9
x = np.array([0.12, 1.23, 12.34, 123.45])
assert_allclose(spherical_in(2, x),
(1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x))
def test_spherical_in_recurrence_real(self):
# http://dlmf.nist.gov/10.51.E4
n = np.array([1, 2, 3, 7, 12])
x = 0.12
assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
(2*n + 1)/x*spherical_in(n, x))
def test_spherical_in_recurrence_complex(self):
# http://dlmf.nist.gov/10.51.E1
n = np.array([1, 2, 3, 7, 12])
x = 1.1 + 1.5j
assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
(2*n + 1)/x*spherical_in(n, x))
def test_spherical_in_inf_real(self):
# http://dlmf.nist.gov/10.52.E3
n = 5
x = np.array([-inf, inf])
assert_allclose(spherical_in(n, x), np.array([-inf, inf]))
def test_spherical_in_inf_complex(self):
# http://dlmf.nist.gov/10.52.E5
# Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but
# this appears impossible to achieve because C99 regards any complex
# value with at least one infinite part as a complex infinity, so
# 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is
# the correct return value.
n = 7
x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan]))
def test_spherical_in_at_zero(self):
# http://dlmf.nist.gov/10.52.E1
# But note that n = 0 is a special case: i0 = sinh(x)/x -> 1
n = np.array([0, 1, 2, 5, 10, 100])
x = 0
assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0]))
class TestSphericalKn:
def test_spherical_kn_exact(self):
# http://dlmf.nist.gov/10.49.E13
x = np.array([0.12, 1.23, 12.34, 123.45])
assert_allclose(spherical_kn(2, x),
pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3))
def test_spherical_kn_recurrence_real(self):
# http://dlmf.nist.gov/10.51.E4
n = np.array([1, 2, 3, 7, 12])
x = 0.12
assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x),
(-1)**n*(2*n + 1)/x*spherical_kn(n, x))
def test_spherical_kn_recurrence_complex(self):
# http://dlmf.nist.gov/10.51.E4
n = np.array([1, 2, 3, 7, 12])
x = 1.1 + 1.5j
assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x),
(-1)**n*(2*n + 1)/x*spherical_kn(n, x))
def test_spherical_kn_inf_real(self):
# http://dlmf.nist.gov/10.52.E6
n = 5
x = np.array([-inf, inf])
assert_allclose(spherical_kn(n, x), np.array([-inf, 0]))
def test_spherical_kn_inf_complex(self):
# http://dlmf.nist.gov/10.52.E6
# The behavior at complex infinity depends on the sign of the real
# part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's
# z*inf. This distinction cannot be captured, so we return nan.
n = 7
x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan]))
def test_spherical_kn_at_zero(self):
# http://dlmf.nist.gov/10.52.E2
n = np.array([0, 1, 2, 5, 10, 100])
x = 0
assert_allclose(spherical_kn(n, x), inf*np.ones(shape=n.shape))
def test_spherical_kn_at_zero_complex(self):
# http://dlmf.nist.gov/10.52.E2
n = np.array([0, 1, 2, 5, 10, 100])
x = 0 + 0j
assert_allclose(spherical_kn(n, x), nan*np.ones(shape=n.shape))
class SphericalDerivativesTestCase:
def fundamental_theorem(self, n, a, b):
integral, tolerance = quad(lambda z: self.df(n, z), a, b)
assert_allclose(integral,
self.f(n, b) - self.f(n, a),
atol=tolerance)
@pytest.mark.slow
def test_fundamental_theorem_0(self):
self.fundamental_theorem(0, 3.0, 15.0)
@pytest.mark.slow
def test_fundamental_theorem_7(self):
self.fundamental_theorem(7, 0.5, 1.2)
class TestSphericalJnDerivatives(SphericalDerivativesTestCase):
def f(self, n, z):
return spherical_jn(n, z)
def df(self, n, z):
return spherical_jn(n, z, derivative=True)
def test_spherical_jn_d_zero(self):
n = np.array([0, 1, 2, 3, 7, 15])
assert_allclose(spherical_jn(n, 0, derivative=True),
np.array([0, 1/3, 0, 0, 0, 0]))
class TestSphericalYnDerivatives(SphericalDerivativesTestCase):
def f(self, n, z):
return spherical_yn(n, z)
def df(self, n, z):
return spherical_yn(n, z, derivative=True)
class TestSphericalInDerivatives(SphericalDerivativesTestCase):
def f(self, n, z):
return spherical_in(n, z)
def df(self, n, z):
return spherical_in(n, z, derivative=True)
def test_spherical_in_d_zero(self):
n = np.array([1, 2, 3, 7, 15])
assert_allclose(spherical_in(n, 0, derivative=True),
np.zeros(5))
class TestSphericalKnDerivatives(SphericalDerivativesTestCase):
def f(self, n, z):
return spherical_kn(n, z)
def df(self, n, z):
return spherical_kn(n, z, derivative=True)
class TestSphericalOld:
# These are tests from the TestSpherical class of test_basic.py,
# rewritten to use spherical_* instead of sph_* but otherwise unchanged.
def test_sph_in(self):
# This test reproduces test_basic.TestSpherical.test_sph_in.
i1n = np.empty((2,2))
x = 0.2
i1n[0][0] = spherical_in(0, x)
i1n[0][1] = spherical_in(1, x)
i1n[1][0] = spherical_in(0, x, derivative=True)
i1n[1][1] = spherical_in(1, x, derivative=True)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],np.array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_in_kn_order0(self):
x = 1.
sph_i0 = np.empty((2,))
sph_i0[0] = spherical_in(0, x)
sph_i0[1] = spherical_in(0, x, derivative=True)
sph_i0_expected = np.array([np.sinh(x)/x,
np.cosh(x)/x-np.sinh(x)/x**2])
assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
sph_k0 = np.empty((2,))
sph_k0[0] = spherical_kn(0, x)
sph_k0[1] = spherical_kn(0, x, derivative=True)
sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
-0.5*pi*exp(-x)*(1/x+1/x**2)])
assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
def test_sph_jn(self):
s1 = np.empty((2,3))
x = 0.2
s1[0][0] = spherical_jn(0, x)
s1[0][1] = spherical_jn(1, x)
s1[0][2] = spherical_jn(2, x)
s1[1][0] = spherical_jn(0, x, derivative=True)
s1[1][1] = spherical_jn(1, x, derivative=True)
s1[1][2] = spherical_jn(2, x, derivative=True)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_kn(self):
kn = np.empty((2,3))
x = 0.2
kn[0][0] = spherical_kn(0, x)
kn[0][1] = spherical_kn(1, x)
kn[0][2] = spherical_kn(2, x)
kn[1][0] = spherical_kn(0, x, derivative=True)
kn[1][1] = spherical_kn(1, x, derivative=True)
kn[1][2] = spherical_kn(2, x, derivative=True)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = spherical_yn(2, 0.2)
sy2 = spherical_yn(0, 0.2)
assert_almost_equal(sy1,-377.52483,5) # previous values in the system
assert_almost_equal(sy2,-4.9003329,5)
sphpy = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3
sy3 = spherical_yn(1, 0.2, derivative=True)
        assert_almost_equal(sy3,sphpy,4) # compare against the derivative recurrence value computed above
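# The half-integer-order relation quoted in the mpmath reference comments
# above, j_n(z) = sqrt(pi/(2 z)) * J_{n+1/2}(z), checked directly. A hedged
# sketch (n = 2, z = 7.3 are arbitrary values):
def _demo_half_integer_order_relation():
    from scipy.special import jv
    n, z = 2, 7.3
    assert_allclose(spherical_jn(n, z), np.sqrt(pi/(2*z))*jv(n + 0.5, z))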
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/gammainc_asy.py ----
"""
Precompute coefficients of Temme's asymptotic expansion for gammainc.
This takes about 8 hours to run on a 2.3 GHz MacBook Pro with 4 GB of RAM.
Sources:
[1] NIST, "Digital Library of Mathematical Functions",
http://dlmf.nist.gov/
"""
from __future__ import division, print_function, absolute_import
import os
from scipy.special._precompute.utils import lagrange_inversion
try:
import mpmath as mp
except ImportError:
pass
def compute_a(n):
"""a_k from DLMF 5.11.6"""
a = [mp.sqrt(2)/2]
for k in range(1, n):
ak = a[-1]/k
for j in range(1, len(a)):
ak -= a[j]*a[-j]/(j + 1)
ak /= a[0]*(1 + mp.mpf(1)/(k + 1))
a.append(ak)
return a
def compute_g(n):
"""g_k from DLMF 5.11.3/5.11.5"""
a = compute_a(2*n)
g = []
for k in range(n):
g.append(mp.sqrt(2)*mp.rf(0.5, k)*a[2*k])
return g
def eta(lam):
"""Function from DLMF 8.12.1 shifted to be centered at 0."""
if lam > 0:
return mp.sqrt(2*(lam - mp.log(lam + 1)))
elif lam < 0:
return -mp.sqrt(2*(lam - mp.log(lam + 1)))
else:
return 0
def compute_alpha(n):
"""alpha_n from DLMF 8.12.13"""
coeffs = mp.taylor(eta, 0, n - 1)
return lagrange_inversion(coeffs)
def compute_d(K, N):
"""d_{k, n} from DLMF 8.12.12"""
M = N + 2*K
d0 = [-mp.mpf(1)/3]
alpha = compute_alpha(M + 2)
for n in range(1, M):
d0.append((n + 2)*alpha[n+2])
d = [d0]
g = compute_g(K)
for k in range(1, K):
dk = []
for n in range(M - 2*k):
dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2])
d.append(dk)
for k in range(K):
d[k] = d[k][:N]
return d
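def check_g():
    """Hedged sanity check, not used by main: the leading Stirling-series
    coefficients from compute_g should match the classical DLMF 5.11.3
    values 1, 1/12, 1/288."""
    with mp.workdps(30):
        g = compute_g(3)
    assert abs(g[0] - 1) < 1e-25
    assert abs(g[1] - mp.mpf(1)/12) < 1e-25
    assert abs(g[2] - mp.mpf(1)/288) < 1e-25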
header = \
r"""/* This file was automatically generated by _precomp/gammainc.py.
* Do not edit it manually!
*/
#ifndef IGAM_H
#define IGAM_H
#define K {}
#define N {}
static const double d[K][N] =
{{"""
footer = \
r"""
#endif
"""
def main():
print(__doc__)
K = 25
N = 25
with mp.workdps(50):
d = compute_d(K, N)
fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h')
with open(fn + '.new', 'w') as f:
f.write(header.format(K, N))
for k, row in enumerate(d):
row = map(lambda x: mp.nstr(x, 17, min_fixed=0, max_fixed=0), row)
f.write('{')
f.write(", ".join(row))
if k < K - 1:
f.write('},\n')
else:
f.write('}};\n')
f.write(footer)
os.rename(fn + '.new', fn)
if __name__ == "__main__":
main()
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/loggamma.py ----
"""Precompute series coefficients for log-Gamma."""
from __future__ import division, print_function, absolute_import
try:
import mpmath
except ImportError:
pass
def stirling_series(N):
coeffs = []
with mpmath.workdps(100):
for n in range(1, N + 1):
coeffs.append(mpmath.bernoulli(2*n)/(2*n*(2*n - 1)))
return coeffs
def taylor_series_at_1(N):
coeffs = []
with mpmath.workdps(100):
coeffs.append(-mpmath.euler)
for n in range(2, N + 1):
coeffs.append((-1)**n*mpmath.zeta(n)/n)
return coeffs
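# Hedged spot check, not called by main: the first Stirling-series
# coefficient is B_2/(2*1*1) = (1/6)/2 = 1/12, the familiar 1/(12*z) term
# in the asymptotic expansion of log-Gamma.
def check_leading_coefficient():
    assert abs(stirling_series(1)[0] - mpmath.mpf(1)/12) < 1e-30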
def main():
print(__doc__)
print()
stirling_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
for x in stirling_series(8)[::-1]]
taylor_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
for x in taylor_series_at_1(23)[::-1]]
print("Stirling series coefficients")
print("----------------------------")
print("\n".join(stirling_coeffs))
print()
print("Taylor series coefficients")
print("--------------------------")
print("\n".join(taylor_coeffs))
print()
if __name__ == '__main__':
main()
# ---- File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/expn_asy.py ----
"""Precompute the polynomials for the asymptotic expansion of the
generalized exponential integral.
Sources
-------
[1] NIST, Digital Library of Mathematical Functions,
http://dlmf.nist.gov/8.20#ii
"""
from __future__ import division, print_function, absolute_import
import os
from scipy._lib._numpy_compat import suppress_warnings
try:
# Can remove when sympy #11255 is resolved; see
# https://github.com/sympy/sympy/issues/11255
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "inspect.getargspec.. is deprecated")
import sympy
from sympy import Poly
x = sympy.symbols('x')
except ImportError:
pass
def generate_A(K):
A = [Poly(1, x)]
for k in range(K):
A.append(Poly(1 - 2*k*x, x)*A[k] + Poly(x*(x + 1))*A[k].diff())
return A
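# Hedged spot check, not called by main: A_2 should equal 1 - 2*x, matching
# DLMF 8.20.5 and the values asserted in this module's unit test.
def check_A():
    assert generate_A(2)[2] == Poly(1 - 2*x, x)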
WARNING = """\
/* This file was automatically generated by _precompute/expn_asy.py.
* Do not edit it manually!
*/
"""
def main():
print(__doc__)
fn = os.path.join('..', 'cephes', 'expn.h')
K = 12
A = generate_A(K)
with open(fn + '.new', 'w') as f:
f.write(WARNING)
f.write("#define nA {}\n".format(len(A)))
for k, Ak in enumerate(A):
tmp = ', '.join([str(x.evalf(18)) for x in Ak.coeffs()])
f.write("static const double A{}[] = {{{}}};\n".format(k, tmp))
tmp = ", ".join(["A{}".format(k) for k in range(K + 1)])
f.write("static const double *A[] = {{{}}};\n".format(tmp))
tmp = ", ".join([str(Ak.degree()) for Ak in A])
f.write("static const int Adegs[] = {{{}}};\n".format(tmp))
os.rename(fn + '.new', fn)
if __name__ == "__main__":
main()
| 1,681 | 26.129032 | 76 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/setup.py
|
from __future__ import division, print_function, absolute_import
def configuration(parent_name='special', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('_precompute', parent_name, top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration().todict())
| 374 | 25.785714 | 64 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/zetac.py
|
"""Compute the Taylor series for zeta(x) - 1 around x = 0."""
from __future__ import division, print_function, absolute_import
try:
import mpmath
except ImportError:
pass
def zetac_series(N):
coeffs = []
with mpmath.workdps(100):
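        # the constant term is zeta(0) - 1 = -1/2 - 1 = -3/2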
coeffs.append(-1.5)
for n in range(1, N):
coeff = mpmath.diff(mpmath.zeta, 0, n)/mpmath.factorial(n)
coeffs.append(coeff)
return coeffs
def main():
print(__doc__)
coeffs = zetac_series(10)
coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
for x in coeffs]
print("\n".join(coeffs[::-1]))
if __name__ == '__main__':
main()
| 657 | 20.933333 | 70 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/utils.py
|
from __future__ import division, print_function, absolute_import
from scipy._lib._numpy_compat import suppress_warnings
try:
import mpmath as mp
except ImportError:
pass
try:
# Can remove when sympy #11255 is resolved; see
# https://github.com/sympy/sympy/issues/11255
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "inspect.getargspec.. is deprecated")
from sympy.abc import x
except ImportError:
pass
def lagrange_inversion(a):
"""Given a series
f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),
use the Lagrange inversion formula to compute a series
g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)
so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so
necessarily b[0] = 0 too.
The algorithm is naive and could be improved, but speed isn't an
issue here and it's easy to read.
"""
n = len(a)
f = sum(a[i]*x**i for i in range(len(a)))
h = (x/f).series(x, 0, n).removeO()
hpower = [h**0]
for k in range(n):
hpower.append((hpower[-1]*h).expand())
b = [mp.mpf(0)]
for k in range(1, n):
b.append(hpower[k].coeff(x, k - 1)/k)
    b = [mp.mpf(coeff) for coeff in b]
return b
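# Illustrative sketch (not part of the original module), assuming the
# mpmath/sympy imports above succeeded: inverting the degree-4 truncation
# of f(x) = x/(1 - x) = x + x**2 + x**3 + ... should recover the series of
# its compositional inverse g(x) = x/(1 + x), i.e. b = [0, 1, -1, 1, -1].
# The helper name is hypothetical.
def _check_lagrange_inversion():
    b = lagrange_inversion([0, 1, 1, 1, 1])
    expected = [0, 1, -1, 1, -1]
    assert all(abs(bk - ek) < 1e-15 for bk, ek in zip(b, expected))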
| 1,239 | 25.382979 | 76 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/struve_convergence.py
|
"""
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
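# Illustrative sketch (not part of the original script): err_metric is a
# mixed relative/absolute error, roughly |a - b|/|b| when |b| is of order
# one, and infinities that agree are scored as zero error rather than nan.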
def do_plot(is_h=True):
from scipy.special._ufuncs import (_struve_power_series,
_struve_asymp_large_z,
_struve_bessel_series)
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
    # note: plt.hold was removed from matplotlib (hold-on is now the default)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
main()
| 3,498 | 27.447154 | 108 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/gammainc_data.py
|
"""Compute gammainc and gammaincc for large arguments and parameters
and save the values to data files for use in tests. We can't just
compare to mpmath's gammainc in test_mpmath.TestSystematic because it
would take too long.
Note that mpmath's gammainc is computed using hypercomb, but since it
doesn't allow the user to increase the maximum number of terms used in
the series, it doesn't converge for many arguments. To get around this
we copy the mpmath implementation but use more terms.
This takes about 17 minutes to run on a 2.3 GHz Macbook Pro with 4GB
ram.
Sources:
[1] Fredrik Johansson and others. mpmath: a Python library for
arbitrary-precision floating-point arithmetic (version 0.19),
December 2013. http://mpmath.org/.
"""
from __future__ import division, print_function, absolute_import
import os
from time import time
import numpy as np
from numpy import pi
from scipy.special._mptestutils import mpf2float
try:
import mpmath as mp
except ImportError:
pass
def gammainc(a, x, dps=50, maxterms=10**8):
"""Compute gammainc exactly like mpmath does but allow for more
summands in hypercomb. See
mpmath/functions/expintegrals.py#L134
in the mpmath github repository.
"""
with mp.workdps(dps):
z, a, b = mp.mpf(a), mp.mpf(x), mp.mpf(x)
G = [z]
negb = mp.fneg(b, exact=True)
def h(z):
T1 = [mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
return (T1,)
res = mp.hypercomb(h, [z], maxterms=maxterms)
return mpf2float(res)
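# Illustrative sketch (not part of the original module): for moderate
# arguments the result should agree with mpmath's regularized lower
# incomplete gamma function. The helper name is hypothetical.
def _check_gammainc(a=5, x=3):
    with mp.workdps(50):
        expected = mpf2float(mp.gammainc(mp.mpf(a), b=mp.mpf(x),
                                         regularized=True))
    assert abs(gammainc(a, x) - expected) < 1e-15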
def gammaincc(a, x, dps=50, maxterms=10**8):
"""Compute gammaincc exactly like mpmath does but allow for more
terms in hypercomb. See
mpmath/functions/expintegrals.py#L187
in the mpmath github repository.
"""
with mp.workdps(dps):
z, a = a, x
if mp.isint(z):
try:
# mpmath has a fast integer path
return mpf2float(mp.gammainc(z, a=a, regularized=True))
except mp.libmp.NoConvergence:
pass
nega = mp.fneg(a, exact=True)
G = [z]
# Use 2F0 series when possible; fall back to lower gamma representation
try:
def h(z):
r = z-1
return [([mp.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
return mpf2float(mp.hypercomb(h, [z], force_series=True))
except mp.libmp.NoConvergence:
def h(z):
T1 = [], [1, z-1], [z], G, [], [], 0
T2 = [-mp.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
return T1, T2
return mpf2float(mp.hypercomb(h, [z], maxterms=maxterms))
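# Illustrative sketch (not part of the original module): the regularized
# lower and upper functions are complementary, P(a, x) + Q(a, x) = 1.
# The helper name is hypothetical.
def _check_gammainc_complement(a=20, x=25):
    assert abs(gammainc(a, x) + gammaincc(a, x) - 1) < 1e-14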
def main():
t0 = time()
# It would be nice to have data for larger values, but either this
# requires prohibitively large precision (dps > 800) or mpmath has
# a bug. For example, gammainc(1e20, 1e20, dps=800) returns a
# value around 0.03, while the true value should be close to 0.5
# (DLMF 8.12.15).
print(__doc__)
pwd = os.path.dirname(__file__)
r = np.logspace(4, 14, 30)
ltheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(0.6)), 30)
utheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(1.4)), 30)
regimes = [(gammainc, ltheta), (gammaincc, utheta)]
for func, theta in regimes:
rg, thetag = np.meshgrid(r, theta)
a, x = rg*np.cos(thetag), rg*np.sin(thetag)
a, x = a.flatten(), x.flatten()
dataset = []
for i, (a0, x0) in enumerate(zip(a, x)):
if func == gammaincc:
# Exploit the fast integer path in gammaincc whenever
# possible so that the computation doesn't take too
# long
a0, x0 = np.floor(a0), np.floor(x0)
dataset.append((a0, x0, func(a0, x0)))
dataset = np.array(dataset)
filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
'{}.txt'.format(func.__name__))
np.savetxt(filename, dataset)
print("{} minutes elapsed".format((time() - t0)/60))
if __name__ == "__main__":
main()
| 4,175 | 31.88189 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/__init__.py
| 0 | 0 | 0 |
py
|
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/special/_precompute/lambertw.py
|
"""Compute a Pade approximation for the principle branch of the
Lambert W function around 0 and compare it to various other
approximations.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
try:
import mpmath
import matplotlib.pyplot as plt
except ImportError:
pass
def lambertw_pade():
derivs = []
for n in range(6):
derivs.append(mpmath.diff(mpmath.lambertw, 0, n=n))
p, q = mpmath.pade(derivs, 3, 2)
return p, q
def main():
print(__doc__)
with mpmath.workdps(50):
p, q = lambertw_pade()
p, q = p[::-1], q[::-1]
print("p = {}".format(p))
print("q = {}".format(q))
x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
x, y = np.meshgrid(x, y)
z = x + 1j*y
lambertw_std = []
for z0 in z.flatten():
lambertw_std.append(complex(mpmath.lambertw(z0)))
lambertw_std = np.array(lambertw_std).reshape(x.shape)
fig, axes = plt.subplots(nrows=3, ncols=1)
# Compare Pade approximation to true result
p = np.array([float(p0) for p0 in p])
q = np.array([float(q0) for q0 in q])
pade_approx = np.polyval(p, z)/np.polyval(q, z)
pade_err = abs(pade_approx - lambertw_std)
axes[0].pcolormesh(x, y, pade_err)
# Compare two terms of asymptotic series to true result
asy_approx = np.log(z) - np.log(np.log(z))
asy_err = abs(asy_approx - lambertw_std)
axes[1].pcolormesh(x, y, asy_err)
# Compare two terms of the series around the branch point to the
# true result
p = np.sqrt(2*(np.exp(1)*z + 1))
series_approx = -1 + p - p**2/3
series_err = abs(series_approx - lambertw_std)
im = axes[2].pcolormesh(x, y, series_err)
fig.colorbar(im, ax=axes.ravel().tolist())
plt.show()
fig, ax = plt.subplots(nrows=1, ncols=1)
pade_better = pade_err < asy_err
im = ax.pcolormesh(x, y, pade_better)
t = np.linspace(-0.3, 0.3)
ax.plot(-2.5*abs(t) - 0.2, t, 'r')
fig.colorbar(im, ax=ax)
plt.show()
if __name__ == '__main__':
main()
| 2,072 | 27.39726 | 68 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/constants/constants.py
|
"""
Collection of physical constants and conversion factors.
Most constants are in SI units, so you can do
print('10 mile per minute is', 10*mile/minute, 'm/s or', 10*mile/(minute*knot), 'knots')
The list is not meant to be comprehensive, but just a convenient list for everyday use.
"""
from __future__ import division, print_function, absolute_import
"""
BasSw 2006
physical constants: imported from CODATA
unit conversion: see e.g. NIST special publication 811
Use at own risk: double-check values before calculating your Mars orbit-insertion burn.
Some constants exist in a few variants, which are marked with suffixes.
The ones without any suffix should be the most common one.
"""
import math as _math
from .codata import value as _cd
import numpy as _np
# mathematical constants
pi = _math.pi
golden = golden_ratio = (1 + _math.sqrt(5)) / 2
# SI prefixes
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
# binary prefixes
kibi = 2**10
mebi = 2**20
gibi = 2**30
tebi = 2**40
pebi = 2**50
exbi = 2**60
zebi = 2**70
yobi = 2**80
# physical constants
c = speed_of_light = _cd('speed of light in vacuum')
mu_0 = 4e-7*pi
epsilon_0 = 1 / (mu_0*c*c)
h = Planck = _cd('Planck constant')
hbar = h / (2 * pi)
G = gravitational_constant = _cd('Newtonian constant of gravitation')
g = _cd('standard acceleration of gravity')
e = elementary_charge = _cd('elementary charge')
R = gas_constant = _cd('molar gas constant')
alpha = fine_structure = _cd('fine-structure constant')
N_A = Avogadro = _cd('Avogadro constant')
k = Boltzmann = _cd('Boltzmann constant')
sigma = Stefan_Boltzmann = _cd('Stefan-Boltzmann constant')
Wien = _cd('Wien wavelength displacement law constant')
Rydberg = _cd('Rydberg constant')
# mass in kg
gram = 1e-3
metric_ton = 1e3
grain = 64.79891e-6
lb = pound = 7000 * grain # avoirdupois
blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0)
slug = blob / 12 # lbf*s**2/foot (added in 1.0.0)
oz = ounce = pound / 16
stone = 14 * pound
long_ton = 2240 * pound
short_ton = 2000 * pound
troy_ounce = 480 * grain # only for metals / gems
troy_pound = 12 * troy_ounce
carat = 200e-6
m_e = electron_mass = _cd('electron mass')
m_p = proton_mass = _cd('proton mass')
m_n = neutron_mass = _cd('neutron mass')
m_u = u = atomic_mass = _cd('atomic mass constant')
# angle in rad
degree = pi / 180
arcmin = arcminute = degree / 60
arcsec = arcsecond = arcmin / 60
# time in second
minute = 60.0
hour = 60 * minute
day = 24 * hour
week = 7 * day
year = 365 * day
Julian_year = 365.25 * day
# length in meter
inch = 0.0254
foot = 12 * inch
yard = 3 * foot
mile = 1760 * yard
mil = inch / 1000
pt = point = inch / 72 # typography
survey_foot = 1200.0 / 3937
survey_mile = 5280 * survey_foot
nautical_mile = 1852.0
fermi = 1e-15
angstrom = 1e-10
micron = 1e-6
au = astronomical_unit = 149597870691.0
light_year = Julian_year * c
parsec = au / arcsec
# pressure in pascal
atm = atmosphere = _cd('standard atmosphere')
bar = 1e5
torr = mmHg = atm / 760
psi = pound * g / (inch * inch)
# area in meter**2
hectare = 1e4
acre = 43560 * foot**2
# volume in meter**3
litre = liter = 1e-3
gallon = gallon_US = 231 * inch**3 # US
# pint = gallon_US / 8
fluid_ounce = fluid_ounce_US = gallon_US / 128
bbl = barrel = 42 * gallon_US # for oil
gallon_imp = 4.54609e-3 # UK
fluid_ounce_imp = gallon_imp / 160
# speed in meter per second
kmh = 1e3 / hour
mph = mile / hour
mach = speed_of_sound = 340.5 # approx value at 15 degrees Celsius and 1 atm
knot = nautical_mile / hour
# temperature in kelvin
zero_Celsius = 273.15
degree_Fahrenheit = 1/1.8 # only for differences
# energy in joule
eV = electron_volt = elementary_charge # * 1 Volt
calorie = calorie_th = 4.184
calorie_IT = 4.1868
erg = 1e-7
Btu_th = pound * degree_Fahrenheit * calorie_th / gram
Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
ton_TNT = 1e9 * calorie_th
# Wh = watt_hour
# power in watt
hp = horsepower = 550 * foot * pound * g
# force in newton
dyn = dyne = 1e-5
lbf = pound_force = pound * g
kgf = kilogram_force = g # * 1 kg
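# Illustrative sketch (not part of the original module): everyday unit
# conversions are plain arithmetic with the constants above; for example
# 60 * mile / hour evaluates to about 26.82 (m/s), and dividing by knot,
# 60 * mile / hour / knot, expresses the same speed as about 52.14 knots.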
# functions for conversions that are not linear
def convert_temperature(val, old_scale, new_scale):
"""
Convert from a temperature scale to another one among Celsius, Kelvin,
Fahrenheit and Rankine scales.
Parameters
----------
val : array_like
Value(s) of the temperature(s) to be converted expressed in the
original scale.
old_scale: str
Specifies as a string the original scale from which the temperature
value(s) will be converted. Supported scales are Celsius ('Celsius',
'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f') and Rankine
('Rankine', 'rankine', 'R', 'r').
new_scale: str
Specifies as a string the new scale to which the temperature
value(s) will be converted. Supported scales are Celsius ('Celsius',
'celsius', 'C' or 'c'), Kelvin ('Kelvin', 'kelvin', 'K', 'k'),
Fahrenheit ('Fahrenheit', 'fahrenheit', 'F' or 'f') and Rankine
('Rankine', 'rankine', 'R', 'r').
Returns
-------
res : float or array of floats
Value(s) of the converted temperature(s) expressed in the new scale.
Notes
-----
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy.constants import convert_temperature
>>> convert_temperature(np.array([-40, 40.0]), 'Celsius', 'Kelvin')
array([ 233.15, 313.15])
"""
# Convert from `old_scale` to Kelvin
if old_scale.lower() in ['celsius', 'c']:
tempo = _np.asanyarray(val) + zero_Celsius
elif old_scale.lower() in ['kelvin', 'k']:
tempo = _np.asanyarray(val)
elif old_scale.lower() in ['fahrenheit', 'f']:
tempo = (_np.asanyarray(val) - 32.) * 5. / 9. + zero_Celsius
elif old_scale.lower() in ['rankine', 'r']:
tempo = _np.asanyarray(val) * 5. / 9.
else:
raise NotImplementedError("%s scale is unsupported: supported scales "
"are Celsius, Kelvin, Fahrenheit and "
"Rankine" % old_scale)
# and from Kelvin to `new_scale`.
if new_scale.lower() in ['celsius', 'c']:
res = tempo - zero_Celsius
elif new_scale.lower() in ['kelvin', 'k']:
res = tempo
elif new_scale.lower() in ['fahrenheit', 'f']:
res = (tempo - zero_Celsius) * 9. / 5. + 32.
elif new_scale.lower() in ['rankine', 'r']:
res = tempo * 9. / 5.
else:
raise NotImplementedError("'%s' scale is unsupported: supported "
"scales are 'Celsius', 'Kelvin', "
"'Fahrenheit' and 'Rankine'" % new_scale)
return res
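# Illustrative sketch (not part of the original module): conversions pass
# through Kelvin, so for example
#
#     >>> convert_temperature(0.0, 'Celsius', 'Fahrenheit')
#     32.0
#     >>> convert_temperature(0.0, 'Celsius', 'Kelvin')
#     273.15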
# optics
def lambda2nu(lambda_):
"""
Convert wavelength to optical frequency
Parameters
----------
lambda_ : array_like
Wavelength(s) to be converted.
Returns
-------
nu : float or array of floats
Equivalent optical frequency.
Notes
-----
Computes ``nu = c / lambda`` where c = 299792458.0, i.e., the
(vacuum) speed of light in meters/second.
Examples
--------
>>> from scipy.constants import lambda2nu, speed_of_light
>>> lambda2nu(np.array((1, speed_of_light)))
array([ 2.99792458e+08, 1.00000000e+00])
"""
return _np.asanyarray(c) / lambda_
def nu2lambda(nu):
"""
Convert optical frequency to wavelength.
Parameters
----------
nu : array_like
Optical frequency to be converted.
Returns
-------
lambda : float or array of floats
Equivalent wavelength(s).
Notes
-----
Computes ``lambda = c / nu`` where c = 299792458.0, i.e., the
(vacuum) speed of light in meters/second.
Examples
--------
>>> from scipy.constants import nu2lambda, speed_of_light
>>> nu2lambda(np.array((1, speed_of_light)))
array([ 2.99792458e+08, 1.00000000e+00])
"""
return c / _np.asanyarray(nu)
| 8,252 | 25.795455 | 93 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/constants/setup.py
|
from __future__ import division, print_function, absolute_import
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('constants', parent_package, top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 414 | 28.642857 | 65 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/constants/codata.py
|
# Compiled by Charles Harris, dated October 3, 2002
# updated to 2002 values by BasSw, 2006
# Updated to 2006 values by Vincent Davis June 2010
# Updated to 2014 values by Joseph Booker, 2015
"""
Fundamental Physical Constants
------------------------------
These constants are taken from CODATA Recommended Values of the Fundamental
Physical Constants 2014.
Object
------
physical_constants : dict
A dictionary containing physical constants. Keys are the names of physical
constants, values are tuples (value, units, precision).
Functions
---------
value(key):
Returns the value of the physical constant(key).
unit(key):
Returns the units of the physical constant(key).
precision(key):
Returns the relative precision of the physical constant(key).
find(sub):
Prints or returns list of keys containing the string sub, default is all.
Source
------
The values of the constants provided at this site are recommended for
international use by CODATA and are the latest available. Termed the "2014
CODATA recommended values," they are generally recognized worldwide for use in
all fields of science and technology. The values became available on 25 June
2015 and replaced the 2010 CODATA set. They are based on all of the data
available through 31 December 2014. The 2014 adjustment was carried out under
the auspices of the CODATA Task Group on Fundamental Constants. Also available
is an introduction to the constants for non-experts at
http://physics.nist.gov/cuu/Constants/introduction.html
References
----------
Theoretical and experimental publications relevant to the fundamental constants
and closely related precision measurements published since the mid 1980s, but
also including many older papers of particular interest, some of which date
back to the 1800s. To search bibliography visit
http://physics.nist.gov/cuu/Constants/
"""
from __future__ import division, print_function, absolute_import
import warnings
from math import pi, sqrt
__all__ = ['physical_constants', 'value', 'unit', 'precision', 'find',
'ConstantWarning']
"""
Source: http://physics.nist.gov/cuu/Constants/index.html
The values of the constants provided at the above site are recommended for
international use by CODATA and are the latest available. Termed the "2006
CODATA recommended values", they are generally recognized worldwide for use
in all fields of science and technology. The values became available in March
2007 and replaced the 2002 CODATA set. They are based on all of the data
available through 31 December 2006. The 2006 adjustment was carried out under
the auspices of the CODATA Task Group on Fundamental Constants.
"""
#
# Source: http://physics.nist.gov/cuu/Constants/index.html
#
# Quantity Value Uncertainty Unit
# ---------------------------------------------------- --------------------- -------------------- -------------
txt2002 = """\
Wien displacement law constant 2.897 7685e-3 0.000 0051e-3 m K
atomic unit of 1st hyperpolarizablity 3.206 361 51e-53 0.000 000 28e-53 C^3 m^3 J^-2
atomic unit of 2nd hyperpolarizablity 6.235 3808e-65 0.000 0011e-65 C^4 m^4 J^-3
atomic unit of electric dipole moment 8.478 353 09e-30 0.000 000 73e-30 C m
atomic unit of electric polarizablity 1.648 777 274e-41 0.000 000 016e-41 C^2 m^2 J^-1
atomic unit of electric quadrupole moment 4.486 551 24e-40 0.000 000 39e-40 C m^2
atomic unit of magn. dipole moment 1.854 801 90e-23 0.000 000 16e-23 J T^-1
atomic unit of magn. flux density 2.350 517 42e5 0.000 000 20e5 T
deuteron magn. moment 0.433 073 482e-26 0.000 000 038e-26 J T^-1
deuteron magn. moment to Bohr magneton ratio 0.466 975 4567e-3 0.000 000 0050e-3
deuteron magn. moment to nuclear magneton ratio 0.857 438 2329 0.000 000 0092
deuteron-electron magn. moment ratio -4.664 345 548e-4 0.000 000 050e-4
deuteron-proton magn. moment ratio 0.307 012 2084 0.000 000 0045
deuteron-neutron magn. moment ratio -0.448 206 52 0.000 000 11
electron gyromagn. ratio 1.760 859 74e11 0.000 000 15e11 s^-1 T^-1
electron gyromagn. ratio over 2 pi 28 024.9532 0.0024 MHz T^-1
electron magn. moment -928.476 412e-26 0.000 080e-26 J T^-1
electron magn. moment to Bohr magneton ratio -1.001 159 652 1859 0.000 000 000 0038
electron magn. moment to nuclear magneton ratio -1838.281 971 07 0.000 000 85
electron magn. moment anomaly 1.159 652 1859e-3 0.000 000 0038e-3
electron to shielded proton magn. moment ratio -658.227 5956 0.000 0071
electron to shielded helion magn. moment ratio 864.058 255 0.000 010
electron-deuteron magn. moment ratio -2143.923 493 0.000 023
electron-muon magn. moment ratio 206.766 9894 0.000 0054
electron-neutron magn. moment ratio 960.920 50 0.000 23
electron-proton magn. moment ratio -658.210 6862 0.000 0066
magn. constant 12.566 370 614...e-7 0 N A^-2
magn. flux quantum 2.067 833 72e-15 0.000 000 18e-15 Wb
muon magn. moment -4.490 447 99e-26 0.000 000 40e-26 J T^-1
muon magn. moment to Bohr magneton ratio -4.841 970 45e-3 0.000 000 13e-3
muon magn. moment to nuclear magneton ratio -8.890 596 98 0.000 000 23
muon-proton magn. moment ratio -3.183 345 118 0.000 000 089
neutron gyromagn. ratio 1.832 471 83e8 0.000 000 46e8 s^-1 T^-1
neutron gyromagn. ratio over 2 pi 29.164 6950 0.000 0073 MHz T^-1
neutron magn. moment -0.966 236 45e-26 0.000 000 24e-26 J T^-1
neutron magn. moment to Bohr magneton ratio -1.041 875 63e-3 0.000 000 25e-3
neutron magn. moment to nuclear magneton ratio -1.913 042 73 0.000 000 45
neutron to shielded proton magn. moment ratio -0.684 996 94 0.000 000 16
neutron-electron magn. moment ratio 1.040 668 82e-3 0.000 000 25e-3
neutron-proton magn. moment ratio -0.684 979 34 0.000 000 16
proton gyromagn. ratio 2.675 222 05e8 0.000 000 23e8 s^-1 T^-1
proton gyromagn. ratio over 2 pi 42.577 4813 0.000 0037 MHz T^-1
proton magn. moment 1.410 606 71e-26 0.000 000 12e-26 J T^-1
proton magn. moment to Bohr magneton ratio 1.521 032 206e-3 0.000 000 015e-3
proton magn. moment to nuclear magneton ratio 2.792 847 351 0.000 000 028
proton magn. shielding correction 25.689e-6 0.015e-6
proton-neutron magn. moment ratio -1.459 898 05 0.000 000 34
shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1
shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1
shielded helion magn. moment -1.074 553 024e-26 0.000 000 093e-26 J T^-1
shielded helion magn. moment to Bohr magneton ratio -1.158 671 474e-3 0.000 000 014e-3
shielded helion magn. moment to nuclear magneton ratio -2.127 497 723 0.000 000 025
shielded helion to proton magn. moment ratio -0.761 766 562 0.000 000 012
shielded helion to shielded proton magn. moment ratio -0.761 786 1313 0.000 000 0033
shielded helion gyromagn. ratio 2.037 894 70e8 0.000 000 18e8 s^-1 T^-1
shielded helion gyromagn. ratio over 2 pi 32.434 1015 0.000 0028 MHz T^-1
shielded proton magn. moment 1.410 570 47e-26 0.000 000 12e-26 J T^-1
shielded proton magn. moment to Bohr magneton ratio 1.520 993 132e-3 0.000 000 016e-3
shielded proton magn. moment to nuclear magneton ratio 2.792 775 604 0.000 000 030
{220} lattice spacing of silicon 192.015 5965e-12 0.000 0070e-12 m"""
txt2006 = """\
lattice spacing of silicon 192.015 5762 e-12 0.000 0050 e-12 m
alpha particle-electron mass ratio 7294.299 5365 0.000 0031
alpha particle mass 6.644 656 20 e-27 0.000 000 33 e-27 kg
alpha particle mass energy equivalent 5.971 919 17 e-10 0.000 000 30 e-10 J
alpha particle mass energy equivalent in MeV 3727.379 109 0.000 093 MeV
alpha particle mass in u 4.001 506 179 127 0.000 000 000 062 u
alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 062 e-3 kg mol^-1
alpha particle-proton mass ratio 3.972 599 689 51 0.000 000 000 41
Angstrom star 1.000 014 98 e-10 0.000 000 90 e-10 m
atomic mass constant 1.660 538 782 e-27 0.000 000 083 e-27 kg
atomic mass constant energy equivalent 1.492 417 830 e-10 0.000 000 074 e-10 J
atomic mass constant energy equivalent in MeV 931.494 028 0.000 023 MeV
atomic mass unit-electron volt relationship 931.494 028 e6 0.000 023 e6 eV
atomic mass unit-hartree relationship 3.423 177 7149 e7 0.000 000 0049 e7 E_h
atomic mass unit-hertz relationship 2.252 342 7369 e23 0.000 000 0032 e23 Hz
atomic mass unit-inverse meter relationship 7.513 006 671 e14 0.000 000 011 e14 m^-1
atomic mass unit-joule relationship 1.492 417 830 e-10 0.000 000 074 e-10 J
atomic mass unit-kelvin relationship 1.080 9527 e13 0.000 0019 e13 K
atomic mass unit-kilogram relationship 1.660 538 782 e-27 0.000 000 083 e-27 kg
atomic unit of 1st hyperpolarizability 3.206 361 533 e-53 0.000 000 081 e-53 C^3 m^3 J^-2
atomic unit of 2nd hyperpolarizability 6.235 380 95 e-65 0.000 000 31 e-65 C^4 m^4 J^-3
atomic unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s
atomic unit of charge 1.602 176 487 e-19 0.000 000 040 e-19 C
atomic unit of charge density 1.081 202 300 e12 0.000 000 027 e12 C m^-3
atomic unit of current 6.623 617 63 e-3 0.000 000 17 e-3 A
atomic unit of electric dipole mom. 8.478 352 81 e-30 0.000 000 21 e-30 C m
atomic unit of electric field 5.142 206 32 e11 0.000 000 13 e11 V m^-1
atomic unit of electric field gradient 9.717 361 66 e21 0.000 000 24 e21 V m^-2
atomic unit of electric polarizability 1.648 777 2536 e-41 0.000 000 0034 e-41 C^2 m^2 J^-1
atomic unit of electric potential 27.211 383 86 0.000 000 68 V
atomic unit of electric quadrupole mom. 4.486 551 07 e-40 0.000 000 11 e-40 C m^2
atomic unit of energy 4.359 743 94 e-18 0.000 000 22 e-18 J
atomic unit of force 8.238 722 06 e-8 0.000 000 41 e-8 N
atomic unit of length 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m
atomic unit of mag. dipole mom. 1.854 801 830 e-23 0.000 000 046 e-23 J T^-1
atomic unit of mag. flux density 2.350 517 382 e5 0.000 000 059 e5 T
atomic unit of magnetizability 7.891 036 433 e-29 0.000 000 027 e-29 J T^-2
atomic unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg
atomic unit of momentum 1.992 851 565 e-24 0.000 000 099 e-24 kg m s^-1
atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1
atomic unit of time 2.418 884 326 505 e-17 0.000 000 000 016 e-17 s
atomic unit of velocity 2.187 691 2541 e6 0.000 000 0015 e6 m s^-1
Avogadro constant 6.022 141 79 e23 0.000 000 30 e23 mol^-1
Bohr magneton 927.400 915 e-26 0.000 023 e-26 J T^-1
Bohr magneton in eV/T 5.788 381 7555 e-5 0.000 000 0079 e-5 eV T^-1
Bohr magneton in Hz/T 13.996 246 04 e9 0.000 000 35 e9 Hz T^-1
Bohr magneton in inverse meters per tesla 46.686 4515 0.000 0012 m^-1 T^-1
Bohr magneton in K/T 0.671 7131 0.000 0012 K T^-1
Bohr radius 0.529 177 208 59 e-10 0.000 000 000 36 e-10 m
Boltzmann constant 1.380 6504 e-23 0.000 0024 e-23 J K^-1
Boltzmann constant in eV/K 8.617 343 e-5 0.000 015 e-5 eV K^-1
Boltzmann constant in Hz/K 2.083 6644 e10 0.000 0036 e10 Hz K^-1
Boltzmann constant in inverse meters per kelvin 69.503 56 0.000 12 m^-1 K^-1
characteristic impedance of vacuum 376.730 313 461... (exact) ohm
classical electron radius 2.817 940 2894 e-15 0.000 000 0058 e-15 m
Compton wavelength 2.426 310 2175 e-12 0.000 000 0033 e-12 m
Compton wavelength over 2 pi 386.159 264 59 e-15 0.000 000 53 e-15 m
conductance quantum 7.748 091 7004 e-5 0.000 000 0053 e-5 S
conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1
conventional value of von Klitzing constant 25 812.807 (exact) ohm
Cu x unit 1.002 076 99 e-13 0.000 000 28 e-13 m
deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4
deuteron-electron mass ratio 3670.482 9654 0.000 0016
deuteron g factor 0.857 438 2308 0.000 000 0072
deuteron mag. mom. 0.433 073 465 e-26 0.000 000 011 e-26 J T^-1
deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3
deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072
deuteron mass 3.343 583 20 e-27 0.000 000 17 e-27 kg
deuteron mass energy equivalent 3.005 062 72 e-10 0.000 000 15 e-10 J
deuteron mass energy equivalent in MeV 1875.612 793 0.000 047 MeV
deuteron mass in u 2.013 553 212 724 0.000 000 000 078 u
deuteron molar mass 2.013 553 212 724 e-3 0.000 000 000 078 e-3 kg mol^-1
deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11
deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024
deuteron-proton mass ratio 1.999 007 501 08 0.000 000 000 22
deuteron rms charge radius 2.1402 e-15 0.0028 e-15 m
electric constant 8.854 187 817... e-12 (exact) F m^-1
electron charge to mass quotient -1.758 820 150 e11 0.000 000 044 e11 C kg^-1
electron-deuteron mag. mom. ratio -2143.923 498 0.000 018
electron-deuteron mass ratio 2.724 437 1093 e-4 0.000 000 0012 e-4
electron g factor -2.002 319 304 3622 0.000 000 000 0015
electron gyromag. ratio 1.760 859 770 e11 0.000 000 044 e11 s^-1 T^-1
electron gyromag. ratio over 2 pi 28 024.953 64 0.000 70 MHz T^-1
electron mag. mom. -928.476 377 e-26 0.000 023 e-26 J T^-1
electron mag. mom. anomaly 1.159 652 181 11 e-3 0.000 000 000 74 e-3
electron mag. mom. to Bohr magneton ratio -1.001 159 652 181 11 0.000 000 000 000 74
electron mag. mom. to nuclear magneton ratio -1838.281 970 92 0.000 000 80
electron mass 9.109 382 15 e-31 0.000 000 45 e-31 kg
electron mass energy equivalent 8.187 104 38 e-14 0.000 000 41 e-14 J
electron mass energy equivalent in MeV 0.510 998 910 0.000 000 013 MeV
electron mass in u 5.485 799 0943 e-4 0.000 000 0023 e-4 u
electron molar mass 5.485 799 0943 e-7 0.000 000 0023 e-7 kg mol^-1
electron-muon mag. mom. ratio 206.766 9877 0.000 0052
electron-muon mass ratio 4.836 331 71 e-3 0.000 000 12 e-3
electron-neutron mag. mom. ratio 960.920 50 0.000 23
electron-neutron mass ratio 5.438 673 4459 e-4 0.000 000 0033 e-4
electron-proton mag. mom. ratio -658.210 6848 0.000 0054
electron-proton mass ratio 5.446 170 2177 e-4 0.000 000 0024 e-4
electron-tau mass ratio 2.875 64 e-4 0.000 47 e-4
electron to alpha particle mass ratio 1.370 933 555 70 e-4 0.000 000 000 58 e-4
electron to shielded helion mag. mom. ratio 864.058 257 0.000 010
electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072
electron volt 1.602 176 487 e-19 0.000 000 040 e-19 J
electron volt-atomic mass unit relationship 1.073 544 188 e-9 0.000 000 027 e-9 u
electron volt-hartree relationship 3.674 932 540 e-2 0.000 000 092 e-2 E_h
electron volt-hertz relationship 2.417 989 454 e14 0.000 000 060 e14 Hz
electron volt-inverse meter relationship 8.065 544 65 e5 0.000 000 20 e5 m^-1
electron volt-joule relationship 1.602 176 487 e-19 0.000 000 040 e-19 J
electron volt-kelvin relationship 1.160 4505 e4 0.000 0020 e4 K
electron volt-kilogram relationship 1.782 661 758 e-36 0.000 000 044 e-36 kg
elementary charge 1.602 176 487 e-19 0.000 000 040 e-19 C
elementary charge over h 2.417 989 454 e14 0.000 000 060 e14 A J^-1
Faraday constant 96 485.3399 0.0024 C mol^-1
Faraday constant for conventional electric current 96 485.3401 0.0048 C_90 mol^-1
Fermi coupling constant 1.166 37 e-5 0.000 01 e-5 GeV^-2
fine-structure constant 7.297 352 5376 e-3 0.000 000 0050 e-3
first radiation constant 3.741 771 18 e-16 0.000 000 19 e-16 W m^2
first radiation constant for spectral radiance 1.191 042 759 e-16 0.000 000 059 e-16 W m^2 sr^-1
hartree-atomic mass unit relationship 2.921 262 2986 e-8 0.000 000 0042 e-8 u
hartree-electron volt relationship 27.211 383 86 0.000 000 68 eV
Hartree energy 4.359 743 94 e-18 0.000 000 22 e-18 J
Hartree energy in eV 27.211 383 86 0.000 000 68 eV
hartree-hertz relationship 6.579 683 920 722 e15 0.000 000 000 044 e15 Hz
hartree-inverse meter relationship 2.194 746 313 705 e7 0.000 000 000 015 e7 m^-1
hartree-joule relationship 4.359 743 94 e-18 0.000 000 22 e-18 J
hartree-kelvin relationship 3.157 7465 e5 0.000 0055 e5 K
hartree-kilogram relationship 4.850 869 34 e-35 0.000 000 24 e-35 kg
helion-electron mass ratio 5495.885 2765 0.000 0052
helion mass 5.006 411 92 e-27 0.000 000 25 e-27 kg
helion mass energy equivalent 4.499 538 64 e-10 0.000 000 22 e-10 J
helion mass energy equivalent in MeV 2808.391 383 0.000 070 MeV
helion mass in u 3.014 932 2473 0.000 000 0026 u
helion molar mass 3.014 932 2473 e-3 0.000 000 0026 e-3 kg mol^-1
helion-proton mass ratio 2.993 152 6713 0.000 000 0026
hertz-atomic mass unit relationship 4.439 821 6294 e-24 0.000 000 0064 e-24 u
hertz-electron volt relationship 4.135 667 33 e-15 0.000 000 10 e-15 eV
hertz-hartree relationship 1.519 829 846 006 e-16 0.000 000 000010e-16 E_h
hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1
hertz-joule relationship 6.626 068 96 e-34 0.000 000 33 e-34 J
hertz-kelvin relationship 4.799 2374 e-11 0.000 0084 e-11 K
hertz-kilogram relationship 7.372 496 00 e-51 0.000 000 37 e-51 kg
inverse fine-structure constant 137.035 999 679 0.000 000 094
inverse meter-atomic mass unit relationship 1.331 025 0394 e-15 0.000 000 0019 e-15 u
inverse meter-electron volt relationship 1.239 841 875 e-6 0.000 000 031 e-6 eV
inverse meter-hartree relationship 4.556 335 252 760 e-8 0.000 000 000 030 e-8 E_h
inverse meter-hertz relationship 299 792 458 (exact) Hz
inverse meter-joule relationship 1.986 445 501 e-25 0.000 000 099 e-25 J
inverse meter-kelvin relationship 1.438 7752 e-2 0.000 0025 e-2 K
inverse meter-kilogram relationship 2.210 218 70 e-42 0.000 000 11 e-42 kg
inverse of conductance quantum 12 906.403 7787 0.000 0088 ohm
Josephson constant 483 597.891 e9 0.012 e9 Hz V^-1
joule-atomic mass unit relationship 6.700 536 41 e9 0.000 000 33 e9 u
joule-electron volt relationship 6.241 509 65 e18 0.000 000 16 e18 eV
joule-hartree relationship 2.293 712 69 e17 0.000 000 11 e17 E_h
joule-hertz relationship 1.509 190 450 e33 0.000 000 075 e33 Hz
joule-inverse meter relationship 5.034 117 47 e24 0.000 000 25 e24 m^-1
joule-kelvin relationship 7.242 963 e22 0.000 013 e22 K
joule-kilogram relationship 1.112 650 056... e-17 (exact) kg
kelvin-atomic mass unit relationship 9.251 098 e-14 0.000 016 e-14 u
kelvin-electron volt relationship 8.617 343 e-5 0.000 015 e-5 eV
kelvin-hartree relationship 3.166 8153 e-6 0.000 0055 e-6 E_h
kelvin-hertz relationship 2.083 6644 e10 0.000 0036 e10 Hz
kelvin-inverse meter relationship 69.503 56 0.000 12 m^-1
kelvin-joule relationship 1.380 6504 e-23 0.000 0024 e-23 J
kelvin-kilogram relationship 1.536 1807 e-40 0.000 0027 e-40 kg
kilogram-atomic mass unit relationship 6.022 141 79 e26 0.000 000 30 e26 u
kilogram-electron volt relationship 5.609 589 12 e35 0.000 000 14 e35 eV
kilogram-hartree relationship 2.061 486 16 e34 0.000 000 10 e34 E_h
kilogram-hertz relationship 1.356 392 733 e50 0.000 000 068 e50 Hz
kilogram-inverse meter relationship 4.524 439 15 e41 0.000 000 23 e41 m^-1
kilogram-joule relationship 8.987 551 787... e16 (exact) J
kilogram-kelvin relationship 6.509 651 e39 0.000 011 e39 K
lattice parameter of silicon 543.102 064 e-12 0.000 014 e-12 m
Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7774 e25 0.000 0047 e25 m^-3
mag. constant 12.566 370 614... e-7 (exact) N A^-2
mag. flux quantum 2.067 833 667 e-15 0.000 000 052 e-15 Wb
molar gas constant 8.314 472 0.000 015 J mol^-1 K^-1
molar mass constant 1 e-3 (exact) kg mol^-1
molar mass of carbon-12 12 e-3 (exact) kg mol^-1
molar Planck constant 3.990 312 6821 e-10 0.000 000 0057 e-10 J s mol^-1
molar Planck constant times c 0.119 626 564 72 0.000 000 000 17 J m mol^-1
molar volume of ideal gas (273.15 K, 100 kPa) 22.710 981 e-3 0.000 040 e-3 m^3 mol^-1
molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 996 e-3 0.000 039 e-3 m^3 mol^-1
molar volume of silicon 12.058 8349 e-6 0.000 0011 e-6 m^3 mol^-1
Mo x unit 1.002 099 55 e-13 0.000 000 53 e-13 m
muon Compton wavelength 11.734 441 04 e-15 0.000 000 30 e-15 m
muon Compton wavelength over 2 pi 1.867 594 295 e-15 0.000 000 047 e-15 m
muon-electron mass ratio 206.768 2823 0.000 0052
muon g factor -2.002 331 8414 0.000 000 0012
muon mag. mom. -4.490 447 86 e-26 0.000 000 16 e-26 J T^-1
muon mag. mom. anomaly 1.165 920 69 e-3 0.000 000 60 e-3
muon mag. mom. to Bohr magneton ratio -4.841 970 49 e-3 0.000 000 12 e-3
muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 23
muon mass 1.883 531 30 e-28 0.000 000 11 e-28 kg
muon mass energy equivalent 1.692 833 510 e-11 0.000 000 095 e-11 J
muon mass energy equivalent in MeV 105.658 3668 0.000 0038 MeV
muon mass in u 0.113 428 9256 0.000 000 0029 u
muon molar mass 0.113 428 9256 e-3 0.000 000 0029 e-3 kg mol^-1
muon-neutron mass ratio 0.112 454 5167 0.000 000 0029
muon-proton mag. mom. ratio -3.183 345 137 0.000 000 085
muon-proton mass ratio 0.112 609 5261 0.000 000 0029
muon-tau mass ratio 5.945 92 e-2 0.000 97 e-2
natural unit of action 1.054 571 628 e-34 0.000 000 053 e-34 J s
natural unit of action in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s
natural unit of energy 8.187 104 38 e-14 0.000 000 41 e-14 J
natural unit of energy in MeV 0.510 998 910 0.000 000 013 MeV
natural unit of length 386.159 264 59 e-15 0.000 000 53 e-15 m
natural unit of mass 9.109 382 15 e-31 0.000 000 45 e-31 kg
natural unit of momentum 2.730 924 06 e-22 0.000 000 14 e-22 kg m s^-1
natural unit of momentum in MeV/c 0.510 998 910 0.000 000 013 MeV/c
natural unit of time 1.288 088 6570 e-21 0.000 000 0018 e-21 s
natural unit of velocity 299 792 458 (exact) m s^-1
neutron Compton wavelength 1.319 590 8951 e-15 0.000 000 0020 e-15 m
neutron Compton wavelength over 2 pi 0.210 019 413 82 e-15 0.000 000 000 31 e-15 m
neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3
neutron-electron mass ratio 1838.683 6605 0.000 0011
neutron g factor -3.826 085 45 0.000 000 90
neutron gyromag. ratio 1.832 471 85 e8 0.000 000 43 e8 s^-1 T^-1
neutron gyromag. ratio over 2 pi 29.164 6954 0.000 0069 MHz T^-1
neutron mag. mom. -0.966 236 41 e-26 0.000 000 23 e-26 J T^-1
neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3
neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45
neutron mass 1.674 927 211 e-27 0.000 000 084 e-27 kg
neutron mass energy equivalent 1.505 349 505 e-10 0.000 000 075 e-10 J
neutron mass energy equivalent in MeV 939.565 346 0.000 023 MeV
neutron mass in u 1.008 664 915 97 0.000 000 000 43 u
neutron molar mass 1.008 664 915 97 e-3 0.000 000 000 43 e-3 kg mol^-1
neutron-muon mass ratio 8.892 484 09 0.000 000 23
neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16
neutron-proton mass ratio 1.001 378 419 18 0.000 000 000 46
neutron-tau mass ratio 0.528 740 0.000 086
neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16
Newtonian constant of gravitation 6.674 28 e-11 0.000 67 e-11 m^3 kg^-1 s^-2
Newtonian constant of gravitation over h-bar c 6.708 81 e-39 0.000 67 e-39 (GeV/c^2)^-2
nuclear magneton 5.050 783 24 e-27 0.000 000 13 e-27 J T^-1
nuclear magneton in eV/T 3.152 451 2326 e-8 0.000 000 0045 e-8 eV T^-1
nuclear magneton in inverse meters per tesla 2.542 623 616 e-2 0.000 000 064 e-2 m^-1 T^-1
nuclear magneton in K/T 3.658 2637 e-4 0.000 0064 e-4 K T^-1
nuclear magneton in MHz/T 7.622 593 84 0.000 000 19 MHz T^-1
Planck constant 6.626 068 96 e-34 0.000 000 33 e-34 J s
Planck constant in eV s 4.135 667 33 e-15 0.000 000 10 e-15 eV s
Planck constant over 2 pi 1.054 571 628 e-34 0.000 000 053 e-34 J s
Planck constant over 2 pi in eV s 6.582 118 99 e-16 0.000 000 16 e-16 eV s
Planck constant over 2 pi times c in MeV fm 197.326 9631 0.000 0049 MeV fm
Planck length 1.616 252 e-35 0.000 081 e-35 m
Planck mass 2.176 44 e-8 0.000 11 e-8 kg
Planck mass energy equivalent in GeV 1.220 892 e19 0.000 061 e19 GeV
Planck temperature 1.416 785 e32 0.000 071 e32 K
Planck time 5.391 24 e-44 0.000 27 e-44 s
proton charge to mass quotient 9.578 833 92 e7 0.000 000 24 e7 C kg^-1
proton Compton wavelength 1.321 409 8446 e-15 0.000 000 0019 e-15 m
proton Compton wavelength over 2 pi 0.210 308 908 61 e-15 0.000 000 000 30 e-15 m
proton-electron mass ratio 1836.152 672 47 0.000 000 80
proton g factor 5.585 694 713 0.000 000 046
proton gyromag. ratio 2.675 222 099 e8 0.000 000 070 e8 s^-1 T^-1
proton gyromag. ratio over 2 pi 42.577 4821 0.000 0011 MHz T^-1
proton mag. mom. 1.410 606 662 e-26 0.000 000 037 e-26 J T^-1
proton mag. mom. to Bohr magneton ratio 1.521 032 209 e-3 0.000 000 012 e-3
proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023
proton mag. shielding correction 25.694 e-6 0.014 e-6
proton mass 1.672 621 637 e-27 0.000 000 083 e-27 kg
proton mass energy equivalent 1.503 277 359 e-10 0.000 000 075 e-10 J
proton mass energy equivalent in MeV 938.272 013 0.000 023 MeV
proton mass in u 1.007 276 466 77 0.000 000 000 10 u
proton molar mass 1.007 276 466 77 e-3 0.000 000 000 10 e-3 kg mol^-1
proton-muon mass ratio 8.880 243 39 0.000 000 23
proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34
proton-neutron mass ratio 0.998 623 478 24 0.000 000 000 46
proton rms charge radius 0.8768 e-15 0.0069 e-15 m
proton-tau mass ratio 0.528 012 0.000 086
quantum of circulation 3.636 947 5199 e-4 0.000 000 0050 e-4 m^2 s^-1
quantum of circulation times 2 7.273 895 040 e-4 0.000 000 010 e-4 m^2 s^-1
Rydberg constant 10 973 731.568 527 0.000 073 m^-1
Rydberg constant times c in Hz 3.289 841 960 361 e15 0.000 000 000 022 e15 Hz
Rydberg constant times hc in eV 13.605 691 93 0.000 000 34 eV
Rydberg constant times hc in J 2.179 871 97 e-18 0.000 000 11 e-18 J
Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7047 0.000 0044
Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8677 0.000 0044
second radiation constant 1.438 7752 e-2 0.000 0025 e-2 m K
shielded helion gyromag. ratio 2.037 894 730 e8 0.000 000 056 e8 s^-1 T^-1
shielded helion gyromag. ratio over 2 pi 32.434 101 98 0.000 000 90 MHz T^-1
shielded helion mag. mom. -1.074 552 982 e-26 0.000 000 030 e-26 J T^-1
shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3
shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025
shielded helion to proton mag. mom. ratio -0.761 766 558 0.000 000 011
shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033
shielded proton gyromag. ratio 2.675 153 362 e8 0.000 000 073 e8 s^-1 T^-1
shielded proton gyromag. ratio over 2 pi 42.576 3881 0.000 0012 MHz T^-1
shielded proton mag. mom. 1.410 570 419 e-26 0.000 000 038 e-26 J T^-1
shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3
shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030
speed of light in vacuum 299 792 458 (exact) m s^-1
standard acceleration of gravity 9.806 65 (exact) m s^-2
standard atmosphere 101 325 (exact) Pa
Stefan-Boltzmann constant 5.670 400 e-8 0.000 040 e-8 W m^-2 K^-4
tau Compton wavelength 0.697 72 e-15 0.000 11 e-15 m
tau Compton wavelength over 2 pi 0.111 046 e-15 0.000 018 e-15 m
tau-electron mass ratio 3477.48 0.57
tau mass 3.167 77 e-27 0.000 52 e-27 kg
tau mass energy equivalent 2.847 05 e-10 0.000 46 e-10 J
tau mass energy equivalent in MeV 1776.99 0.29 MeV
tau mass in u 1.907 68 0.000 31 u
tau molar mass 1.907 68 e-3 0.000 31 e-3 kg mol^-1
tau-muon mass ratio 16.8183 0.0027
tau-neutron mass ratio 1.891 29 0.000 31
tau-proton mass ratio 1.893 90 0.000 31
Thomson cross section 0.665 245 8558 e-28 0.000 000 0027 e-28 m^2
triton-electron mag. mom. ratio -1.620 514 423 e-3 0.000 000 021 e-3
triton-electron mass ratio 5496.921 5269 0.000 0051
triton g factor 5.957 924 896 0.000 000 076
triton mag. mom. 1.504 609 361 e-26 0.000 000 042 e-26 J T^-1
triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3
triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038
triton mass 5.007 355 88 e-27 0.000 000 25 e-27 kg
triton mass energy equivalent 4.500 387 03 e-10 0.000 000 22 e-10 J
triton mass energy equivalent in MeV 2808.920 906 0.000 070 MeV
triton mass in u 3.015 500 7134 0.000 000 0025 u
triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1
triton-neutron mag. mom. ratio -1.557 185 53 0.000 000 37
triton-proton mag. mom. ratio 1.066 639 908 0.000 000 010
triton-proton mass ratio 2.993 717 0309 0.000 000 0025
unified atomic mass unit 1.660 538 782 e-27 0.000 000 083 e-27 kg
von Klitzing constant 25 812.807 557 0.000 018 ohm
weak mixing angle 0.222 55 0.000 56
Wien frequency displacement law constant 5.878 933 e10 0.000 010 e10 Hz K^-1
Wien wavelength displacement law constant 2.897 7685 e-3 0.000 0051 e-3 m K"""
txt2010 = """\
{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m
alpha particle-electron mass ratio 7294.299 5361 0.000 0029
alpha particle mass 6.644 656 75 e-27 0.000 000 29 e-27 kg
alpha particle mass energy equivalent 5.971 919 67 e-10 0.000 000 26 e-10 J
alpha particle mass energy equivalent in MeV 3727.379 240 0.000 082 MeV
alpha particle mass in u 4.001 506 179 125 0.000 000 000 062 u
alpha particle molar mass 4.001 506 179 125 e-3 0.000 000 000 062 e-3 kg mol^-1
alpha particle-proton mass ratio 3.972 599 689 33 0.000 000 000 36
Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m
atomic mass constant 1.660 538 921 e-27 0.000 000 073 e-27 kg
atomic mass constant energy equivalent 1.492 417 954 e-10 0.000 000 066 e-10 J
atomic mass constant energy equivalent in MeV 931.494 061 0.000 021 MeV
atomic mass unit-electron volt relationship 931.494 061 e6 0.000 021 e6 eV
atomic mass unit-hartree relationship 3.423 177 6845 e7 0.000 000 0024 e7 E_h
atomic mass unit-hertz relationship 2.252 342 7168 e23 0.000 000 0016 e23 Hz
atomic mass unit-inverse meter relationship 7.513 006 6042 e14 0.000 000 0053 e14 m^-1
atomic mass unit-joule relationship 1.492 417 954 e-10 0.000 000 066 e-10 J
atomic mass unit-kelvin relationship 1.080 954 08 e13 0.000 000 98 e13 K
atomic mass unit-kilogram relationship 1.660 538 921 e-27 0.000 000 073 e-27 kg
atomic unit of 1st hyperpolarizability 3.206 361 449 e-53 0.000 000 071 e-53 C^3 m^3 J^-2
atomic unit of 2nd hyperpolarizability 6.235 380 54 e-65 0.000 000 28 e-65 C^4 m^4 J^-3
atomic unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s
atomic unit of charge 1.602 176 565 e-19 0.000 000 035 e-19 C
atomic unit of charge density 1.081 202 338 e12 0.000 000 024 e12 C m^-3
atomic unit of current 6.623 617 95 e-3 0.000 000 15 e-3 A
atomic unit of electric dipole mom. 8.478 353 26 e-30 0.000 000 19 e-30 C m
atomic unit of electric field 5.142 206 52 e11 0.000 000 11 e11 V m^-1
atomic unit of electric field gradient 9.717 362 00 e21 0.000 000 21 e21 V m^-2
atomic unit of electric polarizability 1.648 777 2754 e-41 0.000 000 0016 e-41 C^2 m^2 J^-1
atomic unit of electric potential 27.211 385 05 0.000 000 60 V
atomic unit of electric quadrupole mom. 4.486 551 331 e-40 0.000 000 099 e-40 C m^2
atomic unit of energy 4.359 744 34 e-18 0.000 000 19 e-18 J
atomic unit of force 8.238 722 78 e-8 0.000 000 36 e-8 N
atomic unit of length 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m
atomic unit of mag. dipole mom. 1.854 801 936 e-23 0.000 000 041 e-23 J T^-1
atomic unit of mag. flux density 2.350 517 464 e5 0.000 000 052 e5 T
atomic unit of magnetizability 7.891 036 607 e-29 0.000 000 013 e-29 J T^-2
atomic unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg
atomic unit of mom.um 1.992 851 740 e-24 0.000 000 088 e-24 kg m s^-1
atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1
atomic unit of time 2.418 884 326 502e-17 0.000 000 000 012e-17 s
atomic unit of velocity 2.187 691 263 79 e6 0.000 000 000 71 e6 m s^-1
Avogadro constant 6.022 141 29 e23 0.000 000 27 e23 mol^-1
Bohr magneton 927.400 968 e-26 0.000 020 e-26 J T^-1
Bohr magneton in eV/T 5.788 381 8066 e-5 0.000 000 0038 e-5 eV T^-1
Bohr magneton in Hz/T 13.996 245 55 e9 0.000 000 31 e9 Hz T^-1
Bohr magneton in inverse meters per tesla 46.686 4498 0.000 0010 m^-1 T^-1
Bohr magneton in K/T 0.671 713 88 0.000 000 61 K T^-1
Bohr radius 0.529 177 210 92 e-10 0.000 000 000 17 e-10 m
Boltzmann constant 1.380 6488 e-23 0.000 0013 e-23 J K^-1
Boltzmann constant in eV/K 8.617 3324 e-5 0.000 0078 e-5 eV K^-1
Boltzmann constant in Hz/K 2.083 6618 e10 0.000 0019 e10 Hz K^-1
Boltzmann constant in inverse meters per kelvin 69.503 476 0.000 063 m^-1 K^-1
characteristic impedance of vacuum 376.730 313 461... (exact) ohm
classical electron radius 2.817 940 3267 e-15 0.000 000 0027 e-15 m
Compton wavelength 2.426 310 2389 e-12 0.000 000 0016 e-12 m
Compton wavelength over 2 pi 386.159 268 00 e-15 0.000 000 25 e-15 m
conductance quantum 7.748 091 7346 e-5 0.000 000 0025 e-5 S
conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1
conventional value of von Klitzing constant 25 812.807 (exact) ohm
Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m
deuteron-electron mag. mom. ratio -4.664 345 537 e-4 0.000 000 039 e-4
deuteron-electron mass ratio 3670.482 9652 0.000 0015
deuteron g factor 0.857 438 2308 0.000 000 0072
deuteron mag. mom. 0.433 073 489 e-26 0.000 000 010 e-26 J T^-1
deuteron mag. mom. to Bohr magneton ratio 0.466 975 4556 e-3 0.000 000 0039 e-3
deuteron mag. mom. to nuclear magneton ratio 0.857 438 2308 0.000 000 0072
deuteron mass 3.343 583 48 e-27 0.000 000 15 e-27 kg
deuteron mass energy equivalent 3.005 062 97 e-10 0.000 000 13 e-10 J
deuteron mass energy equivalent in MeV 1875.612 859 0.000 041 MeV
deuteron mass in u 2.013 553 212 712 0.000 000 000 077 u
deuteron molar mass 2.013 553 212 712 e-3 0.000 000 000 077 e-3 kg mol^-1
deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11
deuteron-proton mag. mom. ratio 0.307 012 2070 0.000 000 0024
deuteron-proton mass ratio 1.999 007 500 97 0.000 000 000 18
deuteron rms charge radius 2.1424 e-15 0.0021 e-15 m
electric constant 8.854 187 817... e-12 (exact) F m^-1
electron charge to mass quotient -1.758 820 088 e11 0.000 000 039 e11 C kg^-1
electron-deuteron mag. mom. ratio -2143.923 498 0.000 018
electron-deuteron mass ratio 2.724 437 1095 e-4 0.000 000 0011 e-4
electron g factor -2.002 319 304 361 53 0.000 000 000 000 53
electron gyromag. ratio 1.760 859 708 e11 0.000 000 039 e11 s^-1 T^-1
electron gyromag. ratio over 2 pi 28 024.952 66 0.000 62 MHz T^-1
electron-helion mass ratio 1.819 543 0761 e-4 0.000 000 0017 e-4
electron mag. mom. -928.476 430 e-26 0.000 021 e-26 J T^-1
electron mag. mom. anomaly 1.159 652 180 76 e-3 0.000 000 000 27 e-3
electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 76 0.000 000 000 000 27
electron mag. mom. to nuclear magneton ratio -1838.281 970 90 0.000 000 75
electron mass 9.109 382 91 e-31 0.000 000 40 e-31 kg
electron mass energy equivalent 8.187 105 06 e-14 0.000 000 36 e-14 J
electron mass energy equivalent in MeV 0.510 998 928 0.000 000 011 MeV
electron mass in u 5.485 799 0946 e-4 0.000 000 0022 e-4 u
electron molar mass 5.485 799 0946 e-7 0.000 000 0022 e-7 kg mol^-1
electron-muon mag. mom. ratio 206.766 9896 0.000 0052
electron-muon mass ratio 4.836 331 66 e-3 0.000 000 12 e-3
electron-neutron mag. mom. ratio 960.920 50 0.000 23
electron-neutron mass ratio 5.438 673 4461 e-4 0.000 000 0032 e-4
electron-proton mag. mom. ratio -658.210 6848 0.000 0054
electron-proton mass ratio 5.446 170 2178 e-4 0.000 000 0022 e-4
electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4
electron to alpha particle mass ratio 1.370 933 555 78 e-4 0.000 000 000 55 e-4
electron to shielded helion mag. mom. ratio 864.058 257 0.000 010
electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072
electron-triton mass ratio 1.819 200 0653 e-4 0.000 000 0017 e-4
electron volt 1.602 176 565 e-19 0.000 000 035 e-19 J
electron volt-atomic mass unit relationship 1.073 544 150 e-9 0.000 000 024 e-9 u
electron volt-hartree relationship 3.674 932 379 e-2 0.000 000 081 e-2 E_h
electron volt-hertz relationship 2.417 989 348 e14 0.000 000 053 e14 Hz
electron volt-inverse meter relationship 8.065 544 29 e5 0.000 000 18 e5 m^-1
electron volt-joule relationship 1.602 176 565 e-19 0.000 000 035 e-19 J
electron volt-kelvin relationship 1.160 4519 e4 0.000 0011 e4 K
electron volt-kilogram relationship 1.782 661 845 e-36 0.000 000 039 e-36 kg
elementary charge 1.602 176 565 e-19 0.000 000 035 e-19 C
elementary charge over h 2.417 989 348 e14 0.000 000 053 e14 A J^-1
Faraday constant 96 485.3365 0.0021 C mol^-1
Faraday constant for conventional electric current 96 485.3321 0.0043 C_90 mol^-1
Fermi coupling constant 1.166 364 e-5 0.000 005 e-5 GeV^-2
fine-structure constant 7.297 352 5698 e-3 0.000 000 0024 e-3
first radiation constant 3.741 771 53 e-16 0.000 000 17 e-16 W m^2
first radiation constant for spectral radiance 1.191 042 869 e-16 0.000 000 053 e-16 W m^2 sr^-1
hartree-atomic mass unit relationship 2.921 262 3246 e-8 0.000 000 0021 e-8 u
hartree-electron volt relationship 27.211 385 05 0.000 000 60 eV
Hartree energy 4.359 744 34 e-18 0.000 000 19 e-18 J
Hartree energy in eV 27.211 385 05 0.000 000 60 eV
hartree-hertz relationship 6.579 683 920 729 e15 0.000 000 000 033 e15 Hz
hartree-inverse meter relationship 2.194 746 313 708 e7 0.000 000 000 011 e7 m^-1
hartree-joule relationship 4.359 744 34 e-18 0.000 000 19 e-18 J
hartree-kelvin relationship 3.157 7504 e5 0.000 0029 e5 K
hartree-kilogram relationship 4.850 869 79 e-35 0.000 000 21 e-35 kg
helion-electron mass ratio 5495.885 2754 0.000 0050
helion g factor -4.255 250 613 0.000 000 050
helion mag. mom. -1.074 617 486 e-26 0.000 000 027 e-26 J T^-1
helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3
helion mag. mom. to nuclear magneton ratio -2.127 625 306 0.000 000 025
helion mass 5.006 412 34 e-27 0.000 000 22 e-27 kg
helion mass energy equivalent 4.499 539 02 e-10 0.000 000 20 e-10 J
helion mass energy equivalent in MeV 2808.391 482 0.000 062 MeV
helion mass in u 3.014 932 2468 0.000 000 0025 u
helion molar mass 3.014 932 2468 e-3 0.000 000 0025 e-3 kg mol^-1
helion-proton mass ratio 2.993 152 6707 0.000 000 0025
hertz-atomic mass unit relationship 4.439 821 6689 e-24 0.000 000 0031 e-24 u
hertz-electron volt relationship 4.135 667 516 e-15 0.000 000 091 e-15 eV
hertz-hartree relationship 1.519 829 8460045e-16 0.000 000 0000076e-16 E_h
hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1
hertz-joule relationship 6.626 069 57 e-34 0.000 000 29 e-34 J
hertz-kelvin relationship 4.799 2434 e-11 0.000 0044 e-11 K
hertz-kilogram relationship 7.372 496 68 e-51 0.000 000 33 e-51 kg
inverse fine-structure constant 137.035 999 074 0.000 000 044
inverse meter-atomic mass unit relationship 1.331 025 051 20 e-15 0.000 000 000 94 e-15 u
inverse meter-electron volt relationship 1.239 841 930 e-6 0.000 000 027 e-6 eV
inverse meter-hartree relationship 4.556 335 252 755 e-8 0.000 000 000 023 e-8 E_h
inverse meter-hertz relationship 299 792 458 (exact) Hz
inverse meter-joule relationship 1.986 445 684 e-25 0.000 000 088 e-25 J
inverse meter-kelvin relationship 1.438 7770 e-2 0.000 0013 e-2 K
inverse meter-kilogram relationship 2.210 218 902 e-42 0.000 000 098 e-42 kg
inverse of conductance quantum 12 906.403 7217 0.000 0042 ohm
Josephson constant 483 597.870 e9 0.011 e9 Hz V^-1
joule-atomic mass unit relationship 6.700 535 85 e9 0.000 000 30 e9 u
joule-electron volt relationship 6.241 509 34 e18 0.000 000 14 e18 eV
joule-hartree relationship 2.293 712 48 e17 0.000 000 10 e17 E_h
joule-hertz relationship 1.509 190 311 e33 0.000 000 067 e33 Hz
joule-inverse meter relationship 5.034 117 01 e24 0.000 000 22 e24 m^-1
joule-kelvin relationship 7.242 9716 e22 0.000 0066 e22 K
joule-kilogram relationship 1.112 650 056... e-17 (exact) kg
kelvin-atomic mass unit relationship 9.251 0868 e-14 0.000 0084 e-14 u
kelvin-electron volt relationship 8.617 3324 e-5 0.000 0078 e-5 eV
kelvin-hartree relationship 3.166 8114 e-6 0.000 0029 e-6 E_h
kelvin-hertz relationship 2.083 6618 e10 0.000 0019 e10 Hz
kelvin-inverse meter relationship 69.503 476 0.000 063 m^-1
kelvin-joule relationship 1.380 6488 e-23 0.000 0013 e-23 J
kelvin-kilogram relationship 1.536 1790 e-40 0.000 0014 e-40 kg
kilogram-atomic mass unit relationship 6.022 141 29 e26 0.000 000 27 e26 u
kilogram-electron volt relationship 5.609 588 85 e35 0.000 000 12 e35 eV
kilogram-hartree relationship 2.061 485 968 e34 0.000 000 091 e34 E_h
kilogram-hertz relationship 1.356 392 608 e50 0.000 000 060 e50 Hz
kilogram-inverse meter relationship 4.524 438 73 e41 0.000 000 20 e41 m^-1
kilogram-joule relationship 8.987 551 787... e16 (exact) J
kilogram-kelvin relationship 6.509 6582 e39 0.000 0059 e39 K
lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m
Loschmidt constant (273.15 K, 100 kPa) 2.651 6462 e25 0.000 0024 e25 m^-3
Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7805 e25 0.000 0024 e25 m^-3
mag. constant 12.566 370 614... e-7 (exact) N A^-2
mag. flux quantum 2.067 833 758 e-15 0.000 000 046 e-15 Wb
molar gas constant 8.314 4621 0.000 0075 J mol^-1 K^-1
molar mass constant 1 e-3 (exact) kg mol^-1
molar mass of carbon-12 12 e-3 (exact) kg mol^-1
molar Planck constant 3.990 312 7176 e-10 0.000 000 0028 e-10 J s mol^-1
molar Planck constant times c 0.119 626 565 779 0.000 000 000 084 J m mol^-1
molar volume of ideal gas (273.15 K, 100 kPa) 22.710 953 e-3 0.000 021 e-3 m^3 mol^-1
molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 968 e-3 0.000 020 e-3 m^3 mol^-1
molar volume of silicon 12.058 833 01 e-6 0.000 000 80 e-6 m^3 mol^-1
Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m
muon Compton wavelength 11.734 441 03 e-15 0.000 000 30 e-15 m
muon Compton wavelength over 2 pi 1.867 594 294 e-15 0.000 000 047 e-15 m
muon-electron mass ratio 206.768 2843 0.000 0052
muon g factor -2.002 331 8418 0.000 000 0013
muon mag. mom. -4.490 448 07 e-26 0.000 000 15 e-26 J T^-1
muon mag. mom. anomaly 1.165 920 91 e-3 0.000 000 63 e-3
muon mag. mom. to Bohr magneton ratio -4.841 970 44 e-3 0.000 000 12 e-3
muon mag. mom. to nuclear magneton ratio -8.890 596 97 0.000 000 22
muon mass 1.883 531 475 e-28 0.000 000 096 e-28 kg
muon mass energy equivalent 1.692 833 667 e-11 0.000 000 086 e-11 J
muon mass energy equivalent in MeV 105.658 3715 0.000 0035 MeV
muon mass in u 0.113 428 9267 0.000 000 0029 u
muon molar mass 0.113 428 9267 e-3 0.000 000 0029 e-3 kg mol^-1
muon-neutron mass ratio 0.112 454 5177 0.000 000 0028
muon-proton mag. mom. ratio -3.183 345 107 0.000 000 084
muon-proton mass ratio 0.112 609 5272 0.000 000 0028
muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2
natural unit of action 1.054 571 726 e-34 0.000 000 047 e-34 J s
natural unit of action in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s
natural unit of energy 8.187 105 06 e-14 0.000 000 36 e-14 J
natural unit of energy in MeV 0.510 998 928 0.000 000 011 MeV
natural unit of length 386.159 268 00 e-15 0.000 000 25 e-15 m
natural unit of mass 9.109 382 91 e-31 0.000 000 40 e-31 kg
natural unit of mom.um 2.730 924 29 e-22 0.000 000 12 e-22 kg m s^-1
natural unit of mom.um in MeV/c 0.510 998 928 0.000 000 011 MeV/c
natural unit of time 1.288 088 668 33 e-21 0.000 000 000 83 e-21 s
natural unit of velocity 299 792 458 (exact) m s^-1
neutron Compton wavelength 1.319 590 9068 e-15 0.000 000 0011 e-15 m
neutron Compton wavelength over 2 pi 0.210 019 415 68 e-15 0.000 000 000 17 e-15 m
neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3
neutron-electron mass ratio 1838.683 6605 0.000 0011
neutron g factor -3.826 085 45 0.000 000 90
neutron gyromag. ratio 1.832 471 79 e8 0.000 000 43 e8 s^-1 T^-1
neutron gyromag. ratio over 2 pi 29.164 6943 0.000 0069 MHz T^-1
neutron mag. mom. -0.966 236 47 e-26 0.000 000 23 e-26 J T^-1
neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3
neutron mag. mom. to nuclear magneton ratio -1.913 042 72 0.000 000 45
neutron mass 1.674 927 351 e-27 0.000 000 074 e-27 kg
neutron mass energy equivalent 1.505 349 631 e-10 0.000 000 066 e-10 J
neutron mass energy equivalent in MeV 939.565 379 0.000 021 MeV
neutron mass in u 1.008 664 916 00 0.000 000 000 43 u
neutron molar mass 1.008 664 916 00 e-3 0.000 000 000 43 e-3 kg mol^-1
neutron-muon mass ratio 8.892 484 00 0.000 000 22
neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16
neutron-proton mass difference 2.305 573 92 e-30 0.000 000 76 e-30
neutron-proton mass difference energy equivalent 2.072 146 50 e-13 0.000 000 68 e-13
neutron-proton mass difference energy equivalent in MeV 1.293 332 17 0.000 000 42
neutron-proton mass difference in u 0.001 388 449 19 0.000 000 000 45
neutron-proton mass ratio 1.001 378 419 17 0.000 000 000 45
neutron-tau mass ratio 0.528 790 0.000 048
neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16
Newtonian constant of gravitation 6.673 84 e-11 0.000 80 e-11 m^3 kg^-1 s^-2
Newtonian constant of gravitation over h-bar c 6.708 37 e-39 0.000 80 e-39 (GeV/c^2)^-2
nuclear magneton 5.050 783 53 e-27 0.000 000 11 e-27 J T^-1
nuclear magneton in eV/T 3.152 451 2605 e-8 0.000 000 0022 e-8 eV T^-1
nuclear magneton in inverse meters per tesla 2.542 623 527 e-2 0.000 000 056 e-2 m^-1 T^-1
nuclear magneton in K/T 3.658 2682 e-4 0.000 0033 e-4 K T^-1
nuclear magneton in MHz/T 7.622 593 57 0.000 000 17 MHz T^-1
Planck constant 6.626 069 57 e-34 0.000 000 29 e-34 J s
Planck constant in eV s 4.135 667 516 e-15 0.000 000 091 e-15 eV s
Planck constant over 2 pi 1.054 571 726 e-34 0.000 000 047 e-34 J s
Planck constant over 2 pi in eV s 6.582 119 28 e-16 0.000 000 15 e-16 eV s
Planck constant over 2 pi times c in MeV fm 197.326 9718 0.000 0044 MeV fm
Planck length 1.616 199 e-35 0.000 097 e-35 m
Planck mass 2.176 51 e-8 0.000 13 e-8 kg
Planck mass energy equivalent in GeV 1.220 932 e19 0.000 073 e19 GeV
Planck temperature 1.416 833 e32 0.000 085 e32 K
Planck time 5.391 06 e-44 0.000 32 e-44 s
proton charge to mass quotient 9.578 833 58 e7 0.000 000 21 e7 C kg^-1
proton Compton wavelength 1.321 409 856 23 e-15 0.000 000 000 94 e-15 m
proton Compton wavelength over 2 pi 0.210 308 910 47 e-15 0.000 000 000 15 e-15 m
proton-electron mass ratio 1836.152 672 45 0.000 000 75
proton g factor 5.585 694 713 0.000 000 046
proton gyromag. ratio 2.675 222 005 e8 0.000 000 063 e8 s^-1 T^-1
proton gyromag. ratio over 2 pi 42.577 4806 0.000 0010 MHz T^-1
proton mag. mom. 1.410 606 743 e-26 0.000 000 033 e-26 J T^-1
proton mag. mom. to Bohr magneton ratio 1.521 032 210 e-3 0.000 000 012 e-3
proton mag. mom. to nuclear magneton ratio 2.792 847 356 0.000 000 023
proton mag. shielding correction 25.694 e-6 0.014 e-6
proton mass 1.672 621 777 e-27 0.000 000 074 e-27 kg
proton mass energy equivalent 1.503 277 484 e-10 0.000 000 066 e-10 J
proton mass energy equivalent in MeV 938.272 046 0.000 021 MeV
proton mass in u 1.007 276 466 812 0.000 000 000 090 u
proton molar mass 1.007 276 466 812 e-3 0.000 000 000 090 e-3 kg mol^-1
proton-muon mass ratio 8.880 243 31 0.000 000 22
proton-neutron mag. mom. ratio -1.459 898 06 0.000 000 34
proton-neutron mass ratio 0.998 623 478 26 0.000 000 000 45
proton rms charge radius 0.8775 e-15 0.0051 e-15 m
proton-tau mass ratio 0.528 063 0.000 048
quantum of circulation 3.636 947 5520 e-4 0.000 000 0024 e-4 m^2 s^-1
quantum of circulation times 2 7.273 895 1040 e-4 0.000 000 0047 e-4 m^2 s^-1
Rydberg constant 10 973 731.568 539 0.000 055 m^-1
Rydberg constant times c in Hz 3.289 841 960 364 e15 0.000 000 000 017 e15 Hz
Rydberg constant times hc in eV 13.605 692 53 0.000 000 30 eV
Rydberg constant times hc in J 2.179 872 171 e-18 0.000 000 096 e-18 J
Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7078 0.000 0023
Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8708 0.000 0023
second radiation constant 1.438 7770 e-2 0.000 0013 e-2 m K
shielded helion gyromag. ratio 2.037 894 659 e8 0.000 000 051 e8 s^-1 T^-1
shielded helion gyromag. ratio over 2 pi 32.434 100 84 0.000 000 81 MHz T^-1
shielded helion mag. mom. -1.074 553 044 e-26 0.000 000 027 e-26 J T^-1
shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3
shielded helion mag. mom. to nuclear magneton ratio -2.127 497 718 0.000 000 025
shielded helion to proton mag. mom. ratio -0.761 766 558 0.000 000 011
shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033
shielded proton gyromag. ratio 2.675 153 268 e8 0.000 000 066 e8 s^-1 T^-1
shielded proton gyromag. ratio over 2 pi 42.576 3866 0.000 0010 MHz T^-1
shielded proton mag. mom. 1.410 570 499 e-26 0.000 000 035 e-26 J T^-1
shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3
shielded proton mag. mom. to nuclear magneton ratio 2.792 775 598 0.000 000 030
speed of light in vacuum 299 792 458 (exact) m s^-1
standard acceleration of gravity 9.806 65 (exact) m s^-2
standard atmosphere 101 325 (exact) Pa
standard-state pressure 100 000 (exact) Pa
Stefan-Boltzmann constant 5.670 373 e-8 0.000 021 e-8 W m^-2 K^-4
tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m
tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m
tau-electron mass ratio 3477.15 0.31
tau mass 3.167 47 e-27 0.000 29 e-27 kg
tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J
tau mass energy equivalent in MeV 1776.82 0.16 MeV
tau mass in u 1.907 49 0.000 17 u
tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1
tau-muon mass ratio 16.8167 0.0015
tau-neutron mass ratio 1.891 11 0.000 17
tau-proton mass ratio 1.893 72 0.000 17
Thomson cross section 0.665 245 8734 e-28 0.000 000 0013 e-28 m^2
triton-electron mass ratio 5496.921 5267 0.000 0050
triton g factor 5.957 924 896 0.000 000 076
triton mag. mom. 1.504 609 447 e-26 0.000 000 038 e-26 J T^-1
triton mag. mom. to Bohr magneton ratio 1.622 393 657 e-3 0.000 000 021 e-3
triton mag. mom. to nuclear magneton ratio 2.978 962 448 0.000 000 038
triton mass 5.007 356 30 e-27 0.000 000 22 e-27 kg
triton mass energy equivalent 4.500 387 41 e-10 0.000 000 20 e-10 J
triton mass energy equivalent in MeV 2808.921 005 0.000 062 MeV
triton mass in u 3.015 500 7134 0.000 000 0025 u
triton molar mass 3.015 500 7134 e-3 0.000 000 0025 e-3 kg mol^-1
triton-proton mass ratio 2.993 717 0308 0.000 000 0025
unified atomic mass unit 1.660 538 921 e-27 0.000 000 073 e-27 kg
von Klitzing constant 25 812.807 4434 0.000 0084 ohm
weak mixing angle 0.2223 0.0021
Wien frequency displacement law constant 5.878 9254 e10 0.000 0053 e10 Hz K^-1
Wien wavelength displacement law constant 2.897 7721 e-3 0.000 0026 e-3 m K"""
txt2014 = """\
{220} lattice spacing of silicon 192.015 5714 e-12 0.000 0032 e-12 m
alpha particle-electron mass ratio 7294.299 541 36 0.000 000 24
alpha particle mass 6.644 657 230 e-27 0.000 000 082 e-27 kg
alpha particle mass energy equivalent 5.971 920 097 e-10 0.000 000 073 e-10 J
alpha particle mass energy equivalent in MeV 3727.379 378 0.000 023 MeV
alpha particle mass in u 4.001 506 179 127 0.000 000 000 063 u
alpha particle molar mass 4.001 506 179 127 e-3 0.000 000 000 063 e-3 kg mol^-1
alpha particle-proton mass ratio 3.972 599 689 07 0.000 000 000 36
Angstrom star 1.000 014 95 e-10 0.000 000 90 e-10 m
atomic mass constant 1.660 539 040 e-27 0.000 000 020 e-27 kg
atomic mass constant energy equivalent 1.492 418 062 e-10 0.000 000 018 e-10 J
atomic mass constant energy equivalent in MeV 931.494 0954 0.000 0057 MeV
atomic mass unit-electron volt relationship 931.494 0954 e6 0.000 0057 e6 eV
atomic mass unit-hartree relationship 3.423 177 6902 e7 0.000 000 0016 e7 E_h
atomic mass unit-hertz relationship 2.252 342 7206 e23 0.000 000 0010 e23 Hz
atomic mass unit-inverse meter relationship 7.513 006 6166 e14 0.000 000 0034 e14 m^-1
atomic mass unit-joule relationship 1.492 418 062 e-10 0.000 000 018 e-10 J
atomic mass unit-kelvin relationship 1.080 954 38 e13 0.000 000 62 e13 K
atomic mass unit-kilogram relationship 1.660 539 040 e-27 0.000 000 020 e-27 kg
atomic unit of 1st hyperpolarizability 3.206 361 329 e-53 0.000 000 020 e-53 C^3 m^3 J^-2
atomic unit of 2nd hyperpolarizability 6.235 380 085 e-65 0.000 000 077 e-65 C^4 m^4 J^-3
atomic unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s
atomic unit of charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C
atomic unit of charge density 1.081 202 3770 e12 0.000 000 0067 e12 C m^-3
atomic unit of current 6.623 618 183 e-3 0.000 000 041 e-3 A
atomic unit of electric dipole mom. 8.478 353 552 e-30 0.000 000 052 e-30 C m
atomic unit of electric field 5.142 206 707 e11 0.000 000 032 e11 V m^-1
atomic unit of electric field gradient 9.717 362 356 e21 0.000 000 060 e21 V m^-2
atomic unit of electric polarizability 1.648 777 2731 e-41 0.000 000 0011 e-41 C^2 m^2 J^-1
atomic unit of electric potential 27.211 386 02 0.000 000 17 V
atomic unit of electric quadrupole mom. 4.486 551 484 e-40 0.000 000 028 e-40 C m^2
atomic unit of energy 4.359 744 650 e-18 0.000 000 054 e-18 J
atomic unit of force 8.238 723 36 e-8 0.000 000 10 e-8 N
atomic unit of length 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m
atomic unit of mag. dipole mom. 1.854 801 999 e-23 0.000 000 011 e-23 J T^-1
atomic unit of mag. flux density 2.350 517 550 e5 0.000 000 014 e5 T
atomic unit of magnetizability 7.891 036 5886 e-29 0.000 000 0090 e-29 J T^-2
atomic unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg
atomic unit of mom.um 1.992 851 882 e-24 0.000 000 024 e-24 kg m s^-1
atomic unit of permittivity 1.112 650 056... e-10 (exact) F m^-1
atomic unit of time 2.418 884 326509e-17 0.000 000 000014e-17 s
atomic unit of velocity 2.187 691 262 77 e6 0.000 000 000 50 e6 m s^-1
Avogadro constant 6.022 140 857 e23 0.000 000 074 e23 mol^-1
Bohr magneton 927.400 9994 e-26 0.000 0057 e-26 J T^-1
Bohr magneton in eV/T 5.788 381 8012 e-5 0.000 000 0026 e-5 eV T^-1
Bohr magneton in Hz/T 13.996 245 042 e9 0.000 000 086 e9 Hz T^-1
Bohr magneton in inverse meters per tesla 46.686 448 14 0.000 000 29 m^-1 T^-1
Bohr magneton in K/T 0.671 714 05 0.000 000 39 K T^-1
Bohr radius 0.529 177 210 67 e-10 0.000 000 000 12 e-10 m
Boltzmann constant 1.380 648 52 e-23 0.000 000 79 e-23 J K^-1
Boltzmann constant in eV/K 8.617 3303 e-5 0.000 0050 e-5 eV K^-1
Boltzmann constant in Hz/K 2.083 6612 e10 0.000 0012 e10 Hz K^-1
Boltzmann constant in inverse meters per kelvin 69.503 457 0.000 040 m^-1 K^-1
characteristic impedance of vacuum 376.730 313 461... (exact) ohm
classical electron radius 2.817 940 3227 e-15 0.000 000 0019 e-15 m
Compton wavelength 2.426 310 2367 e-12 0.000 000 0011 e-12 m
Compton wavelength over 2 pi 386.159 267 64 e-15 0.000 000 18 e-15 m
conductance quantum 7.748 091 7310 e-5 0.000 000 0018 e-5 S
conventional value of Josephson constant 483 597.9 e9 (exact) Hz V^-1
conventional value of von Klitzing constant 25 812.807 (exact) ohm
Cu x unit 1.002 076 97 e-13 0.000 000 28 e-13 m
deuteron-electron mag. mom. ratio -4.664 345 535 e-4 0.000 000 026 e-4
deuteron-electron mass ratio 3670.482 967 85 0.000 000 13
deuteron g factor 0.857 438 2311 0.000 000 0048
deuteron mag. mom. 0.433 073 5040 e-26 0.000 000 0036 e-26 J T^-1
deuteron mag. mom. to Bohr magneton ratio 0.466 975 4554 e-3 0.000 000 0026 e-3
deuteron mag. mom. to nuclear magneton ratio 0.857 438 2311 0.000 000 0048
deuteron mass 3.343 583 719 e-27 0.000 000 041 e-27 kg
deuteron mass energy equivalent 3.005 063 183 e-10 0.000 000 037 e-10 J
deuteron mass energy equivalent in MeV 1875.612 928 0.000 012 MeV
deuteron mass in u 2.013 553 212 745 0.000 000 000 040 u
deuteron molar mass 2.013 553 212 745 e-3 0.000 000 000 040 e-3 kg mol^-1
deuteron-neutron mag. mom. ratio -0.448 206 52 0.000 000 11
deuteron-proton mag. mom. ratio 0.307 012 2077 0.000 000 0015
deuteron-proton mass ratio 1.999 007 500 87 0.000 000 000 19
deuteron rms charge radius 2.1413 e-15 0.0025 e-15 m
electric constant 8.854 187 817... e-12 (exact) F m^-1
electron charge to mass quotient -1.758 820 024 e11 0.000 000 011 e11 C kg^-1
electron-deuteron mag. mom. ratio -2143.923 499 0.000 012
electron-deuteron mass ratio 2.724 437 107 484 e-4 0.000 000 000 096 e-4
electron g factor -2.002 319 304 361 82 0.000 000 000 000 52
electron gyromag. ratio 1.760 859 644 e11 0.000 000 011 e11 s^-1 T^-1
electron gyromag. ratio over 2 pi 28 024.951 64 0.000 17 MHz T^-1
electron-helion mass ratio 1.819 543 074 854 e-4 0.000 000 000 088 e-4
electron mag. mom. -928.476 4620 e-26 0.000 0057 e-26 J T^-1
electron mag. mom. anomaly 1.159 652 180 91 e-3 0.000 000 000 26 e-3
electron mag. mom. to Bohr magneton ratio -1.001 159 652 180 91 0.000 000 000 000 26
electron mag. mom. to nuclear magneton ratio -1838.281 972 34 0.000 000 17
electron mass 9.109 383 56 e-31 0.000 000 11 e-31 kg
electron mass energy equivalent 8.187 105 65 e-14 0.000 000 10 e-14 J
electron mass energy equivalent in MeV 0.510 998 9461 0.000 000 0031 MeV
electron mass in u 5.485 799 090 70 e-4 0.000 000 000 16 e-4 u
electron molar mass 5.485 799 090 70 e-7 0.000 000 000 16 e-7 kg mol^-1
electron-muon mag. mom. ratio 206.766 9880 0.000 0046
electron-muon mass ratio 4.836 331 70 e-3 0.000 000 11 e-3
electron-neutron mag. mom. ratio 960.920 50 0.000 23
electron-neutron mass ratio 5.438 673 4428 e-4 0.000 000 0027 e-4
electron-proton mag. mom. ratio -658.210 6866 0.000 0020
electron-proton mass ratio 5.446 170 213 52 e-4 0.000 000 000 52 e-4
electron-tau mass ratio 2.875 92 e-4 0.000 26 e-4
electron to alpha particle mass ratio 1.370 933 554 798 e-4 0.000 000 000 045 e-4
electron to shielded helion mag. mom. ratio 864.058 257 0.000 010
electron to shielded proton mag. mom. ratio -658.227 5971 0.000 0072
electron-triton mass ratio 1.819 200 062 203 e-4 0.000 000 000 084 e-4
electron volt 1.602 176 6208 e-19 0.000 000 0098 e-19 J
electron volt-atomic mass unit relationship 1.073 544 1105 e-9 0.000 000 0066 e-9 u
electron volt-hartree relationship 3.674 932 248 e-2 0.000 000 023 e-2 E_h
electron volt-hertz relationship 2.417 989 262 e14 0.000 000 015 e14 Hz
electron volt-inverse meter relationship 8.065 544 005 e5 0.000 000 050 e5 m^-1
electron volt-joule relationship 1.602 176 6208 e-19 0.000 000 0098 e-19 J
electron volt-kelvin relationship 1.160 452 21 e4 0.000 000 67 e4 K
electron volt-kilogram relationship 1.782 661 907 e-36 0.000 000 011 e-36 kg
elementary charge 1.602 176 6208 e-19 0.000 000 0098 e-19 C
elementary charge over h 2.417 989 262 e14 0.000 000 015 e14 A J^-1
Faraday constant 96 485.332 89 0.000 59 C mol^-1
Faraday constant for conventional electric current 96 485.3251 0.0012 C_90 mol^-1
Fermi coupling constant 1.166 3787 e-5 0.000 0006 e-5 GeV^-2
fine-structure constant 7.297 352 5664 e-3 0.000 000 0017 e-3
first radiation constant 3.741 771 790 e-16 0.000 000 046 e-16 W m^2
first radiation constant for spectral radiance 1.191 042 953 e-16 0.000 000 015 e-16 W m^2 sr^-1
hartree-atomic mass unit relationship 2.921 262 3197 e-8 0.000 000 0013 e-8 u
hartree-electron volt relationship 27.211 386 02 0.000 000 17 eV
Hartree energy 4.359 744 650 e-18 0.000 000 054 e-18 J
Hartree energy in eV 27.211 386 02 0.000 000 17 eV
hartree-hertz relationship 6.579 683 920 711 e15 0.000 000 000 039 e15 Hz
hartree-inverse meter relationship 2.194 746 313 702 e7 0.000 000 000 013 e7 m^-1
hartree-joule relationship 4.359 744 650 e-18 0.000 000 054 e-18 J
hartree-kelvin relationship 3.157 7513 e5 0.000 0018 e5 K
hartree-kilogram relationship 4.850 870 129 e-35 0.000 000 060 e-35 kg
helion-electron mass ratio 5495.885 279 22 0.000 000 27
helion g factor -4.255 250 616 0.000 000 050
helion mag. mom. -1.074 617 522 e-26 0.000 000 014 e-26 J T^-1
helion mag. mom. to Bohr magneton ratio -1.158 740 958 e-3 0.000 000 014 e-3
helion mag. mom. to nuclear magneton ratio -2.127 625 308 0.000 000 025
helion mass 5.006 412 700 e-27 0.000 000 062 e-27 kg
helion mass energy equivalent 4.499 539 341 e-10 0.000 000 055 e-10 J
helion mass energy equivalent in MeV 2808.391 586 0.000 017 MeV
helion mass in u 3.014 932 246 73 0.000 000 000 12 u
helion molar mass 3.014 932 246 73 e-3 0.000 000 000 12 e-3 kg mol^-1
helion-proton mass ratio 2.993 152 670 46 0.000 000 000 29
hertz-atomic mass unit relationship 4.439 821 6616 e-24 0.000 000 0020 e-24 u
hertz-electron volt relationship 4.135 667 662 e-15 0.000 000 025 e-15 eV
hertz-hartree relationship 1.5198298460088 e-16 0.0000000000090e-16 E_h
hertz-inverse meter relationship 3.335 640 951... e-9 (exact) m^-1
hertz-joule relationship 6.626 070 040 e-34 0.000 000 081 e-34 J
hertz-kelvin relationship 4.799 2447 e-11 0.000 0028 e-11 K
hertz-kilogram relationship 7.372 497 201 e-51 0.000 000 091 e-51 kg
inverse fine-structure constant 137.035 999 139 0.000 000 031
inverse meter-atomic mass unit relationship 1.331 025 049 00 e-15 0.000 000 000 61 e-15 u
inverse meter-electron volt relationship 1.239 841 9739 e-6 0.000 000 0076 e-6 eV
inverse meter-hartree relationship 4.556 335 252 767 e-8 0.000 000 000 027 e-8 E_h
inverse meter-hertz relationship 299 792 458 (exact) Hz
inverse meter-joule relationship 1.986 445 824 e-25 0.000 000 024 e-25 J
inverse meter-kelvin relationship 1.438 777 36 e-2 0.000 000 83 e-2 K
inverse meter-kilogram relationship 2.210 219 057 e-42 0.000 000 027 e-42 kg
inverse of conductance quantum 12 906.403 7278 0.000 0029 ohm
Josephson constant 483 597.8525 e9 0.0030 e9 Hz V^-1
joule-atomic mass unit relationship 6.700 535 363 e9 0.000 000 082 e9 u
joule-electron volt relationship 6.241 509 126 e18 0.000 000 038 e18 eV
joule-hartree relationship 2.293 712 317 e17 0.000 000 028 e17 E_h
joule-hertz relationship 1.509 190 205 e33 0.000 000 019 e33 Hz
joule-inverse meter relationship 5.034 116 651 e24 0.000 000 062 e24 m^-1
joule-kelvin relationship 7.242 9731 e22 0.000 0042 e22 K
joule-kilogram relationship 1.112 650 056... e-17 (exact) kg
kelvin-atomic mass unit relationship 9.251 0842 e-14 0.000 0053 e-14 u
kelvin-electron volt relationship 8.617 3303 e-5 0.000 0050 e-5 eV
kelvin-hartree relationship 3.166 8105 e-6 0.000 0018 e-6 E_h
kelvin-hertz relationship 2.083 6612 e10 0.000 0012 e10 Hz
kelvin-inverse meter relationship 69.503 457 0.000 040 m^-1
kelvin-joule relationship 1.380 648 52 e-23 0.000 000 79 e-23 J
kelvin-kilogram relationship 1.536 178 65 e-40 0.000 000 88 e-40 kg
kilogram-atomic mass unit relationship 6.022 140 857 e26 0.000 000 074 e26 u
kilogram-electron volt relationship 5.609 588 650 e35 0.000 000 034 e35 eV
kilogram-hartree relationship 2.061 485 823 e34 0.000 000 025 e34 E_h
kilogram-hertz relationship 1.356 392 512 e50 0.000 000 017 e50 Hz
kilogram-inverse meter relationship 4.524 438 411 e41 0.000 000 056 e41 m^-1
kilogram-joule relationship 8.987 551 787... e16 (exact) J
kilogram-kelvin relationship 6.509 6595 e39 0.000 0037 e39 K
lattice parameter of silicon 543.102 0504 e-12 0.000 0089 e-12 m
Loschmidt constant (273.15 K, 100 kPa) 2.651 6467 e25 0.000 0015 e25 m^-3
Loschmidt constant (273.15 K, 101.325 kPa) 2.686 7811 e25 0.000 0015 e25 m^-3
mag. constant 12.566 370 614... e-7 (exact) N A^-2
mag. flux quantum 2.067 833 831 e-15 0.000 000 013 e-15 Wb
molar gas constant 8.314 4598 0.000 0048 J mol^-1 K^-1
molar mass constant 1 e-3 (exact) kg mol^-1
molar mass of carbon-12 12 e-3 (exact) kg mol^-1
molar Planck constant 3.990 312 7110 e-10 0.000 000 0018 e-10 J s mol^-1
molar Planck constant times c 0.119 626 565 582 0.000 000 000 054 J m mol^-1
molar volume of ideal gas (273.15 K, 100 kPa) 22.710 947 e-3 0.000 013 e-3 m^3 mol^-1
molar volume of ideal gas (273.15 K, 101.325 kPa) 22.413 962 e-3 0.000 013 e-3 m^3 mol^-1
molar volume of silicon 12.058 832 14 e-6 0.000 000 61 e-6 m^3 mol^-1
Mo x unit 1.002 099 52 e-13 0.000 000 53 e-13 m
muon Compton wavelength 11.734 441 11 e-15 0.000 000 26 e-15 m
muon Compton wavelength over 2 pi 1.867 594 308 e-15 0.000 000 042 e-15 m
muon-electron mass ratio 206.768 2826 0.000 0046
muon g factor -2.002 331 8418 0.000 000 0013
muon mag. mom. -4.490 448 26 e-26 0.000 000 10 e-26 J T^-1
muon mag. mom. anomaly 1.165 920 89 e-3 0.000 000 63 e-3
muon mag. mom. to Bohr magneton ratio -4.841 970 48 e-3 0.000 000 11 e-3
muon mag. mom. to nuclear magneton ratio -8.890 597 05 0.000 000 20
muon mass 1.883 531 594 e-28 0.000 000 048 e-28 kg
muon mass energy equivalent 1.692 833 774 e-11 0.000 000 043 e-11 J
muon mass energy equivalent in MeV 105.658 3745 0.000 0024 MeV
muon mass in u 0.113 428 9257 0.000 000 0025 u
muon molar mass 0.113 428 9257 e-3 0.000 000 0025 e-3 kg mol^-1
muon-neutron mass ratio 0.112 454 5167 0.000 000 0025
muon-proton mag. mom. ratio -3.183 345 142 0.000 000 071
muon-proton mass ratio 0.112 609 5262 0.000 000 0025
muon-tau mass ratio 5.946 49 e-2 0.000 54 e-2
natural unit of action 1.054 571 800 e-34 0.000 000 013 e-34 J s
natural unit of action in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s
natural unit of energy 8.187 105 65 e-14 0.000 000 10 e-14 J
natural unit of energy in MeV 0.510 998 9461 0.000 000 0031 MeV
natural unit of length 386.159 267 64 e-15 0.000 000 18 e-15 m
natural unit of mass 9.109 383 56 e-31 0.000 000 11 e-31 kg
natural unit of mom.um 2.730 924 488 e-22 0.000 000 034 e-22 kg m s^-1
natural unit of mom.um in MeV/c 0.510 998 9461 0.000 000 0031 MeV/c
natural unit of time 1.288 088 667 12 e-21 0.000 000 000 58 e-21 s
natural unit of velocity 299 792 458 (exact) m s^-1
neutron Compton wavelength 1.319 590 904 81 e-15 0.000 000 000 88 e-15 m
neutron Compton wavelength over 2 pi 0.210 019 415 36 e-15 0.000 000 000 14 e-15 m
neutron-electron mag. mom. ratio 1.040 668 82 e-3 0.000 000 25 e-3
neutron-electron mass ratio 1838.683 661 58 0.000 000 90
neutron g factor -3.826 085 45 0.000 000 90
neutron gyromag. ratio 1.832 471 72 e8 0.000 000 43 e8 s^-1 T^-1
neutron gyromag. ratio over 2 pi 29.164 6933 0.000 0069 MHz T^-1
neutron mag. mom. -0.966 236 50 e-26 0.000 000 23 e-26 J T^-1
neutron mag. mom. to Bohr magneton ratio -1.041 875 63 e-3 0.000 000 25 e-3
neutron mag. mom. to nuclear magneton ratio -1.913 042 73 0.000 000 45
neutron mass 1.674 927 471 e-27 0.000 000 021 e-27 kg
neutron mass energy equivalent 1.505 349 739 e-10 0.000 000 019 e-10 J
neutron mass energy equivalent in MeV 939.565 4133 0.000 0058 MeV
neutron mass in u 1.008 664 915 88 0.000 000 000 49 u
neutron molar mass 1.008 664 915 88 e-3 0.000 000 000 49 e-3 kg mol^-1
neutron-muon mass ratio 8.892 484 08 0.000 000 20
neutron-proton mag. mom. ratio -0.684 979 34 0.000 000 16
neutron-proton mass difference 2.305 573 77 e-30 0.000 000 85 e-30
neutron-proton mass difference energy equivalent 2.072 146 37 e-13 0.000 000 76 e-13
neutron-proton mass difference energy equivalent in MeV 1.293 332 05 0.000 000 48
neutron-proton mass difference in u 0.001 388 449 00 0.000 000 000 51
neutron-proton mass ratio 1.001 378 418 98 0.000 000 000 51
neutron-tau mass ratio 0.528 790 0.000 048
neutron to shielded proton mag. mom. ratio -0.684 996 94 0.000 000 16
Newtonian constant of gravitation 6.674 08 e-11 0.000 31 e-11 m^3 kg^-1 s^-2
Newtonian constant of gravitation over h-bar c 6.708 61 e-39 0.000 31 e-39 (GeV/c^2)^-2
nuclear magneton 5.050 783 699 e-27 0.000 000 031 e-27 J T^-1
nuclear magneton in eV/T 3.152 451 2550 e-8 0.000 000 0015 e-8 eV T^-1
nuclear magneton in inverse meters per tesla 2.542 623 432 e-2 0.000 000 016 e-2 m^-1 T^-1
nuclear magneton in K/T 3.658 2690 e-4 0.000 0021 e-4 K T^-1
nuclear magneton in MHz/T 7.622 593 285 0.000 000 047 MHz T^-1
Planck constant 6.626 070 040 e-34 0.000 000 081 e-34 J s
Planck constant in eV s 4.135 667 662 e-15 0.000 000 025 e-15 eV s
Planck constant over 2 pi 1.054 571 800 e-34 0.000 000 013 e-34 J s
Planck constant over 2 pi in eV s 6.582 119 514 e-16 0.000 000 040 e-16 eV s
Planck constant over 2 pi times c in MeV fm 197.326 9788 0.000 0012 MeV fm
Planck length 1.616 229 e-35 0.000 038 e-35 m
Planck mass 2.176 470 e-8 0.000 051 e-8 kg
Planck mass energy equivalent in GeV 1.220 910 e19 0.000 029 e19 GeV
Planck temperature 1.416 808 e32 0.000 033 e32 K
Planck time 5.391 16 e-44 0.000 13 e-44 s
proton charge to mass quotient 9.578 833 226 e7 0.000 000 059 e7 C kg^-1
proton Compton wavelength 1.321 409 853 96 e-15 0.000 000 000 61 e-15 m
proton Compton wavelength over 2 pi 0.210 308910109e-15 0.000 000 000097e-15 m
proton-electron mass ratio 1836.152 673 89 0.000 000 17
proton g factor 5.585 694 702 0.000 000 017
proton gyromag. ratio 2.675 221 900 e8 0.000 000 018 e8 s^-1 T^-1
proton gyromag. ratio over 2 pi 42.577 478 92 0.000 000 29 MHz T^-1
proton mag. mom. 1.410 606 7873 e-26 0.000 000 0097 e-26 J T^-1
proton mag. mom. to Bohr magneton ratio 1.521 032 2053 e-3 0.000 000 0046 e-3
proton mag. mom. to nuclear magneton ratio 2.792 847 3508 0.000 000 0085
proton mag. shielding correction 25.691 e-6 0.011 e-6
proton mass 1.672 621 898 e-27 0.000 000 021 e-27 kg
proton mass energy equivalent 1.503 277 593 e-10 0.000 000 018 e-10 J
proton mass energy equivalent in MeV 938.272 0813 0.000 0058 MeV
proton mass in u 1.007 276 466 879 0.000 000 000 091 u
proton molar mass 1.007 276 466 879 e-3 0.000 000 000 091 e-3 kg mol^-1
proton-muon mass ratio 8.880 243 38 0.000 000 20
proton-neutron mag. mom. ratio -1.459 898 05 0.000 000 34
proton-neutron mass ratio 0.998 623 478 44 0.000 000 000 51
proton rms charge radius 0.8751 e-15 0.0061 e-15 m
proton-tau mass ratio 0.528 063 0.000 048
quantum of circulation 3.636 947 5486 e-4 0.000 000 0017 e-4 m^2 s^-1
quantum of circulation times 2 7.273 895 0972 e-4 0.000 000 0033 e-4 m^2 s^-1
Rydberg constant 10 973 731.568 508 0.000 065 m^-1
Rydberg constant times c in Hz 3.289 841 960 355 e15 0.000 000 000 019 e15 Hz
Rydberg constant times hc in eV 13.605 693 009 0.000 000 084 eV
Rydberg constant times hc in J 2.179 872 325 e-18 0.000 000 027 e-18 J
Sackur-Tetrode constant (1 K, 100 kPa) -1.151 7084 0.000 0014
Sackur-Tetrode constant (1 K, 101.325 kPa) -1.164 8714 0.000 0014
second radiation constant 1.438 777 36 e-2 0.000 000 83 e-2 m K
shielded helion gyromag. ratio 2.037 894 585 e8 0.000 000 027 e8 s^-1 T^-1
shielded helion gyromag. ratio over 2 pi 32.434 099 66 0.000 000 43 MHz T^-1
shielded helion mag. mom. -1.074 553 080 e-26 0.000 000 014 e-26 J T^-1
shielded helion mag. mom. to Bohr magneton ratio -1.158 671 471 e-3 0.000 000 014 e-3
shielded helion mag. mom. to nuclear magneton ratio -2.127 497 720 0.000 000 025
shielded helion to proton mag. mom. ratio -0.761 766 5603 0.000 000 0092
shielded helion to shielded proton mag. mom. ratio -0.761 786 1313 0.000 000 0033
shielded proton gyromag. ratio 2.675 153 171 e8 0.000 000 033 e8 s^-1 T^-1
shielded proton gyromag. ratio over 2 pi 42.576 385 07 0.000 000 53 MHz T^-1
shielded proton mag. mom. 1.410 570 547 e-26 0.000 000 018 e-26 J T^-1
shielded proton mag. mom. to Bohr magneton ratio 1.520 993 128 e-3 0.000 000 017 e-3
shielded proton mag. mom. to nuclear magneton ratio 2.792 775 600 0.000 000 030
speed of light in vacuum 299 792 458 (exact) m s^-1
standard acceleration of gravity 9.806 65 (exact) m s^-2
standard atmosphere 101 325 (exact) Pa
standard-state pressure 100 000 (exact) Pa
Stefan-Boltzmann constant 5.670 367 e-8 0.000 013 e-8 W m^-2 K^-4
tau Compton wavelength 0.697 787 e-15 0.000 063 e-15 m
tau Compton wavelength over 2 pi 0.111 056 e-15 0.000 010 e-15 m
tau-electron mass ratio 3477.15 0.31
tau mass 3.167 47 e-27 0.000 29 e-27 kg
tau mass energy equivalent 2.846 78 e-10 0.000 26 e-10 J
tau mass energy equivalent in MeV 1776.82 0.16 MeV
tau mass in u 1.907 49 0.000 17 u
tau molar mass 1.907 49 e-3 0.000 17 e-3 kg mol^-1
tau-muon mass ratio 16.8167 0.0015
tau-neutron mass ratio 1.891 11 0.000 17
tau-proton mass ratio 1.893 72 0.000 17
Thomson cross section 0.665 245 871 58 e-28 0.000 000 000 91 e-28 m^2
triton-electron mass ratio 5496.921 535 88 0.000 000 26
triton g factor 5.957 924 920 0.000 000 028
triton mag. mom. 1.504 609 503 e-26 0.000 000 012 e-26 J T^-1
triton mag. mom. to Bohr magneton ratio 1.622 393 6616 e-3 0.000 000 0076 e-3
triton mag. mom. to nuclear magneton ratio 2.978 962 460 0.000 000 014
triton mass 5.007 356 665 e-27 0.000 000 062 e-27 kg
triton mass energy equivalent 4.500 387 735 e-10 0.000 000 055 e-10 J
triton mass energy equivalent in MeV 2808.921 112 0.000 017 MeV
triton mass in u 3.015 500 716 32 0.000 000 000 11 u
triton molar mass 3.015 500 716 32 e-3 0.000 000 000 11 e-3 kg mol^-1
triton-proton mass ratio 2.993 717 033 48 0.000 000 000 22
unified atomic mass unit 1.660 539 040 e-27 0.000 000 020 e-27 kg
von Klitzing constant 25 812.807 4555 0.000 0059 ohm
weak mixing angle 0.2223 0.0021
Wien frequency displacement law constant 5.878 9238 e10 0.000 0034 e10 Hz K^-1
Wien wavelength displacement law constant 2.897 7729 e-3 0.000 0017 e-3 m K"""
# -----------------------------------------------------------------------------
physical_constants = {}
def parse_constants(d):
constants = {}
for line in d.split('\n'):
name = line[:55].rstrip()
val = line[55:77].replace(' ', '').replace('...', '')
val = float(val)
uncert = line[77:99].replace(' ', '').replace('(exact)', '0')
uncert = float(uncert)
units = line[99:].rstrip()
constants[name] = (val, units, uncert)
return constants
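# Illustrative sketch (added for exposition; not part of the original scipy
# source): parse_constants relies on the fixed column layout of the tables
# above -- name in [0:55], value in [55:77], uncertainty in [77:99], and
# units from column 99 onward.  A minimal hand-built line parses like so:
def _parse_constants_demo():
    line = ("speed of light in vacuum".ljust(55)
            + "299 792 458".ljust(22)
            + "(exact)".ljust(22)
            + "m s^-1")
    # returns {'speed of light in vacuum': (299792458.0, 'm s^-1', 0.0)}
    return parse_constants(line)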
_physical_constants_2002 = parse_constants(txt2002)
_physical_constants_2006 = parse_constants(txt2006)
_physical_constants_2010 = parse_constants(txt2010)
_physical_constants_2014 = parse_constants(txt2014)
physical_constants.update(_physical_constants_2002)
physical_constants.update(_physical_constants_2006)
physical_constants.update(_physical_constants_2010)
physical_constants.update(_physical_constants_2014)
_current_constants = _physical_constants_2014
_current_codata = "CODATA 2014"
# check obsolete values
_obsolete_constants = {}
for k in physical_constants:
if k not in _current_constants:
_obsolete_constants[k] = True
# generate some additional aliases
_aliases = {}
for k in _physical_constants_2002:
if 'magn.' in k:
_aliases[k] = k.replace('magn.', 'mag.')
for k in _physical_constants_2006:
if 'momentum' in k:
_aliases[k] = k.replace('momentum', 'mom.um')
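# Note (added for exposition): these aliases keep older spellings usable,
# e.g. the 2002 key 'magn. flux quantum' maps to the current
# 'mag. flux quantum', and the 2006 'momentum' keys map to the 'mom.um'
# spelling used from 2010 onward.  The alias keys are inserted into
# physical_constants near the end of this module.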
class ConstantWarning(DeprecationWarning):
"""Accessing a constant no longer in current CODATA data set"""
pass
def _check_obsolete(key):
if key in _obsolete_constants and key not in _aliases:
warnings.warn("Constant '%s' is not in current %s data set" % (
key, _current_codata), ConstantWarning)
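# Note (added for exposition): value(), unit() and precision() below all
# route through _check_obsolete first, so a key that exists only in an
# older CODATA table (and has no alias) still returns its stored value
# but emits a ConstantWarning.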
def value(key):
"""
Value in physical_constants indexed by key
Parameters
----------
key : Python string or unicode
Key in dictionary `physical_constants`
Returns
-------
value : float
Value in `physical_constants` corresponding to `key`
See Also
--------
codata : Contains the description of `physical_constants`, which, as a
dictionary literal object, does not itself possess a docstring.
Examples
--------
>>> from scipy import constants
>>> constants.value(u'elementary charge')
1.6021766208e-19
"""
_check_obsolete(key)
return physical_constants[key][0]
def unit(key):
"""
Unit in physical_constants indexed by key
Parameters
----------
key : Python string or unicode
Key in dictionary `physical_constants`
Returns
-------
unit : Python string
Unit in `physical_constants` corresponding to `key`
See Also
--------
codata : Contains the description of `physical_constants`, which, as a
dictionary literal object, does not itself possess a docstring.
Examples
--------
>>> from scipy import constants
>>> constants.unit(u'proton mass')
'kg'
"""
_check_obsolete(key)
return physical_constants[key][1]
def precision(key):
"""
Relative precision in physical_constants indexed by key
Parameters
----------
key : Python string or unicode
Key in dictionary `physical_constants`
Returns
-------
prec : float
Relative precision in `physical_constants` corresponding to `key`
See Also
--------
codata : Contains the description of `physical_constants`, which, as a
dictionary literal object, does not itself possess a docstring.
Examples
--------
>>> from scipy import constants
>>> constants.precision(u'proton mass')
1.2555138746605121e-08
"""
_check_obsolete(key)
return physical_constants[key][2] / physical_constants[key][0]
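# Illustrative (added for exposition): with the 2014 table above,
# precision('electron mass') evaluates 0.00000011e-31 / 9.10938356e-31,
# i.e. a relative standard uncertainty of about 1.2e-8.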
def find(sub=None, disp=False):
"""
Return list of physical_constant keys containing a given string.
Parameters
----------
sub : str, unicode
Sub-string to search keys for. By default, return all keys.
disp : bool
If True, print the keys that are found, and return None.
Otherwise, return the list of keys without printing anything.
Returns
-------
keys : list or None
If `disp` is False, the list of keys is returned.
Otherwise, None is returned.
See Also
--------
codata : Contains the description of `physical_constants`, which, as a
dictionary literal object, does not itself possess a docstring.
Examples
--------
>>> from scipy.constants import find, physical_constants
Which keys in the ``physical_constants`` dictionary contain 'boltzmann'?
>>> find('boltzmann')
['Boltzmann constant',
'Boltzmann constant in Hz/K',
'Boltzmann constant in eV/K',
'Boltzmann constant in inverse meters per kelvin',
'Stefan-Boltzmann constant']
Get the constant called 'Boltzmann constant in Hz/K':
>>> physical_constants['Boltzmann constant in Hz/K']
(20836612000.0, 'Hz K^-1', 12000.0)
Find constants with 'radius' in the key:
>>> find('radius')
['Bohr radius',
'classical electron radius',
'deuteron rms charge radius',
'proton rms charge radius']
>>> physical_constants['classical electron radius']
(2.8179403227e-15, 'm', 1.9e-24)
"""
if sub is None:
result = list(_current_constants.keys())
else:
result = [key for key in _current_constants
if sub.lower() in key.lower()]
result.sort()
if disp:
for key in result:
print(key)
return
else:
return result
# Table is lacking some digits for exact values: calculate from definition
c = value('speed of light in vacuum')
mu0 = 4e-7 * pi
epsilon0 = 1 / (mu0 * c * c)
exact_values = {
'mag. constant': (mu0, 'N A^-2', 0.0),
'electric constant': (epsilon0, 'F m^-1', 0.0),
'characteristic impedance of vacuum': (sqrt(mu0 / epsilon0), 'ohm', 0.0),
'atomic unit of permittivity': (4 * epsilon0 * pi, 'F m^-1', 0.0),
'joule-kilogram relationship': (1 / (c * c), 'kg', 0.0),
'kilogram-joule relationship': (c * c, 'J', 0.0),
'hertz-inverse meter relationship': (1 / c, 'm^-1', 0.0)
}
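# Illustrative note (added for exposition): these definitions are mutually
# consistent -- sqrt(mu0 / epsilon0) equals mu0 * c (about 376.730 ohm),
# and the joule-kilogram pair are exact reciprocals, since
# (c * c) * (1 / (c * c)) == 1.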
# sanity check
for key in exact_values:
val = _current_constants[key][0]
if abs(exact_values[key][0] - val) / val > 1e-9:
raise ValueError("Constants.codata: exact values too far off.")
physical_constants.update(exact_values)
# finally, insert aliases for values
for k, v in list(_aliases.items()):
if v in _current_constants:
physical_constants[k] = physical_constants[v]
else:
del _aliases[k]
| 116,026 | 82.713564 | 113 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/constants/__init__.py |
r"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. currentmodule:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
================ =================================================================
``pi`` Pi
``golden`` Golden ratio
``golden_ratio`` Golden ratio
================ =================================================================
Physical constants
==================
=========================== =================================================================
``c`` speed of light in vacuum
``speed_of_light`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``Planck`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``gravitational_constant`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``elementary_charge`` elementary charge
``R`` molar gas constant
``gas_constant`` molar gas constant
``alpha`` fine-structure constant
``fine_structure`` fine-structure constant
``N_A`` Avogadro constant
``Avogadro`` Avogadro constant
``k`` Boltzmann constant
``Boltzmann`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``electron_mass`` electron mass
``m_p`` proton mass
``proton_mass`` proton mass
``m_n`` neutron mass
``neutron_mass`` neutron mass
=========================== =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2014 CODATA recommended values [CODATA2014]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Mass
----
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb``            one pound (avoirdupois) in kg
``pound``         one pound (avoirdupois) in kg
``blob`` one inch version of a slug in kg (added in 1.0.0)
``slinch`` one inch version of a slug in kg (added in 1.0.0)
``slug`` one slug in kg (added in 1.0.0)
``oz`` one ounce in kg
``ounce`` one ounce in kg
``stone`` one stone in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
``u`` atomic mass constant (in kg)
``atomic_mass`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcminute`` arc minute in radians
``arcsec`` arc second in radians
``arcsecond`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
===================== ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``point`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``astronomical_unit`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
===================== ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``atmosphere`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``mmHg`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``litre`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_US`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_US`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
``barrel`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================== ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
``knot`` one knot in meters per second
================== ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
convert_temperature
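For example (an illustrative doctest added here; the function's own
docstring is the authoritative reference):

>>> from scipy.constants import convert_temperature
>>> round(float(convert_temperature(100.0, 'Celsius', 'Kelvin')), 10)
373.15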
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``electron_volt`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_th`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_IT`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
``horsepower`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``dyne`` one dyne in newtons
``lbf`` one pound force in newtons
``pound_force`` one pound force in newtons
``kgf`` one kilogram force in newtons
``kilogram_force`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
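As a quick illustrative check (added here), the two converters are tied
together by the speed of light:

>>> from scipy.constants import lambda2nu, nu2lambda, speed_of_light
>>> float(lambda2nu(speed_of_light))  # a wavelength of c metres is 1 Hz
1.0
>>> float(nu2lambda(1.0)) == speed_of_light
True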
References
==========
.. [CODATA2014] CODATA Recommended Values of the Fundamental
Physical Constants 2014.
http://physics.nist.gov/cuu/Constants/index.html
"""
from __future__ import division, print_function, absolute_import
# Modules contributed by BasSw (wegwerp@gmail.com)
from .codata import *
from .constants import *
from .codata import _obsolete_constants
_constant_names = [(_k.lower(), _k, _v)
for _k, _v in physical_constants.items()
if _k not in _obsolete_constants]
_constant_names = "\n".join(["``%s``%s %s %s" % (_x[1], " "*(66-len(_x[1])),
_x[2][0], _x[2][1])
for _x in sorted(_constant_names)])
if __doc__ is not None:
__doc__ = __doc__ % dict(constant_names=_constant_names)
del _constant_names
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 12,203 | 34.788856 | 94 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/constants/tests/test_constants.py |
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_equal, assert_allclose
import scipy.constants as sc
def test_convert_temperature():
assert_equal(sc.convert_temperature(32, 'f', 'Celsius'), 0)
assert_equal(sc.convert_temperature([0, 0], 'celsius', 'Kelvin'),
[273.15, 273.15])
assert_equal(sc.convert_temperature([0, 0], 'kelvin', 'c'),
[-273.15, -273.15])
assert_equal(sc.convert_temperature([32, 32], 'f', 'k'), [273.15, 273.15])
assert_equal(sc.convert_temperature([273.15, 273.15], 'kelvin', 'F'),
[32, 32])
assert_equal(sc.convert_temperature([0, 0], 'C', 'fahrenheit'), [32, 32])
assert_allclose(sc.convert_temperature([0, 0], 'c', 'r'), [491.67, 491.67],
rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 491.67], 'Rankine', 'C'),
[0., 0.], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 491.67], 'r', 'F'),
[32., 32.], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([32, 32], 'fahrenheit', 'R'),
[491.67, 491.67], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([273.15, 273.15], 'K', 'R'),
[491.67, 491.67], rtol=0., atol=1e-13)
assert_allclose(sc.convert_temperature([491.67, 0.], 'rankine', 'kelvin'),
[273.15, 0.], rtol=0., atol=1e-13)
def test_lambda_to_nu():
assert_equal(sc.lambda2nu(sc.speed_of_light), 1)
def test_nu_to_lambda():
assert_equal(sc.nu2lambda(1), sc.speed_of_light)
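def test_convert_temperature_roundtrip():
    # Illustrative addition (not part of the original test file): a
    # Celsius -> Fahrenheit -> Celsius conversion should round-trip to
    # within floating-point noise.
    vals = [-40.0, 0.0, 36.6, 100.0]
    f = sc.convert_temperature(vals, 'Celsius', 'Fahrenheit')
    back = sc.convert_temperature(f, 'Fahrenheit', 'Celsius')
    assert_allclose(back, vals, rtol=0., atol=1e-13)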
| 1,646 | 42.342105 | 79 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/constants/tests/__init__.py | 0 | 0 | 0 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/constants/tests/test_codata.py |
from __future__ import division, print_function, absolute_import
from scipy.constants import constants, codata, find, value
from numpy.testing import (assert_equal, assert_,
assert_almost_equal)
def test_find():
keys = find('weak mixing', disp=False)
assert_equal(keys, ['weak mixing angle'])
keys = find('qwertyuiop', disp=False)
assert_equal(keys, [])
keys = find('natural unit', disp=False)
assert_equal(keys, sorted(['natural unit of velocity',
'natural unit of action',
'natural unit of action in eV s',
'natural unit of mass',
'natural unit of energy',
'natural unit of energy in MeV',
'natural unit of mom.um',
'natural unit of mom.um in MeV/c',
'natural unit of length',
'natural unit of time']))
def test_basic_table_parse():
c = 'speed of light in vacuum'
assert_equal(codata.value(c), constants.c)
assert_equal(codata.value(c), constants.speed_of_light)
def test_basic_lookup():
assert_equal('%d %s' % (codata.c, codata.unit('speed of light in vacuum')),
'299792458 m s^-1')
def test_find_all():
assert_(len(codata.find(disp=False)) > 300)
def test_find_single():
assert_equal(codata.find('Wien freq', disp=False)[0],
'Wien frequency displacement law constant')
def test_2002_vs_2006():
assert_almost_equal(codata.value('magn. flux quantum'),
codata.value('mag. flux quantum'))
def test_exact_values():
# Check that updating stored values with exact ones worked.
for key in codata.exact_values:
assert_((codata.exact_values[key][0] - value(key)) / value(key) == 0)
| 1,935 | 32.37931 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/fitpack2.py
|
"""
fitpack --- curve and surface fitting with splines
fitpack is based on a collection of Fortran routines DIERCKX
by P. Dierckx (see http://www.netlib.org/dierckx/) transformed
to double routines by Pearu Peterson.
"""
# Created by Pearu Peterson, June,August 2003
from __future__ import division, print_function, absolute_import
__all__ = [
'UnivariateSpline',
'InterpolatedUnivariateSpline',
'LSQUnivariateSpline',
'BivariateSpline',
'LSQBivariateSpline',
'SmoothBivariateSpline',
'LSQSphereBivariateSpline',
'SmoothSphereBivariateSpline',
'RectBivariateSpline',
'RectSphereBivariateSpline']
import warnings
from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones
import numpy as np
from . import fitpack
from . import dfitpack
################ Univariate spline ####################
_curfit_messages = {1:"""
The required storage space exceeds the available storage space, as
specified by the parameter nest: nest too small. If nest is already
large (say nest > m/2), it may also indicate that s is too small.
The approximation returned is the weighted least-squares spline
according to the knots t[0],t[1],...,t[n-1]. (n=nest) the parameter fp
gives the corresponding weighted sum of squared residuals (fp>s).
""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
3:"""
The maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached: s
too small.
There is an approximation returned but the corresponding weighted sum
of squared residuals does not satisfy the condition abs(fp-s)/s < tol.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[0]<x[1]<...<x[m-1]<=xe, w[i]>0, i=0..m-1
if iopt=-1:
xb<t[k+1]<t[k+2]<...<t[n-k-2]<xe"""
}
# UnivariateSpline, ext parameter can be an int or a string
_extrap_modes = {0: 0, 'extrapolate': 0,
1: 1, 'zeros': 1,
2: 2, 'raise': 2,
3: 3, 'const': 3}
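# Note (added for exposition): integer codes and string spellings resolve
# to the same FITPACK flag, e.g. _extrap_modes['raise'] == _extrap_modes[2]
# == 2; UnivariateSpline.__init__ below normalizes its `ext` argument
# through this table.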
class UnivariateSpline(object):
"""
One-dimensional smoothing spline fit to a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `s`
specifies the number of knots by specifying a smoothing condition.
Parameters
----------
x : (N,) array_like
1-D array of independent input data. Must be increasing.
y : (N,) array_like
1-D array of dependent input data, of the same length as `x`.
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be <= 5.
Default is k=3, a cubic spline.
s : float or None, optional
Positive smoothing factor used to choose the number of knots. Number
of knots will be increased until the smoothing condition is satisfied::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0) <= s
If None (default), ``s = len(w)`` which should be a good value if
``1/w[i]`` is an estimate of the standard deviation of ``y[i]``.
If 0, spline will interpolate through all data points.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
InterpolatedUnivariateSpline : Subclass with smoothing forced to 0
LSQUnivariateSpline : Subclass in which knots are user-selected instead of
being set by smoothing condition
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
**NaN handling**: If the input arrays contain ``nan`` values, the result
is not useful, since the underlying spline fitting routines cannot deal
with ``nan`` . A workaround is to use zero weights for not-a-number
data points:
>>> from scipy.interpolate import UnivariateSpline
>>> x, y = np.array([1, 2, 3, 4]), np.array([1, np.nan, 3, 4])
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> spl = UnivariateSpline(x, y, w=~w)
Notice the need to replace a ``nan`` by a numerical value (the precise
value does not matter as long as the corresponding weight is zero).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> plt.plot(x, y, 'ro', ms=5)
Use the default value for the smoothing parameter:
>>> spl = UnivariateSpline(x, y)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3)
Manually change the amount of smoothing:
>>> spl.set_smoothing_factor(0.5)
>>> plt.plot(xs, spl(xs), 'b', lw=3)
>>> plt.show()
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3, s=None,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("x and y array must not contain NaNs or infs.")
if not all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=s)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
@classmethod
def _from_tck(cls, tck, ext=0):
"""Construct a spline object from given tck"""
self = cls.__new__(cls)
t, c, k = tck
self._eval_args = tck
#_data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = (None,None,None,None,None,k,None,len(t),t,
c,None,None,None,None)
self.ext = ext
return self
def _reset_class(self):
data = self._data
n,t,c,k,ier = data[7],data[8],data[9],data[5],data[-1]
self._eval_args = t[:n],c[:n],k
if ier == 0:
# the spline returned has a residual sum of squares fp
# such that abs(fp-s)/s <= tol with tol a relative
# tolerance set to 0.001 by the program
pass
elif ier == -1:
# the spline returned is an interpolating spline
self._set_class(InterpolatedUnivariateSpline)
elif ier == -2:
# the spline returned is the weighted least-squares
# polynomial of degree k. In this extreme case fp gives
# the upper bound fp0 for the smoothing factor s.
self._set_class(LSQUnivariateSpline)
else:
# error
if ier == 1:
self._set_class(LSQUnivariateSpline)
message = _curfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
def _set_class(self, cls):
self._spline_class = cls
if self.__class__ in (UnivariateSpline, InterpolatedUnivariateSpline,
LSQUnivariateSpline):
self.__class__ = cls
else:
# It's an unknown subclass -- don't change class. cf. #731
pass
def _reset_nest(self, data, nest=None):
n = data[10]
if nest is None:
k,m = data[5],len(data[0])
nest = m+k+1 # this is the maximum bound for nest
else:
if not n <= nest:
raise ValueError("`nest` can only be increased")
t, c, fpint, nrdata = [np.resize(data[j], nest) for j in [8,9,11,12]]
args = data[:8] + (t,c,n,fpint,nrdata,data[13])
data = dfitpack.fpcurf1(*args)
return data
def set_smoothing_factor(self, s):
""" Continue spline computation with the given smoothing
factor s and with the knots found at the last call.
This routine modifies the spline in place.
"""
data = self._data
if data[6] == -1:
warnings.warn('smoothing factor unchanged for '
'LSQ spline with fixed knots')
return
args = data[:6] + (s,) + data[7:]
data = dfitpack.fpcurf1(*args)
if data[-1] == 1:
# nest too small, setting to maximum bound
data = self._reset_nest(data)
self._data = data
self._reset_class()
def __call__(self, x, nu=0, ext=None):
"""
Evaluate spline (or its nu-th derivative) at positions x.
Parameters
----------
x : array_like
A 1-D array of points at which to return the value of the smoothed
spline or its derivatives. Note: x can be unordered but the
evaluation is more efficient if x is (partially) ordered.
nu : int
The order of derivative of the spline to compute.
ext : int
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0, passed from the initialization of
UnivariateSpline.
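Examples
--------
A minimal usage sketch (illustrative; the fitted values depend on the
data):
>>> x = np.linspace(0, 10, 50)
>>> spl = UnivariateSpline(x, np.sin(x), s=0)
>>> xs = np.linspace(0, 10, 200)
>>> ys = spl(xs) # spline values at xs
>>> dys = spl(xs, nu=1) # first derivative at xs
>>> y_edge = spl(11.0, ext='const') # clamp to the boundary value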
"""
x = np.asarray(x)
# empty input yields empty output
if x.size == 0:
return array([])
# if nu is None:
# return dfitpack.splev(*(self._eval_args+(x,)))
# return dfitpack.splder(nu=nu,*(self._eval_args+(x,)))
if ext is None:
ext = self.ext
else:
try:
ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
return fitpack.splev(x, self._eval_args, der=nu, ext=ext)
def get_knots(self):
""" Return positions of interior knots of the spline.
Internally, the knot vector contains ``2*k`` additional boundary knots.
"""
data = self._data
k,n = data[5],data[7]
return data[8][k:n-k]
def get_coeffs(self):
"""Return spline coefficients."""
data = self._data
k,n = data[5],data[7]
return data[9][:n-k-1]
def get_residual(self):
"""Return weighted sum of squared residuals of the spline approximation.
This is equivalent to::
sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)
"""
return self._data[10]
def integral(self, a, b):
""" Return definite integral of the spline between two given points.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
integral : float
The value of the definite integral of the spline between limits.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.integral(0, 3)
9.0
which agrees with :math:`\\int x^2 dx = x^3 / 3` between the limits
of 0 and 3.
A caveat is that this routine assumes the spline to be zero outside of
the data limits:
>>> spl.integral(-1, 4)
9.0
>>> spl.integral(-1, 0)
0.0
"""
return dfitpack.splint(*(self._eval_args+(a,b)))
def derivatives(self, x):
""" Return all derivatives of the spline at the point x.
Parameters
----------
x : float
The point to evaluate the derivatives at.
Returns
-------
der : ndarray, shape(k+1,)
Derivatives of the orders 0 to k.
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 3, 11)
>>> y = x**2
>>> spl = UnivariateSpline(x, y)
>>> spl.derivatives(1.5)
array([2.25, 3.0, 2.0, 0])
"""
d,ier = dfitpack.spalde(*(self._eval_args+(x,)))
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return d
def roots(self):
""" Return the zeros of the spline.
Restriction: only cubic splines are supported by fitpack.
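Examples
--------
A short sketch (illustrative): the roots of an interpolating cubic
spline through samples of sin(x) approximate the zeros of the sine.
>>> x = np.linspace(0, 10, 50)
>>> spl = UnivariateSpline(x, np.sin(x), s=0)
>>> zeros = spl.roots() # approximately multiples of pi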
"""
k = self._data[5]
if k == 3:
z,m,ier = dfitpack.sproot(*self._eval_args[:2])
if not ier == 0:
raise ValueError("Error code returned by spalde: %s" % ier)
return z[:m]
raise NotImplementedError('finding roots unsupported for '
'non-cubic splines')
def derivative(self, n=1):
"""
Construct a new spline representing the derivative of this spline.
Parameters
----------
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k-n representing the derivative of this
spline.
See Also
--------
splder, antiderivative
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = UnivariateSpline(x, y, k=4, s=0)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> spl.derivative().roots() / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
tck = fitpack.splder(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
def antiderivative(self, n=1):
"""
Construct a new spline representing the antiderivative of this spline.
Parameters
----------
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
spline : UnivariateSpline
Spline of order k2=k+n representing the antiderivative of this
spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, derivative
Examples
--------
>>> from scipy.interpolate import UnivariateSpline
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = UnivariateSpline(x, y, s=0)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> spl(1.7), spl.antiderivative().derivative()(1.7)
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = spl.antiderivative()
>>> ispl(np.pi/2) - ispl(0)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
tck = fitpack.splantider(self._eval_args, n)
return UnivariateSpline._from_tck(tck, self.ext)
class InterpolatedUnivariateSpline(UnivariateSpline):
"""
One-dimensional interpolating spline for a given set of data points.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. Spline
function passes through all provided points. Equivalent to
`UnivariateSpline` with s=0.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
input dimension of data points
w : (N,) array_like, optional
Weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox=[x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
See Also
--------
UnivariateSpline : Superclass -- allows knots to be selected by a
smoothing condition
LSQUnivariateSpline : spline for which knots are user-selected
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import InterpolatedUnivariateSpline
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
>>> spl = InterpolatedUnivariateSpline(x, y)
>>> plt.plot(x, y, 'ro', ms=5)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(xs, spl(xs), 'g', lw=3, alpha=0.7)
>>> plt.show()
Notice that the ``spl(x)`` interpolates `y`:
>>> spl.get_residual()
0.0
"""
def __init__(self, x, y, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite):
raise ValueError("Input must not contain NaNs or infs.")
if not all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
self._data = dfitpack.fpcurf0(x,y,k,w=w,
xb=bbox[0],xe=bbox[1],s=0)
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
_fpchec_error_string = """The input parameters have been rejected by fpchec. \
This means that at least one of the following conditions is violated:
1) k+1 <= n-k-1 <= m
2) t(1) <= t(2) <= ... <= t(k+1)
t(n-k) <= t(n-k+1) <= ... <= t(n)
3) t(k+1) < t(k+2) < ... < t(n-k)
4) t(k+1) <= x(i) <= t(n-k)
5) The conditions specified by Schoenberg and Whitney must hold
for at least one subset of data points, i.e., there must be a
subset of data points y(j) such that
t(j) < y(j) < t(j+k+1), j=1,2,...,n-k-1
"""
class LSQUnivariateSpline(UnivariateSpline):
"""
One-dimensional spline with explicit internal knots.
Fits a spline y = spl(x) of degree `k` to the provided `x`, `y` data. `t`
specifies the internal knots of the spline.
Parameters
----------
x : (N,) array_like
Input dimension of data points -- must be increasing
y : (N,) array_like
Input dimension of data points
t : (M,) array_like
interior knots of the spline. Must be in ascending order and::
bbox[0] < t[0] < ... < t[-1] < bbox[-1]
w : (N,) array_like, optional
weights for spline fitting. Must be positive. If None (default),
weights are all equal.
bbox : (2,) array_like, optional
2-sequence specifying the boundary of the approximation interval. If
None (default), ``bbox = [x[0], x[-1]]``.
k : int, optional
Degree of the smoothing spline. Must be 1 <= `k` <= 5.
Default is k=3, a cubic spline.
ext : int or str, optional
Controls the extrapolation mode for elements
not in the interval defined by the knot sequence.
* if ext=0 or 'extrapolate', return the extrapolated value.
* if ext=1 or 'zeros', return 0
* if ext=2 or 'raise', raise a ValueError
* if ext=3 or 'const', return the boundary value.
The default value is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination or non-sensical results) if the inputs
do contain infinities or NaNs.
Default is False.
Raises
------
ValueError
If the interior knots do not satisfy the Schoenberg-Whitney conditions
See Also
--------
UnivariateSpline : Superclass -- knots are specified by setting a
smoothing condition
InterpolatedUnivariateSpline : spline passing through all points
splrep : An older, non object-oriented wrapping of FITPACK
splev, sproot, splint, spalde
BivariateSpline : A similar class for two-dimensional spline interpolation
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
>>> from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Fit a smoothing spline with pre-defined internal knots:
>>> t = [-1, 0, 1]
>>> spl = LSQUnivariateSpline(x, y, t)
>>> xs = np.linspace(-3, 3, 1000)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3)
>>> plt.show()
Check the knot vector:
>>> spl.get_knots()
array([-3., -1., 0., 1., 3.])
Constructing lsq spline using the knots from another spline:
>>> x = np.arange(10)
>>> s = UnivariateSpline(x, x, s=0)
>>> s.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
>>> knt = s.get_knots()
>>> s1 = LSQUnivariateSpline(x, x, knt[1:-1]) # Chop 1st and last knot
>>> s1.get_knots()
array([ 0., 2., 3., 4., 5., 6., 7., 9.])
"""
def __init__(self, x, y, t, w=None, bbox=[None]*2, k=3,
ext=0, check_finite=False):
if check_finite:
w_finite = np.isfinite(w).all() if w is not None else True
if (not np.isfinite(x).all() or not np.isfinite(y).all() or
not w_finite or not np.isfinite(t).all()):
raise ValueError("Input(s) must not contain NaNs or infs.")
if not all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
# _data == x,y,w,xb,xe,k,s,n,t,c,fp,fpint,nrdata,ier
xb = bbox[0]
xe = bbox[1]
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
t = concatenate(([xb]*(k+1), t, [xe]*(k+1)))
n = len(t)
if not alltrue(t[k+1:n-k]-t[k:n-k-1] > 0, axis=0):
raise ValueError('Interior knots t must satisfy '
'Schoenberg-Whitney conditions')
if not dfitpack.fpchec(x, t, k) == 0:
raise ValueError(_fpchec_error_string)
data = dfitpack.fpcurfm1(x, y, k, t, w=w, xb=xb, xe=xe)
self._data = data[:-3] + (None, None, data[-1])
self._reset_class()
try:
self.ext = _extrap_modes[ext]
except KeyError:
raise ValueError("Unknown extrapolation mode %s." % ext)
################ Bivariate spline ####################
class _BivariateSplineBase(object):
""" Base class for Bivariate spline s(x,y) interpolation on the rectangle
[xb,xe] x [yb, ye] calculated from a given set of data points
(x,y,z).
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
BivariateSpline :
implementation of bivariate spline interpolation on a plane grid
SphereBivariateSpline :
implementation of bivariate spline interpolation on a spherical grid
"""
def get_residual(self):
""" Return weighted sum of squared residuals of the spline
approximation: sum((w[i]*(z[i]-s(x[i],y[i])))**2, axis=0)
"""
return self.fp
def get_knots(self):
""" Return a tuple (tx,ty) where tx,ty contain knots positions
of the spline with respect to x-, y-variable, respectively.
The position of interior and additional knots are given as
t[k+1:-k-1] and t[:k+1]=b, t[-k-1:]=e, respectively.
"""
return self.tck[:2]
def get_coeffs(self):
""" Return spline coefficients."""
return self.tck[2]
def __call__(self, x, y, dx=0, dy=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
x, y : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points ``(x[i],
y[i]), i=0, ..., len(x)-1``. Standard Numpy broadcasting
is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays x, y. The arrays must be
sorted to increasing order.
Note that the axis ordering is inverted relative to
the output of meshgrid.
dx : int
Order of x-derivative
.. versionadded:: 0.14.0
dy : int
Order of y-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
x = np.asarray(x)
y = np.asarray(y)
tx, ty, c = self.tck[:3]
kx, ky = self.degrees
if grid:
if x.size == 0 or y.size == 0:
return np.zeros((x.size, y.size), dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.parder(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by parder: %s" % ier)
else:
z,ier = dfitpack.bispev(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispev: %s" % ier)
else:
# standard Numpy broadcasting
if x.shape != y.shape:
x, y = np.broadcast_arrays(x, y)
shape = x.shape
x = x.ravel()
y = y.ravel()
if x.size == 0 or y.size == 0:
return np.zeros(shape, dtype=self.tck[2].dtype)
if dx or dy:
z,ier = dfitpack.pardeu(tx,ty,c,kx,ky,dx,dy,x,y)
if not ier == 0:
raise ValueError("Error code returned by pardeu: %s" % ier)
else:
z,ier = dfitpack.bispeu(tx,ty,c,kx,ky,x,y)
if not ier == 0:
raise ValueError("Error code returned by bispeu: %s" % ier)
z = z.reshape(shape)
return z
_surfit_messages = {1:"""
The required storage space exceeds the available storage space: nxest
or nyest too small, or s too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
2:"""
A theoretically impossible result was found during the iteration
process for finding a smoothing spline with fp = s: s too small or
badly chosen eps.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
3:"""
the maximal number of iterations maxit (set to 20 by the program)
allowed for finding a smoothing spline with fp=s has been reached:
s too small.
Weighted sum of squared residuals does not satisfy abs(fp-s)/s < tol.""",
4:"""
No more knots can be added because the number of b-spline coefficients
(nx-kx-1)*(ny-ky-1) already exceeds the number of data points m:
either s or m too small.
The weighted least-squares spline corresponds to the current set of
knots.""",
5:"""
No more knots can be added because the additional knot would (quasi)
coincide with an old one: s too small or too large a weight to an
inaccurate data point.
The weighted least-squares spline corresponds to the current set of
knots.""",
10:"""
Error on entry, no approximation returned. The following conditions
must hold:
xb<=x[i]<=xe, yb<=y[i]<=ye, w[i]>0, i=0..m-1
If iopt==-1, then
xb<tx[kx+1]<tx[kx+2]<...<tx[nx-kx-2]<xe
yb<ty[ky+1]<ty[ky+2]<...<ty[ny-ky-2]<ye""",
-3:"""
The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank deficient
system (deficiency=%i). If deficiency is large, the results may be
inaccurate. Deficiency may strongly depend on the value of eps."""
}
class BivariateSpline(_BivariateSplineBase):
"""
Base class for bivariate splines.
This describes a spline ``s(x, y)`` of degrees ``kx`` and ``ky`` on
the rectangle ``[xb, xe] * [yb, ye]`` calculated from a given set
of data points ``(x, y, z)``.
This class is meant to be subclassed, not instantiated directly.
To construct these splines, call either `SmoothBivariateSpline` or
`LSQBivariateSpline`.
See Also
--------
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline :
to create a BivariateSpline through the given points
LSQBivariateSpline :
to create a BivariateSpline using weighted least-squares fitting
SphereBivariateSpline :
bivariate spline interpolation in spherical coordinates
bisplrep : older wrapping of FITPACK
bisplev : older wrapping of FITPACK
"""
@classmethod
def _from_tck(cls, tck):
"""Construct a spline object from given tck and degree"""
self = cls.__new__(cls)
if len(tck) != 5:
raise ValueError("tck should be a 5 element tuple of tx, ty, c, kx, ky")
self.tck = tck[:3]
self.degrees = tck[3:]
return self
def ev(self, xi, yi, dx=0, dy=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(xi[i], yi[i]),
i=0,...,len(xi)-1``.
Parameters
----------
xi, yi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dx : int, optional
Order of x-derivative
.. versionadded:: 0.14.0
dy : int, optional
Order of y-derivative
.. versionadded:: 0.14.0
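Examples
--------
A brief sketch (illustrative) using a `RectBivariateSpline` instance;
any constructed subclass works the same way:
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0, 1, 10)
>>> spl = RectBivariateSpline(x, y, np.outer(x, y))
>>> zi = spl.ev([0.2, 0.5, 0.8], [0.1, 0.4, 0.7]) # shape (3,)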
"""
return self.__call__(xi, yi, dx=dx, dy=dy, grid=False)
def integral(self, xa, xb, ya, yb):
"""
Evaluate the integral of the spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
Returns
-------
integ : float
The value of the resulting integral.
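Examples
--------
A brief sketch (illustrative): for data sampled from the separable
surface z = x*y on [0, 1] x [0, 1], the exact double integral is 1/4.
>>> from scipy.interpolate import RectBivariateSpline
>>> x = y = np.linspace(0, 1, 10)
>>> spl = RectBivariateSpline(x, y, np.outer(x, y))
>>> val = spl.integral(0, 1, 0, 1) # approximately 0.25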
"""
tx,ty,c = self.tck[:3]
kx,ky = self.degrees
return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb)
class SmoothBivariateSpline(BivariateSpline):
"""
Smooth bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
w : array_like, optional
Positive 1-D sequence of weights, of same length as `x`, `y` and `z`.
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if ``1/w[i]`` is an
estimate of the standard deviation of ``z[i]``.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
LSQBivariateSpline : to create a BivariateSpline using weighted least-squares fitting
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
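Examples
--------
A minimal sketch (illustrative) fitting scattered data:
>>> rng = np.random.RandomState(0)
>>> x, y = rng.uniform(size=100), rng.uniform(size=100)
>>> z = np.sin(x) * np.cos(y)
>>> spl = SmoothBivariateSpline(x, y, z)
>>> znew = spl(np.linspace(0.1, 0.9, 5), np.linspace(0.1, 0.9, 5))
>>> znew.shape
(5, 5)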
"""
def __init__(self, x, y, z, w=None, bbox=[None] * 4, kx=3, ky=3, s=None,
eps=None):
xb,xe,yb,ye = bbox
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=1)
if ier > 10: # lwrk2 was too small, re-run
nx,tx,ny,ty,c,fp,wrk1,ier = dfitpack.surfit_smth(x,y,z,w,
xb,xe,yb,ye,
kx,ky,s=s,
eps=eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
message = _surfit_messages.get(ier,'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx[:nx],ty[:ny],c[:(nx-kx-1)*(ny-ky-1)]
self.degrees = kx,ky
class LSQBivariateSpline(BivariateSpline):
"""
Weighted least-squares bivariate spline approximation.
Parameters
----------
x, y, z : array_like
1-D sequences of data points (order is not important).
tx, ty : array_like
Strictly ordered 1-D sequences of knots coordinates.
w : array_like, optional
Positive 1-D array of weights, of the same length as `x`, `y` and `z`.
bbox : (4,) array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
See Also
--------
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothBivariateSpline : create a smoothing BivariateSpline
Notes
-----
The length of `x`, `y` and `z` should be at least ``(kx+1) * (ky+1)``.
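Examples
--------
A minimal sketch (illustrative; the chosen knots are arbitrary):
>>> rng = np.random.RandomState(0)
>>> x, y = rng.uniform(size=200), rng.uniform(size=200)
>>> z = x * y
>>> tx = ty = np.linspace(0.2, 0.8, 4) # interior knots
>>> spl = LSQBivariateSpline(x, y, z, tx, ty)
>>> zi = spl.ev(0.5, 0.5) # value near 0.25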
"""
def __init__(self, x, y, z, tx, ty, w=None, bbox=[None]*4, kx=3, ky=3,
eps=None):
nx = 2*kx+2+len(tx)
ny = 2*ky+2+len(ty)
tx1 = zeros((nx,),float)
ty1 = zeros((ny,),float)
tx1[kx+1:nx-kx-1] = tx
ty1[ky+1:ny-ky-1] = ty
xb,xe,yb,ye = bbox
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=1)
if ier > 10:
tx1,ty1,c,fp,ier = dfitpack.surfit_lsq(x,y,z,tx1,ty1,w,
xb,xe,yb,ye,
kx,ky,eps,lwrk2=ier)
if ier in [0,-1,-2]: # normal return
pass
else:
if ier < -2:
deficiency = (nx-kx-1)*(ny-ky-1)+ier
message = _surfit_messages.get(-3) % (deficiency)
else:
message = _surfit_messages.get(ier, 'ier=%s' % (ier))
warnings.warn(message)
self.fp = fp
self.tck = tx1, ty1, c
self.degrees = kx, ky
class RectBivariateSpline(BivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh.
Can be used for both smoothing and interpolating data.
Parameters
----------
x,y : array_like
1-D arrays of coordinates in strictly ascending order.
z : array_like
2-D array of data with shape (x.size,y.size).
bbox : array_like, optional
Sequence of length 4 specifying the boundary of the rectangular
approximation domain. By default,
``bbox=[min(x,tx),max(x,tx), min(y,ty),max(y,ty)]``.
kx, ky : ints, optional
Degrees of the bivariate spline. Default is 3.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w[i]*(z[i]-s(x[i], y[i])))**2, axis=0) <= s``
Default is ``s=0``, which is for interpolation.
See Also
--------
SmoothBivariateSpline : a smoothing bivariate spline for scattered data
bisplrep : an older wrapping of FITPACK
bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
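Examples
--------
A minimal sketch (illustrative) interpolating gridded data:
>>> x = y = np.arange(0, 10, 0.5)
>>> z = np.sin(x)[:, None] * np.cos(y)[None, :]
>>> interp = RectBivariateSpline(x, y, z) # s=0 interpolates
>>> znew = interp(np.linspace(0, 9.5, 40), np.linspace(0, 9.5, 40))
>>> znew.shape
(40, 40)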
"""
def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0):
x, y = ravel(x), ravel(y)
if not all(diff(x) > 0.0):
raise ValueError('x must be strictly increasing')
if not all(diff(y) > 0.0):
raise ValueError('y must be strictly increasing')
if not ((x.min() == x[0]) and (x.max() == x[-1])):
raise ValueError('x must be strictly ascending')
if not ((y.min() == y[0]) and (y.max() == y[-1])):
raise ValueError('y must be strictly ascending')
if not x.size == z.shape[0]:
raise ValueError('x dimension of z must have same number of '
'elements as x')
if not y.size == z.shape[1]:
raise ValueError('y dimension of z must have same number of '
'elements as y')
z = ravel(z)
xb, xe, yb, ye = bbox
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(x, y, z, xb, xe, yb,
ye, kx, ky, s)
if ier not in [0, -1, -2]:
msg = _surfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)]
self.degrees = kx, ky
_spherefit_messages = _surfit_messages.copy()
_spherefit_messages[10] = """
ERROR. On entry, the input data are controlled on validity. The following
restrictions must be satisfied:
-1<=iopt<=1, m>=2, ntest>=8 ,npest >=8, 0<eps<1,
0<=teta(i)<=pi, 0<=phi(i)<=2*pi, w(i)>0, i=1,...,m
lwrk1 >= 185+52*v+10*u+14*u*v+8*(u-1)*v**2+8*m
kwrk >= m+(ntest-7)*(npest-7)
if iopt=-1: 8<=nt<=ntest , 9<=np<=npest
0<tt(5)<tt(6)<...<tt(nt-4)<pi
0<tp(5)<tp(6)<...<tp(np-4)<2*pi
if iopt>=0: s>=0
if one of these conditions is found to be violated,control
is immediately repassed to the calling program. in that
case there is no approximation returned."""
_spherefit_messages[-3] = """
WARNING. The coefficients of the spline returned have been computed as the
minimal norm least-squares solution of a (numerically) rank
deficient system (deficiency=%i, rank=%i). Especially if the rank
deficiency, which is computed by 6+(nt-8)*(np-7)+ier, is large,
the results may be inaccurate. They could also seriously depend on
the value of eps."""
class SphereBivariateSpline(_BivariateSplineBase):
"""
Bivariate spline s(x,y) of degree 3 on a sphere, calculated from a
given set of data points (theta,phi,r).
.. versionadded:: 0.11.0
See Also
--------
bisplrep, bisplev : an older wrapping of FITPACK
UnivariateSpline : a similar class for univariate spline interpolation
SmoothSphereBivariateSpline :
to create a SphereBivariateSpline through the given points
LSQSphereBivariateSpline :
to create a SphereBivariateSpline using weighted least-squares fitting
"""
def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
"""
Evaluate the spline or its derivatives at given positions.
Parameters
----------
theta, phi : array_like
Input coordinates.
If `grid` is False, evaluate the spline at points
``(theta[i], phi[i]), i=0, ..., len(x)-1``. Standard
Numpy broadcasting is obeyed.
If `grid` is True: evaluate spline at the grid points
defined by the coordinate arrays theta, phi. The arrays
must be sorted to increasing order.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int
Order of phi-derivative
.. versionadded:: 0.14.0
grid : bool
Whether to evaluate the results on a grid spanned by the
input arrays, or at points specified by the input arrays.
.. versionadded:: 0.14.0
"""
theta = np.asarray(theta)
phi = np.asarray(phi)
if theta.size > 0 and (theta.min() < 0. or theta.max() > np.pi):
raise ValueError("requested theta out of bounds.")
if phi.size > 0 and (phi.min() < 0. or phi.max() > 2. * np.pi):
raise ValueError("requested phi out of bounds.")
return _BivariateSplineBase.__call__(self, theta, phi,
dx=dtheta, dy=dphi, grid=grid)
def ev(self, theta, phi, dtheta=0, dphi=0):
"""
Evaluate the spline at points
Returns the interpolated value at ``(theta[i], phi[i]),
i=0,...,len(theta)-1``.
Parameters
----------
theta, phi : array_like
Input coordinates. Standard Numpy broadcasting is obeyed.
dtheta : int, optional
Order of theta-derivative
.. versionadded:: 0.14.0
dphi : int, optional
Order of phi-derivative
.. versionadded:: 0.14.0
"""
return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
class SmoothSphereBivariateSpline(SphereBivariateSpline):
"""
Smooth bivariate spline approximation in spherical coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
w : array_like, optional
Positive 1-D sequence of weights.
s : float, optional
Positive smoothing factor defined for estimation condition:
``sum((w(i)*(r(i) - s(theta(i), phi(i))))**2, axis=0) <= s``
Default ``s=len(w)`` which should be a good value if 1/w[i] is an
estimate of the standard deviation of r[i].
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object
>>> lats, lons = np.meshgrid(theta, phi)
>>> from scipy.interpolate import SmoothSphereBivariateSpline
>>> lut = SmoothSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), s=3.5)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2 * np.pi, 90)
>>> data_smth = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_smth, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, w=None, s=0., eps=1E-16):
if isinstance(w, float):
w = ones(len(theta)) * w # broadcast a scalar weight to all data points
nt_, tt_, np_, tp_, c, fp, ier = dfitpack.spherfit_smth(theta, phi,
r, w=w, s=s,
eps=eps)
if ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_[:nt_], tp_[:np_], c[:(nt_ - 4) * (np_ - 4)]
self.degrees = (3, 3)
class LSQSphereBivariateSpline(SphereBivariateSpline):
"""
Weighted least-squares bivariate spline approximation in spherical
coordinates.
.. versionadded:: 0.11.0
Parameters
----------
theta, phi, r : array_like
1-D sequences of data points (order is not important). Coordinates
must be given in radians. Theta must lie within the interval (0, pi),
and phi must lie within the interval (0, 2pi).
tt, tp : array_like
Strictly ordered 1-D sequences of knots coordinates.
Coordinates must satisfy ``0 < tt[i] < pi``, ``0 < tp[i] < 2*pi``.
w : array_like, optional
Positive 1-D sequence of weights, of the same length as `theta`, `phi`
and `r`.
eps : float, optional
A threshold for determining the effective rank of an over-determined
linear system of equations. `eps` should have a value between 0 and 1,
the default is 1e-16.
Notes
-----
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/sphere.f
Examples
--------
Suppose we have global data on a coarse grid (the input data does not
have to be on a grid):
>>> theta = np.linspace(0., np.pi, 7)
>>> phi = np.linspace(0., 2*np.pi, 9)
>>> data = np.empty((theta.shape[0], phi.shape[0]))
>>> data[:,0], data[0,:], data[-1,:] = 0., 0., 0.
>>> data[1:-1,1], data[1:-1,-1] = 1., 1.
>>> data[1,1:-1], data[-2,1:-1] = 1., 1.
>>> data[2:-2,2], data[2:-2,-2] = 2., 2.
>>> data[2,2:-2], data[-3,2:-2] = 2., 2.
>>> data[3,3:-2] = 3.
>>> data = np.roll(data, 4, 1)
We need to set up the interpolator object. Here, we must also specify the
coordinates of the knots to use.
>>> lats, lons = np.meshgrid(theta, phi)
>>> knotst, knotsp = theta.copy(), phi.copy()
>>> knotst[0] += .0001
>>> knotst[-1] -= .0001
>>> knotsp[0] += .0001
>>> knotsp[-1] -= .0001
>>> from scipy.interpolate import LSQSphereBivariateSpline
>>> lut = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
... data.T.ravel(), knotst, knotsp)
As a first test, we'll see what the algorithm returns when run on the
input coordinates
>>> data_orig = lut(theta, phi)
Finally we interpolate the data to a finer grid
>>> fine_lats = np.linspace(0., np.pi, 70)
>>> fine_lons = np.linspace(0., 2*np.pi, 90)
>>> data_lsq = lut(fine_lats, fine_lons)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(131)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(132)
>>> ax2.imshow(data_orig, interpolation='nearest')
>>> ax3 = fig.add_subplot(133)
>>> ax3.imshow(data_lsq, interpolation='nearest')
>>> plt.show()
"""
def __init__(self, theta, phi, r, tt, tp, w=None, eps=1E-16):
if isinstance(w, float):
w = ones(len(theta)) * w # broadcast a scalar weight to all data points
nt_, np_ = 8 + len(tt), 8 + len(tp)
tt_, tp_ = zeros((nt_,), float), zeros((np_,), float)
tt_[4:-4], tp_[4:-4] = tt, tp
tt_[-4:], tp_[-4:] = np.pi, 2. * np.pi
tt_, tp_, c, fp, ier = dfitpack.spherfit_lsq(theta, phi, r, tt_, tp_,
w=w, eps=eps)
if ier < -2:
deficiency = 6 + (nt_ - 8) * (np_ - 7) + ier
message = _spherefit_messages.get(-3) % (deficiency, -ier)
warnings.warn(message)
elif ier not in [0, -1, -2]:
message = _spherefit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(message)
self.fp = fp
self.tck = tt_, tp_, c
self.degrees = (3, 3)
_spfit_messages = _surfit_messages.copy()
_spfit_messages[10] = """
ERROR: on entry, the input data are controlled on validity
the following restrictions must be satisfied.
-1<=iopt(1)<=1, 0<=iopt(2)<=1, 0<=iopt(3)<=1,
-1<=ider(1)<=1, 0<=ider(2)<=1, ider(2)=0 if iopt(2)=0.
-1<=ider(3)<=1, 0<=ider(4)<=1, ider(4)=0 if iopt(3)=0.
mu >= mumin (see above), mv >= 4, nuest >=8, nvest >= 8,
kwrk>=5+mu+mv+nuest+nvest,
lwrk >= 12+nuest*(mv+nvest+3)+nvest*24+4*mu+8*mv+max(nuest,mv+nvest)
0< u(i-1)<u(i)< pi,i=2,..,mu,
-pi<=v(1)< pi, v(1)<v(i-1)<v(i)<v(1)+2*pi, i=3,...,mv
if iopt(1)=-1: 8<=nu<=min(nuest,mu+6+iopt(2)+iopt(3))
0<tu(5)<tu(6)<...<tu(nu-4)< pi
8<=nv<=min(nvest,mv+7)
v(1)<tv(5)<tv(6)<...<tv(nv-4)<v(1)+2*pi
the schoenberg-whitney conditions, i.e. there must be
subset of grid co-ordinates uu(p) and vv(q) such that
tu(p) < uu(p) < tu(p+4) ,p=1,...,nu-4
(iopt(2)=1 and iopt(3)=1 also count for a uu-value
tv(q) < vv(q) < tv(q+4) ,q=1,...,nv-4
(vv(q) is either a value v(j) or v(j)+2*pi)
if iopt(1)>=0: s>=0
if s=0: nuest>=mu+6+iopt(2)+iopt(3), nvest>=mv+7
if one of these conditions is found to be violated,control is
immediately repassed to the calling program. in that case there is no
approximation returned."""
class RectSphereBivariateSpline(SphereBivariateSpline):
"""
Bivariate spline approximation over a rectangular mesh on a sphere.
Can be used for smoothing data.
.. versionadded:: 0.11.0
Parameters
----------
u : array_like
1-D array of latitude coordinates in strictly ascending order.
Coordinates must be given in radians and lie within the interval
(0, pi).
v : array_like
1-D array of longitude coordinates in strictly ascending order.
Coordinates must be given in radians. First element (v[0]) must lie
within the interval [-pi, pi). Last element (v[-1]) must satisfy
v[-1] <= v[0] + 2*pi.
r : array_like
2-D array of data with shape ``(u.size, v.size)``.
s : float, optional
Positive smoothing factor defined for estimation condition
(``s=0`` is for interpolation).
pole_continuity : bool or (bool, bool), optional
Order of continuity at the poles ``u=0`` (``pole_continuity[0]``) and
``u=pi`` (``pole_continuity[1]``). The order of continuity at the pole
will be 1 or 0 when this is True or False, respectively.
Defaults to False.
pole_values : float or (float, float), optional
Data values at the poles ``u=0`` and ``u=pi``. Either the whole
parameter or each individual element can be None. Defaults to None.
pole_exact : bool or (bool, bool), optional
Data value exactness at the poles ``u=0`` and ``u=pi``. If True, the
value is considered to be the right function value, and it will be
fitted exactly. If False, the value will be considered to be a data
value just like the other data values. Defaults to False.
pole_flat : bool or (bool, bool), optional
For the poles at ``u=0`` and ``u=pi``, specify whether or not the
approximation has vanishing derivatives. Defaults to False.
See Also
--------
RectBivariateSpline : bivariate spline approximation over a rectangular
mesh
Notes
-----
Currently, only the smoothing spline approximation (``iopt[0] = 0`` and
``iopt[0] = 1`` in the FITPACK routine) is supported. The exact
least-squares spline approximation is not implemented yet.
When actually performing the interpolation, the requested `v` values must
lie within the same length 2pi interval that the original `v` values were
chosen from.
For more information, see the FITPACK_ site about this function.
.. _FITPACK: http://www.netlib.org/dierckx/spgrid.f
Examples
--------
Suppose we have global data on a coarse grid
>>> lats = np.linspace(10, 170, 9) * np.pi / 180.
>>> lons = np.linspace(0, 350, 18) * np.pi / 180.
>>> data = np.dot(np.atleast_2d(90. - np.linspace(-80., 80., 18)).T,
... np.atleast_2d(180. - np.abs(np.linspace(0., 350., 9)))).T
We want to interpolate it to a global one-degree grid
>>> new_lats = np.linspace(1, 180, 180) * np.pi / 180
>>> new_lons = np.linspace(1, 360, 360) * np.pi / 180
>>> new_lats, new_lons = np.meshgrid(new_lats, new_lons)
We need to set up the interpolator object
>>> from scipy.interpolate import RectSphereBivariateSpline
>>> lut = RectSphereBivariateSpline(lats, lons, data)
Finally we interpolate the data. The `RectSphereBivariateSpline` object
only takes 1-D arrays as input, therefore we need to do some reshaping.
>>> data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
Looking at the original and the interpolated data, one can see that the
interpolant reproduces the original data very well:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> ax1.imshow(data, interpolation='nearest')
>>> ax2 = fig.add_subplot(212)
>>> ax2.imshow(data_interp, interpolation='nearest')
>>> plt.show()
Choosing the optimal value of ``s`` can be a delicate task. Recommended
values for ``s`` depend on the accuracy of the data values. If the user
has an idea of the statistical errors on the data, she can also find a
proper estimate for ``s``. By assuming that, if she specifies the
right ``s``, the interpolator will use a spline ``f(u,v)`` which exactly
reproduces the function underlying the data, she can evaluate
``sum((r(i,j)-s(u(i),v(j)))**2)`` to find a good estimate for this ``s``.
For example, if she knows that the statistical errors on her
``r(i,j)``-values are not greater than 0.1, she may expect that a good
``s`` should have a value not larger than ``u.size * v.size * (0.1)**2``.
If nothing is known about the statistical error in ``r(i,j)``, ``s`` must
be determined by trial and error. The best is then to start with a very
large value of ``s`` (to determine the least-squares polynomial and the
corresponding upper bound ``fp0`` for ``s``) and then to progressively
decrease the value of ``s`` (say by a factor 10 in the beginning, i.e.
``s = fp0 / 10, fp0 / 100, ...`` and more carefully as the approximation
shows more detail) to obtain closer fits.
The interpolation results for different values of ``s`` give some insight
into this process:
>>> fig2 = plt.figure()
>>> s = [3e9, 2e9, 1e9, 1e8]
>>> for ii in range(len(s)):
... lut = RectSphereBivariateSpline(lats, lons, data, s=s[ii])
... data_interp = lut.ev(new_lats.ravel(),
... new_lons.ravel()).reshape((360, 180)).T
... ax = fig2.add_subplot(2, 2, ii+1)
... ax.imshow(data_interp, interpolation='nearest')
... ax.set_title("s = %g" % s[ii])
>>> plt.show()
"""
def __init__(self, u, v, r, s=0., pole_continuity=False, pole_values=None,
pole_exact=False, pole_flat=False):
iopt = np.array([0, 0, 0], dtype=int)
ider = np.array([-1, 0, -1, 0], dtype=int)
if pole_values is None:
pole_values = (None, None)
elif isinstance(pole_values, (float, np.float32, np.float64)):
pole_values = (pole_values, pole_values)
if isinstance(pole_continuity, bool):
pole_continuity = (pole_continuity, pole_continuity)
if isinstance(pole_exact, bool):
pole_exact = (pole_exact, pole_exact)
if isinstance(pole_flat, bool):
pole_flat = (pole_flat, pole_flat)
r0, r1 = pole_values
iopt[1:] = pole_continuity
if r0 is None:
ider[0] = -1
else:
ider[0] = pole_exact[0]
if r1 is None:
ider[2] = -1
else:
ider[2] = pole_exact[1]
ider[1], ider[3] = pole_flat
u, v = np.ravel(u), np.ravel(v)
if not np.all(np.diff(u) > 0.0):
raise ValueError('u must be strictly increasing')
if not np.all(np.diff(v) > 0.0):
raise ValueError('v must be strictly increasing')
if not u.size == r.shape[0]:
raise ValueError('u dimension of r must have same number of '
'elements as u')
if not v.size == r.shape[1]:
raise ValueError('v dimension of r must have same number of '
'elements as v')
if pole_continuity[1] is False and pole_flat[1] is True:
raise ValueError('if pole_continuity is False, so must be '
'pole_flat')
if pole_continuity[0] is False and pole_flat[0] is True:
raise ValueError('if pole_continuity is False, so must be '
'pole_flat')
r = np.ravel(r)
nu, tu, nv, tv, c, fp, ier = dfitpack.regrid_smth_spher(iopt, ider,
u.copy(), v.copy(), r.copy(), r0, r1, s)
if ier not in [0, -1, -2]:
msg = _spfit_messages.get(ier, 'ier=%s' % (ier))
raise ValueError(msg)
self.fp = fp
self.tck = tu[:nu], tv[:nv], c[:(nu - 4) * (nv-4)]
self.degrees = (3, 3)
| 62,067 | 35.275862 | 84 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/setup.py |
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
lapack_opt = get_info('lapack_opt', notfound_action=2)
config = Configuration('interpolate', parent_package, top_path)
fitpack_src = [join('fitpack', '*.f')]
config.add_library('fitpack', sources=fitpack_src)
config.add_extension('interpnd',
sources=['interpnd.c'])
config.add_extension('_ppoly',
sources=['_ppoly.c'],
**lapack_opt)
config.add_extension('_bspl',
sources=['_bspl.c'],
libraries=['fitpack'],
depends=['src/__fitpack.h'] + fitpack_src)
config.add_extension('_fitpack',
sources=['src/_fitpackmodule.c'],
libraries=['fitpack'],
depends=(['src/__fitpack.h','src/multipack.h']
+ fitpack_src)
)
config.add_extension('dfitpack',
sources=['src/fitpack.pyf'],
libraries=['fitpack'],
depends=fitpack_src,
)
config.add_extension('_interpolate',
sources=['src/_interpolate.cpp'],
include_dirs=['src'],
depends=['src/interpolate.h'])
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 1,772 | 31.236364 | 71 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/rbf.py |
"""rbf - Radial basis functions for interpolation/smoothing scattered Nd data.
Written by John Travers <jtravs@gmail.com>, February 2007
Based closely on Matlab code by Alex Chirokov
Additional, large, improvements by Robert Hetland
Some additional alterations by Travis Oliphant
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Copyright (c) 2006-2007, Robert Hetland <hetland@tamu.edu>
Copyright (c) 2007, John Travers <jtravs@gmail.com>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Robert Hetland nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy import linalg
from scipy._lib.six import callable, get_method_function, get_function_code
from scipy.special import xlogy
__all__ = ['Rbf']
class Rbf(object):
"""
Rbf(*args)
A class for radial basis function approximation/interpolation of
n-dimensional scattered data.
Parameters
----------
*args : arrays
x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
and d is the array of values at the nodes
function : str or callable, optional
The radial basis function, based on the radius, r, given by the norm
(which defaults to Euclidean distance); the default function is 'multiquadric'::
'multiquadric': sqrt((r/self.epsilon)**2 + 1)
'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
'gaussian': exp(-(r/self.epsilon)**2)
'linear': r
'cubic': r**3
'quintic': r**5
'thin_plate': r**2 * log(r)
If callable, then it must take 2 arguments (self, r). The epsilon
parameter will be available as self.epsilon. Other keyword
arguments passed in will be available as well.
epsilon : float, optional
Adjustable constant for gaussian or multiquadrics functions
- defaults to approximate average distance between nodes (which is
a good start).
smooth : float, optional
Values greater than zero increase the smoothness of the
approximation. 0 is for interpolation (default), the function will
always go through the nodal points in this case.
norm : callable, optional
A function that returns the 'distance' between two points, with
inputs as arrays of positions (x, y, z, ...), and an output as an
array of distance. E.g, the default::
def euclidean_norm(x1, x2):
return sqrt( ((x1 - x2)**2).sum(axis=0) )
which is called with ``x1 = x1[ndims, newaxis, :]`` and
``x2 = x2[ndims, : ,newaxis]`` such that the result is a matrix of the
distances from each point in ``x1`` to each point in ``x2``.
Examples
--------
>>> from scipy.interpolate import Rbf
>>> x, y, z, d = np.random.rand(4, 50)
>>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
>>> xi = yi = zi = np.linspace(0, 1, 20)
>>> di = rbfi(xi, yi, zi) # interpolated values
>>> di.shape
(20,)
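A custom radial function may also be supplied (an illustrative sketch;
per the `function` parameter above, it must accept ``self`` and the
radius array, and may use ``self.epsilon``):
>>> def gauss(self, r):
...     return np.exp(-(r / self.epsilon)**2)
>>> rbfi2 = Rbf(x, y, z, d, function=gauss)
>>> di2 = rbfi2(xi, yi, zi) # interpolated values, shape (20,)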
"""
def _euclidean_norm(self, x1, x2):
return np.sqrt(((x1 - x2)**2).sum(axis=0))
def _h_multiquadric(self, r):
return np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_inverse_multiquadric(self, r):
return 1.0/np.sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_gaussian(self, r):
return np.exp(-(1.0/self.epsilon*r)**2)
def _h_linear(self, r):
return r
def _h_cubic(self, r):
return r**3
def _h_quintic(self, r):
return r**5
def _h_thin_plate(self, r):
return xlogy(r**2, r)
# Setup self._function and do smoke test on initial r
def _init_function(self, r):
if isinstance(self.function, str):
self.function = self.function.lower()
_mapped = {'inverse': 'inverse_multiquadric',
'inverse multiquadric': 'inverse_multiquadric',
'thin-plate': 'thin_plate'}
if self.function in _mapped:
self.function = _mapped[self.function]
func_name = "_h_" + self.function
if hasattr(self, func_name):
self._function = getattr(self, func_name)
else:
functionlist = [x[3:] for x in dir(self) if x.startswith('_h_')]
raise ValueError("function must be a callable or one of " +
", ".join(functionlist))
self._function = getattr(self, "_h_"+self.function)
elif callable(self.function):
allow_one = False
if hasattr(self.function, 'func_code') or \
hasattr(self.function, '__code__'):
val = self.function
allow_one = True
elif hasattr(self.function, "im_func"):
val = get_method_function(self.function)
elif hasattr(self.function, "__call__"):
val = get_method_function(self.function.__call__)
else:
raise ValueError("Cannot determine number of arguments to function")
argcount = get_function_code(val).co_argcount
if allow_one and argcount == 1:
self._function = self.function
elif argcount == 2:
if sys.version_info[0] >= 3:
self._function = self.function.__get__(self, Rbf)
else:
import new
self._function = new.instancemethod(self.function, self,
Rbf)
else:
raise ValueError("Function argument must take 1 or 2 arguments.")
a0 = self._function(r)
if a0.shape != r.shape:
raise ValueError("Callable must take array and return array of the same shape")
return a0
def __init__(self, *args, **kwargs):
self.xi = np.asarray([np.asarray(a, dtype=np.float_).flatten()
for a in args[:-1]])
self.N = self.xi.shape[-1]
self.di = np.asarray(args[-1]).flatten()
if not all([x.size == self.di.size for x in self.xi]):
raise ValueError("All arrays must be equal length.")
self.norm = kwargs.pop('norm', self._euclidean_norm)
self.epsilon = kwargs.pop('epsilon', None)
if self.epsilon is None:
# default epsilon is the "average distance between nodes" based
# on a bounding hypercube
dim = self.xi.shape[0]
ximax = np.amax(self.xi, axis=1)
ximin = np.amin(self.xi, axis=1)
edges = ximax-ximin
edges = edges[np.nonzero(edges)]
self.epsilon = np.power(np.prod(edges)/self.N, 1.0/edges.size)
self.smooth = kwargs.pop('smooth', 0.0)
self.function = kwargs.pop('function', 'multiquadric')
# attach anything left in kwargs to self
# for use by any user-callable function or
# to save on the object returned.
for item, value in kwargs.items():
setattr(self, item, value)
self.nodes = linalg.solve(self.A, self.di)
@property
def A(self):
# this only exists for backwards compatibility: self.A was available
# and, at least technically, public.
r = self._call_norm(self.xi, self.xi)
return self._init_function(r) - np.eye(self.N)*self.smooth
def _call_norm(self, x1, x2):
if len(x1.shape) == 1:
x1 = x1[np.newaxis, :]
if len(x2.shape) == 1:
x2 = x2[np.newaxis, :]
x1 = x1[..., :, np.newaxis]
x2 = x2[..., np.newaxis, :]
return self.norm(x1, x2)
def __call__(self, *args):
args = [np.asarray(x) for x in args]
if not all([x.shape == y.shape for x in args for y in args]):
raise ValueError("Array lengths must be equal")
shp = args[0].shape
xa = np.asarray([a.flatten() for a in args], dtype=np.float_)
r = self._call_norm(xa, self.xi)
return np.dot(self._function(r), self.nodes).reshape(shp)
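# Illustrative sketch (a hypothetical `_demo_` helper, not part of the module):
# `_init_function` accepts either a one-argument callable ``f(r)`` or a
# two-argument callable ``f(self, r)``; the latter is bound as a method, so it
# can read instance attributes such as ``self.epsilon``.
def _demo_custom_rbf_kernels():
    x = np.linspace(0, 10, 9)
    d = np.sin(x)
    # One-argument form: a plain function of the distance r.
    rbf_plain = Rbf(x, d, function=lambda r: r**3)
    # Two-argument form: bound as a method, so self.epsilon is available.
    def scaled_gaussian(self, r):
        return np.exp(-(r / self.epsilon)**2)
    rbf_bound = Rbf(x, d, function=scaled_gaussian)
    xi = np.linspace(0, 10, 101)
    return rbf_plain(xi), rbf_bound(xi)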
| 9,712 | 38.48374 | 91 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/_pade.py
|
from __future__ import division, print_function, absolute_import
from numpy import zeros, asarray, eye, poly1d, hstack, r_
from scipy import linalg
__all__ = ["pade"]
def pade(an, m):
"""
Return Pade approximation to a polynomial as the ratio of two polynomials.
Parameters
----------
an : (N,) array_like
Taylor series coefficients.
m : int
The order of the returned approximating polynomials.
Returns
-------
p, q : Polynomial class
The Pade approximation of the polynomial defined by `an` is
``p(x)/q(x)``.
Examples
--------
>>> from scipy.interpolate import pade
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
>>> p, q = pade(e_exp, 2)
>>> e_exp.reverse()
>>> e_poly = np.poly1d(e_exp)
Compare ``e_poly(x)`` and the Pade approximation ``p(x)/q(x)``
>>> e_poly(1)
2.7166666666666668
>>> p(1)/q(1)
2.7179487179487181
"""
an = asarray(an)
N = len(an) - 1
n = N - m
if n < 0:
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1, n+1)
Bkj = zeros((N+1, m), 'd')
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
| 1,460 | 23.35 | 78 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/polyint.py
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.special import factorial
from scipy._lib.six import xrange
from scipy._lib._util import _asarray_validated
__all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator",
"barycentric_interpolate", "approximate_taylor_polynomial"]
def _isscalar(x):
"""Check whether x is if a scalar type, or 0-dim"""
return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()
class _Interpolator1D(object):
"""
Common features in univariate interpolation
Deal with input data type and interpolation axis rolling. The
actual interpolator can assume the y-data is of shape (n, r) where
`n` is the number of x-points, and `r` the number of variables,
and use self.dtype as the y-data type.
Attributes
----------
_y_axis
Axis along which the interpolation goes in the original array
_y_extra_shape
Additional trailing shape of the input arrays, excluding
the interpolation axis.
dtype
Dtype of the y-data arrays. Can be set via set_dtype, which
forces it to be float or complex.
Methods
-------
__call__
_prepare_x
_finish_y
_reshape_yi
_set_yi
_set_dtype
_evaluate
"""
__slots__ = ('_y_axis', '_y_extra_shape', 'dtype')
def __init__(self, xi=None, yi=None, axis=None):
self._y_axis = axis
self._y_extra_shape = None
self.dtype = None
if yi is not None:
self._set_yi(yi, xi=xi, axis=axis)
def __call__(self, x):
"""
Evaluate the interpolant
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate(x)
return self._finish_y(y, x_shape)
def _evaluate(self, x):
"""
Actually evaluate the value of the interpolator.
"""
raise NotImplementedError()
def _prepare_x(self, x):
"""Reshape input x array to 1-D"""
x = _asarray_validated(x, check_finite=False, as_inexact=True)
x_shape = x.shape
return x.ravel(), x_shape
def _finish_y(self, y, x_shape):
"""Reshape interpolated y back to n-d array similar to initial y"""
y = y.reshape(x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = (list(range(nx, nx + self._y_axis))
+ list(range(nx)) + list(range(nx+self._y_axis, nx+ny)))
y = y.transpose(s)
return y
def _reshape_yi(self, yi, check=False):
yi = np.rollaxis(np.asarray(yi), self._y_axis)
if check and yi.shape[1:] != self._y_extra_shape:
ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:],
self._y_extra_shape[:-self._y_axis])
raise ValueError("Data must be of shape %s" % ok_shape)
return yi.reshape((yi.shape[0], -1))
def _set_yi(self, yi, xi=None, axis=None):
if axis is None:
axis = self._y_axis
if axis is None:
raise ValueError("no interpolation axis specified")
yi = np.asarray(yi)
shape = yi.shape
if shape == ():
shape = (1,)
if xi is not None and shape[axis] != len(xi):
raise ValueError("x and y arrays must be equal in length along "
"interpolation axis.")
self._y_axis = (axis % yi.ndim)
self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:]
self.dtype = None
self._set_dtype(yi.dtype)
def _set_dtype(self, dtype, union=False):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.dtype, np.complexfloating):
self.dtype = np.complex_
else:
if not union or self.dtype != np.complex_:
self.dtype = np.float_
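# Illustrative sketch (hypothetical helper, not part of the module) of the
# axis handling described above: interpolating along axis=1 of a (2, 5) array
# returns an array whose axis 1 is replaced by the shape of the new points.
def _demo_interpolation_axis():
    xi = np.linspace(0, 1, 5)
    yi = np.stack([np.sin(xi), np.cos(xi)])     # shape (2, 5)
    interp = KroghInterpolator(xi, yi, axis=1)  # interpolate along axis 1
    return interp(np.linspace(0, 1, 7)).shape   # -> (2, 7)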
class _Interpolator1DWithDerivatives(_Interpolator1D):
def derivatives(self, x, der=None):
"""
Evaluate many derivatives of the polynomial at the point x
Produce an array of all derivative values at the point x.
Parameters
----------
x : array_like
Point or points at which to evaluate the derivatives
der : int or None, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points). This number includes the function value as 0th
derivative.
Returns
-------
d : ndarray
Array with derivatives; d[j] contains the j-th derivative.
Shape of d[j] is determined by replacing the interpolation
axis in the original array with the shape of x.
Examples
--------
>>> from scipy.interpolate import KroghInterpolator
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)
array([1.0,2.0,3.0])
>>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])
array([[1.0,1.0],
[2.0,2.0],
[3.0,3.0]])
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der)
y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape)
if self._y_axis != 0 and x_shape != ():
nx = len(x_shape)
ny = len(self._y_extra_shape)
s = ([0] + list(range(nx+1, nx + self._y_axis+1))
+ list(range(1,nx+1)) +
list(range(nx+1+self._y_axis, nx+ny+1)))
y = y.transpose(s)
return y
def derivative(self, x, der=1):
"""
Evaluate one derivative of the polynomial at the point x
Parameters
----------
x : array_like
Point or points at which to evaluate the derivatives
der : integer, optional
Which derivative to extract. This number includes the
function value as 0th derivative.
Returns
-------
d : ndarray
Derivative interpolated at the x-points. Shape of d is
determined by replacing the interpolation axis in the
original array with the shape of x.
Notes
-----
This is computed by evaluating all derivatives up to the desired
one (using self.derivatives()) and then discarding the rest.
"""
x, x_shape = self._prepare_x(x)
y = self._evaluate_derivatives(x, der+1)
return self._finish_y(y[der], x_shape)
class KroghInterpolator(_Interpolator1DWithDerivatives):
"""
Interpolating polynomial for a set of points.
The polynomial passes through all the pairs (xi,yi). One may
additionally specify a number of derivatives at each point xi;
this is done by repeating the value xi and specifying the
derivatives as successive yi values.
Allows evaluation of the polynomial and all its derivatives.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial, although they can be obtained
by evaluating all the derivatives.
Parameters
----------
xi : array_like, length N
Known x-coordinates. Must be sorted in increasing order.
yi : array_like
Known y-coordinates. When an xi occurs two or more times in
a row, the corresponding yi's represent derivative values.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
Be aware that the algorithms implemented here are not necessarily
the most numerically stable known. Moreover, even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon. In general, even with well-chosen
x values, degrees higher than about thirty cause problems with
numerical instability in this code.
Based on [1]_.
References
----------
.. [1] Krogh, "Efficient Algorithms for Polynomial Interpolation
and Numerical Differentiation", 1970.
Examples
--------
To produce a polynomial that is zero at 0 and 1 and has
derivative 2 at 0, call
>>> from scipy.interpolate import KroghInterpolator
>>> KroghInterpolator([0,0,1],[0,2,0])
    This constructs the quadratic 2*X - 2*X**2. The derivative condition
is indicated by the repeated zero in the xi array; the corresponding
yi values are 0, the function value, and 2, the derivative value.
For another example, given xi, yi, and a derivative ypi for each
point, appropriate arrays can be constructed as:
>>> xi = np.linspace(0, 1, 5)
>>> yi, ypi = np.random.rand(2, 5)
>>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))
>>> KroghInterpolator(xi_k, yi_k)
To produce a vector-valued polynomial, supply a higher-dimensional
array for yi:
>>> KroghInterpolator([0,1],[[2,3],[4,5]])
This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.
"""
def __init__(self, xi, yi, axis=0):
_Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)
self.xi = np.asarray(xi)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
c = np.zeros((self.n+1, self.r), dtype=self.dtype)
c[0] = self.yi[0]
Vk = np.zeros((self.n, self.r), dtype=self.dtype)
for k in xrange(1,self.n):
s = 0
while s <= k and xi[k-s] == xi[k]:
s += 1
s -= 1
Vk[0] = self.yi[k]/float(factorial(s))
for i in xrange(k-s):
if xi[i] == xi[k]:
raise ValueError("Elements if `xi` can't be equal.")
if s == 0:
Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
else:
Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
c[k] = Vk[k-s]
self.c = c
def _evaluate(self, x):
pi = 1
p = np.zeros((len(x), self.r), dtype=self.dtype)
p += self.c[0,np.newaxis,:]
for k in range(1, self.n):
w = x - self.xi[k-1]
pi = w*pi
p += pi[:,np.newaxis] * self.c[k]
return p
def _evaluate_derivatives(self, x, der=None):
n = self.n
r = self.r
if der is None:
der = self.n
pi = np.zeros((n, len(x)))
w = np.zeros((n, len(x)))
pi[0] = 1
p = np.zeros((len(x), self.r), dtype=self.dtype)
p += self.c[0, np.newaxis, :]
for k in xrange(1, n):
w[k-1] = x - self.xi[k-1]
pi[k] = w[k-1] * pi[k-1]
p += pi[k, :, np.newaxis] * self.c[k]
cn = np.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
cn[:n+1, :, :] += self.c[:n+1, np.newaxis, :]
cn[0] = p
for k in xrange(1, n):
for i in xrange(1, n-k+1):
pi[i] = w[k+i-1]*pi[i-1] + pi[i]
cn[k] = cn[k] + pi[i, :, np.newaxis]*cn[k+i]
cn[k] *= factorial(k)
cn[n, :, :] = 0
return cn[:der]
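# Illustrative sketch (hypothetical helper): completing the class docstring
# example, the repeated abscissa 0 imposes f(0)=0 and f'(0)=2, which together
# with f(1)=0 gives the quadratic 2*x - 2*x**2.
def _demo_krogh_derivative_conditions():
    p = KroghInterpolator([0, 0, 1], [0, 2, 0])
    # p(0.5) == 2*0.5 - 2*0.25 == 0.5 and p'(x) == 2 - 4*x, so p'(0.5) == 0.
    return p(0.5), p.derivative(0.5, der=1)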
def krogh_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for polynomial interpolation.
See `KroghInterpolator` for more details.
Parameters
----------
xi : array_like
Known x-coordinates.
yi : array_like
Known y-coordinates, of shape ``(xi.size, R)``. Interpreted as
vectors of length R, or scalars if R=1.
x : array_like
Point or points at which to evaluate the derivatives.
    der : int or list, optional
        How many derivatives to extract (a value equal to the number
        of points yields all potentially nonzero derivatives), or a
        list of the orders of the derivatives to extract. This number
        includes the function value as the 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
d : ndarray
If the interpolator's values are R-dimensional then the
returned array will be the number of derivatives by N by R.
If `x` is a scalar, the middle dimension will be dropped; if
the `yi` are scalars then the last dimension will be dropped.
See Also
--------
KroghInterpolator
Notes
-----
Construction of the interpolating polynomial is a relatively expensive
process. If you want to evaluate it repeatedly consider using the class
KroghInterpolator (which is what this function uses).
"""
P = KroghInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(x,der=der)
else:
return P.derivatives(x,der=np.amax(der)+1)[der]
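# Illustrative sketch (hypothetical helper): `der` may also be a list of
# derivative orders; here the values and first derivatives of the
# interpolant of sin at two points.
def _demo_krogh_interpolate_der_list():
    xi = np.linspace(0, 1, 5)
    return krogh_interpolate(xi, np.sin(xi), [0.3, 0.4], der=[0, 1])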
def approximate_taylor_polynomial(f,x,degree,scale,order=None):
"""
Estimate the Taylor polynomial of f at x by polynomial fitting.
Parameters
----------
f : callable
The function whose Taylor polynomial is sought. Should accept
a vector of `x` values.
x : scalar
The point at which the polynomial is to be evaluated.
degree : int
The degree of the Taylor polynomial
scale : scalar
The width of the interval to use to evaluate the Taylor polynomial.
Function values spread over a range this wide are used to fit the
polynomial. Must be chosen carefully.
order : int or None, optional
The order of the polynomial to be used in the fitting; `f` will be
evaluated ``order+1`` times. If None, use `degree`.
Returns
-------
p : poly1d instance
The Taylor polynomial (translated to the origin, so that
for example p(0)=f(x)).
Notes
-----
The appropriate choice of "scale" is a trade-off; too large and the
function differs from its Taylor polynomial too much to get a good
answer, too small and round-off errors overwhelm the higher-order terms.
The algorithm used becomes numerically unstable around order 30 even
under ideal circumstances.
Choosing order somewhat larger than degree may improve the higher-order
terms.
"""
if order is None:
order = degree
n = order+1
# Choose n points that cluster near the endpoints of the interval in
# a way that avoids the Runge phenomenon. Ensure, by including the
# endpoint or not as appropriate, that one point always falls at x
# exactly.
    xs = scale*np.cos(np.linspace(0, np.pi, n, endpoint=n % 2)) + x
P = KroghInterpolator(xs, f(xs))
d = P.derivatives(x,der=degree+1)
return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])
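# Illustrative sketch (hypothetical helper) of the scale trade-off from the
# Notes: with a moderate scale and order > degree, the coefficients of the
# degree-4 Taylor polynomial of exp at 0 come out close to 1/k!.
def _demo_approximate_taylor():
    p = approximate_taylor_polynomial(np.exp, 0, degree=4, scale=1, order=6)
    return p.coeffs   # approximately [1/24, 1/6, 1/2, 1, 1], highest first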
class BarycentricInterpolator(_Interpolator1D):
"""The interpolating polynomial for a set of points
Constructs a polynomial that passes through a given set of points.
Allows evaluation of the polynomial, efficient changing of the y
values to be interpolated, and updating by adding more x values.
For reasons of numerical stability, this function does not compute
the coefficients of the polynomial.
The values yi need to be provided before the function is
evaluated, but none of the preprocessing depends on them, so rapid
updates are possible.
Parameters
----------
xi : array_like
1-d array of x coordinates of the points the polynomial
should pass through
yi : array_like, optional
The y coordinates of the points the polynomial should pass through.
        If None, the y values will be supplied later via the `set_yi` method.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Notes
-----
This class uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the x coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Based on Berrut and Trefethen 2004, "Barycentric Lagrange Interpolation".
"""
def __init__(self, xi, yi=None, axis=0):
_Interpolator1D.__init__(self, xi, yi, axis)
self.xi = np.asarray(xi)
self.set_yi(yi)
self.n = len(self.xi)
self.wi = np.zeros(self.n)
self.wi[0] = 1
for j in xrange(1,self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
def set_yi(self, yi, axis=None):
"""
Update the y values to be interpolated
The barycentric interpolation algorithm requires the calculation
of weights, but these depend only on the xi. The yi can be changed
at any time.
Parameters
----------
yi : array_like
The y coordinates of the points the polynomial should pass through.
If None, the y values will be supplied later.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
"""
if yi is None:
self.yi = None
return
self._set_yi(yi, xi=self.xi, axis=axis)
self.yi = self._reshape_yi(yi)
self.n, self.r = self.yi.shape
def add_xi(self, xi, yi=None):
"""
Add more x values to the set to be interpolated
The barycentric interpolation algorithm allows easy updating by
adding more points for the polynomial to pass through.
Parameters
----------
xi : array_like
The x coordinates of the points that the polynomial should pass
through.
yi : array_like, optional
The y coordinates of the points the polynomial should pass through.
Should have shape ``(xi.size, R)``; if R > 1 then the polynomial is
vector-valued.
If `yi` is not given, the y values will be supplied later. `yi` should
be given if and only if the interpolator has y values specified.
"""
if yi is not None:
if self.yi is None:
raise ValueError("No previous yi value to update!")
yi = self._reshape_yi(yi, check=True)
self.yi = np.vstack((self.yi,yi))
else:
if self.yi is not None:
raise ValueError("No update to yi provided!")
old_n = self.n
self.xi = np.concatenate((self.xi,xi))
self.n = len(self.xi)
self.wi **= -1
old_wi = self.wi
self.wi = np.zeros(self.n)
self.wi[:old_n] = old_wi
for j in xrange(old_n,self.n):
self.wi[:j] *= (self.xi[j]-self.xi[:j])
self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])
self.wi **= -1
def __call__(self, x):
"""Evaluate the interpolating polynomial at the points x
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Currently the code computes an outer product between x and the
weights, that is, it constructs an intermediate array of size
N by len(x), where N is the degree of the polynomial.
"""
return _Interpolator1D.__call__(self, x)
def _evaluate(self, x):
if x.size == 0:
p = np.zeros((0, self.r), dtype=self.dtype)
else:
c = x[...,np.newaxis]-self.xi
z = c == 0
c[z] = 1
c = self.wi/c
p = np.dot(c,self.yi)/np.sum(c,axis=-1)[...,np.newaxis]
# Now fix where x==some xi
r = np.nonzero(z)
if len(r) == 1: # evaluation at a scalar
if len(r[0]) > 0: # equals one of the points
p = self.yi[r[0][0]]
else:
p[r[:-1]] = self.yi[r[-1]]
return p
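# Illustrative sketch (hypothetical helper): the barycentric weights depend
# only on xi, so the same interpolator can be reused for new y data via
# `set_yi` without recomputing the weights.
def _demo_barycentric_reuse():
    xi = np.linspace(0, 1, 7)
    interp = BarycentricInterpolator(xi, np.sin(xi))
    first = interp(0.45)
    interp.set_yi(np.cos(xi))    # reuse the precomputed weights
    return first, interp(0.45)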
def barycentric_interpolate(xi, yi, x, axis=0):
"""
Convenience function for polynomial interpolation.
Constructs a polynomial that passes through a given set of points,
then evaluates the polynomial. For reasons of numerical stability,
this function does not compute the coefficients of the polynomial.
This function uses a "barycentric interpolation" method that treats
the problem as a special case of rational function interpolation.
This algorithm is quite stable, numerically, but even in a world of
exact computation, unless the `x` coordinates are chosen very
carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -
polynomial interpolation itself is a very ill-conditioned process
due to the Runge phenomenon.
Parameters
----------
xi : array_like
1-d array of x coordinates of the points the polynomial should
pass through
yi : array_like
The y coordinates of the points the polynomial should pass through.
x : scalar or array_like
Points to evaluate the interpolator at.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
Returns
-------
y : scalar or array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
See Also
--------
BarycentricInterpolator
Notes
-----
Construction of the interpolation weights is a relatively slow process.
If you want to call this many times with the same xi (but possibly
varying yi or x) you should use the class `BarycentricInterpolator`.
This is what this function uses internally.
"""
return BarycentricInterpolator(xi, yi, axis=axis)(x)
| 22,677 | 33 | 82 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/fitpack.py
|
from __future__ import print_function, division, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from ._fitpack_impl import bisplrep, bisplev, dblint
from . import _fitpack_impl as _impl
from ._bsplines import BSpline
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically as ``M = len(x[0])``, where
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
        ``nest=m+k+1`` is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splrep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
    The number of coefficients in the `c` array is ``k+1`` less than the number
of knots, ``len(t)``. This is in contrast with `splrep`, which zero-pads
the array of coefficients to have the same length as the array of knots.
These additional coefficients are ignored by evaluation routines, `splev`
and `BSpline`.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
Generate a discretization of a limacon curve in the polar coordinates:
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.5 + np.cos(phi) # polar coords
>>> x, y = r * np.cos(phi), r * np.sin(phi) # convert to cartesian
And interpolate:
>>> from scipy.interpolate import splprep, splev
>>> tck, u = splprep([x, y], s=0)
>>> new_points = splev(u, tck)
Notice that (i) we force interpolation by using `s=0`,
(ii) the parameterization, ``u``, is generated automatically.
Now plot the result:
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.plot(x, y, 'ro')
>>> ax.plot(new_points[0], new_points[1], 'r-')
>>> plt.show()
"""
res = _impl.splprep(x, w, u, ub, ue, k, task, s, t, full_output, nest, per,
quiet)
return res
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The degree of the spline fit. It is recommended to use cubic splines.
Even values of k should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
        set of data (t will be stored and used internally).
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
BSpline
make_interp_spline
Notes
-----
See `splev` for evaluation of the spline and its derivatives. Uses the
FORTRAN routine ``curfit`` from FITPACK.
The user is responsible for assuring that the values of `x` are unique.
Otherwise, `splrep` will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
This routine zero-pads the coefficients array ``c`` to have the same length
as the array of knots ``t`` (the trailing ``k + 1`` coefficients are ignored
by the evaluation routines, `splev` and `BSpline`.) This is in contrast with
`splprep`, which does not zero-pad the coefficients.
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> spl = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, spl)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
res = _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
return res
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, u should be given.
tck : 3-tuple or a BSpline object
If a tuple, then it should be a sequence of length 3 returned by
`splrep` or `splprep` containing the knots, coefficients, and degree
of the spline. (Also see Notes.)
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in `x`. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splev() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.__call__(x) instead.")
warnings.warn(mesg, DeprecationWarning)
# remap the out-of-bounds behavior
try:
extrapolate = {0: True, }[ext]
except KeyError:
raise ValueError("Extrapolation mode %s is not supported "
"by BSpline." % ext)
return tck(x, der, extrapolate=extrapolate)
else:
return _impl.splev(x, tck, der, ext)
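# Illustrative sketch (hypothetical helper): with a tck tuple, `der` selects
# a derivative and `ext` controls out-of-range behaviour (ext=1 returns 0).
def _demo_splev_modes():
    x = np.linspace(0, 10, 30)
    tck = splrep(x, np.sin(x))
    d1 = splev(5.0, tck, der=1)        # approximately cos(5)
    outside = splev(12.0, tck, ext=1)  # 0.0 outside the data interval
    return d1, outside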
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple or a BSpline instance
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
(Only returned if `full_output` is non-zero)
Notes
-----
    `splint` silently assumes that the spline function is zero outside the
    interval over which the spline is defined, so no extrapolation is
    performed beyond the knots.
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
BSpline
References
----------
    .. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling splint() with BSpline objects with c.ndim > 1 is "
"not recommended. Use BSpline.integrate() instead.")
warnings.warn(mesg, DeprecationWarning)
        if full_output != 0:
            mesg = ("full_output = %s is not supported. Proceeding as if "
                    "full_output = 0" % full_output)
            warnings.warn(mesg)
return tck.integrate(a, b, extrapolate=False)
else:
return _impl.splint(a, b, tck, full_output)
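# Illustrative sketch (hypothetical helper): the definite integral of a
# spline fit of sin over [0, pi] should be close to the exact value 2.
def _demo_splint():
    x = np.linspace(0, np.pi, 50)
    tck = splrep(x, np.sin(x))
    return splint(0, np.pi, tck)   # approximately 2.0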
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple or a BSpline object
If a tuple, then it should be a sequence of length 3, containing the
vector of knots, the B-spline coefficients, and the degree of the
spline.
The number of knots must be >= 8, and the degree must be 3.
        The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
Notes
-----
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
BSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M. G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
if tck.c.ndim > 1:
mesg = ("Calling sproot() with BSpline objects with c.ndim > 1 is "
"not recommended.")
warnings.warn(mesg, DeprecationWarning)
t, c, k = tck.tck
# _impl.sproot expects the interpolation axis to be last, so roll it.
# NB: This transpose is a no-op if c is 1D.
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
return _impl.sproot((t, c, k), mest)
else:
return _impl.sproot(tck, mest)
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple ``(t, c, k)``, containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
BSpline
References
----------
.. [1] C. de Boor: On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] M. G. Cox : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] P. Dierckx : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
raise TypeError("spalde does not accept BSpline instances.")
else:
return _impl.spalde(x, tck)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
A 1-D point at which to insert a new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, u should be given.
tck : a `BSpline` instance or a tuple
If tuple, then it is expected to be a tuple (t,c,k) containing
the vector of knots, the B-spline coefficients, and the degree of
the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
BSpline instance or a tuple
A new B-spline with knots t, coefficients c, and degree k.
``t(k+1) <= x <= t(n-k)``, where k is the degree of the spline.
In case of a periodic spline (``per != 0``) there must be
either at least k interior knots t(j) satisfying ``t(k+1)<t(j)<=x``
or at least k interior knots t(j) satisfying ``x<=t(j)<t(n-k)``.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
Based on algorithms from [1]_ and [2]_.
Manipulating the tck-tuples directly is not recommended. In new code,
prefer using the `BSpline` objects.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
# FITPACK expects the interpolation axis to be last, so roll it over
# NB: if c array is 1D, transposes are no-ops
sh = tuple(range(c.ndim))
c = c.transpose(sh[1:] + (0,))
t_, c_, k_ = _impl.insert(x, (t, c, k), m, per)
# and roll the last axis back
c_ = np.asarray(c_)
c_ = c_.transpose((sh[-1],) + sh[:-1])
return BSpline(t_, c_, k_)
else:
return _impl.insert(x, tck, m, per)
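# Illustrative sketch (hypothetical helper): knot insertion changes the
# representation but not the function values.
def _demo_knot_insertion():
    x = np.linspace(0, 10, 30)
    tck = splrep(x, np.sin(x))
    tck2 = insert(5.0, tck, m=2)               # insert the knot x=5 twice
    pts = np.linspace(1, 9, 5)
    return splev(pts, tck) - splev(pts, tck2)  # approximately all zeros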
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
`BSpline` instance or tuple
Spline of order k2=k-n representing the derivative
of the input spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
BSpline
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if isinstance(tck, BSpline):
return tck.derivative(n)
else:
return _impl.splder(tck, n)
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : BSpline instance or a tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
BSpline instance or a tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
A tuple is returned iff the input argument `tck` is a tuple, otherwise
a BSpline object is constructed and returned.
See Also
--------
splder, splev, spalde
BSpline
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if isinstance(tck, BSpline):
return tck.antiderivative(n)
else:
return _impl.splantider(tck, n)
| 25,622 | 34.439834 | 80 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/_bsplines.py
|
from __future__ import division, print_function, absolute_import
import functools
import operator
import numpy as np
from scipy._lib.six import string_types
from scipy.linalg import (get_lapack_funcs, LinAlgError,
cholesky_banded, cho_solve_banded)
from . import _bspl
from . import _fitpack_impl
from . import _fitpack as _dierckx
__all__ = ["BSpline", "make_interp_spline", "make_lsq_spline"]
# copy-paste from interpolate.py
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def _get_dtype(dtype):
"""Return np.complex128 for complex dtypes, np.float64 otherwise."""
if np.issubdtype(dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _as_float_array(x, check_finite=False):
"""Convert the input into a C contiguous float array.
NB: Upcasts half- and single-precision floats to double precision.
"""
x = np.ascontiguousarray(x)
dtyp = _get_dtype(x.dtype)
x = x.astype(dtyp, copy=False)
if check_finite and not np.isfinite(x).all():
raise ValueError("Array must not contain infs or nans.")
return x
class BSpline(object):
r"""Univariate spline in the B-spline basis.
.. math::
S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x)
where :math:`B_{j, k; t}` are B-spline basis functions of degree `k`
and knots `t`.
Parameters
----------
t : ndarray, shape (n+k+1,)
knots
c : ndarray, shape (>=n, ...)
spline coefficients
k : int
B-spline order
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
or to return nans.
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
If 'periodic', periodic extrapolation is used.
Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
t : ndarray
knot vector
c : ndarray
spline coefficients
k : int
spline degree
extrapolate : bool
If True, extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
axis : int
Interpolation axis.
tck : tuple
A read-only equivalent of ``(self.t, self.c, self.k)``
Methods
-------
__call__
basis_element
derivative
antiderivative
integrate
construct_fast
Notes
-----
B-spline basis elements are defined via
.. math::
B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}
B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
+ \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)
**Implementation details**
- At least ``k+1`` coefficients are required for a spline of degree `k`,
so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
``j > n``, are ignored.
- B-spline basis elements of degree `k` form a partition of unity on the
*base interval*, ``t[k] <= x <= t[n]``.
Examples
--------
Translating the recursive definition of B-splines into Python code, we have:
>>> def B(x, k, i, t):
... if k == 0:
... return 1.0 if t[i] <= x < t[i+1] else 0.0
... if t[i+k] == t[i]:
... c1 = 0.0
... else:
... c1 = (x - t[i])/(t[i+k] - t[i]) * B(x, k-1, i, t)
... if t[i+k+1] == t[i+1]:
... c2 = 0.0
... else:
... c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * B(x, k-1, i+1, t)
... return c1 + c2
>>> def bspline(x, t, c, k):
... n = len(t) - k - 1
... assert (n >= k+1) and (len(c) >= n)
... return sum(c[i] * B(x, k, i, t) for i in range(n))
Note that this is an inefficient (if straightforward) way to
evaluate B-splines --- this spline class does it in an equivalent,
but much more efficient way.
Here we construct a quadratic spline function on the base interval
``2 <= x <= 4`` and compare with the naive way of evaluating the spline:
>>> from scipy.interpolate import BSpline
>>> k = 2
>>> t = [0, 1, 2, 3, 4, 5, 6]
>>> c = [-1, 2, 0, -1]
>>> spl = BSpline(t, c, k)
>>> spl(2.5)
array(1.375)
>>> bspline(2.5, t, c, k)
1.375
Note that outside of the base interval results differ. This is because
`BSpline` extrapolates the first and last polynomial pieces of b-spline
functions active on the base interval.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> xx = np.linspace(1.5, 4.5, 50)
>>> ax.plot(xx, [bspline(x, t, c ,k) for x in xx], 'r-', lw=3, label='naive')
>>> ax.plot(xx, spl(xx), 'b-', lw=4, alpha=0.7, label='BSpline')
>>> ax.grid(True)
>>> ax.legend(loc='best')
>>> plt.show()
References
----------
.. [1] Tom Lyche and Knut Morken, Spline methods,
http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
.. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
"""
def __init__(self, t, c, k, extrapolate=True, axis=0):
super(BSpline, self).__init__()
self.k = int(k)
self.c = np.asarray(c)
self.t = np.ascontiguousarray(t, dtype=np.float64)
if extrapolate == 'periodic':
self.extrapolate = extrapolate
else:
self.extrapolate = bool(extrapolate)
n = self.t.shape[0] - self.k - 1
if not (0 <= axis < self.c.ndim):
raise ValueError("%s must be between 0 and %s" % (axis, c.ndim))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (n, ...),
# and axis !=0 means that we have c.shape (..., n, ...)
# ^
# axis
self.c = np.rollaxis(self.c, axis)
if k < 0:
raise ValueError("Spline order cannot be negative.")
if int(k) != k:
raise ValueError("Spline order must be integer.")
if self.t.ndim != 1:
raise ValueError("Knot vector must be one-dimensional.")
if n < self.k + 1:
raise ValueError("Need at least %d knots for degree %d" %
(2*k + 2, k))
if (np.diff(self.t) < 0).any():
raise ValueError("Knots must be in a non-decreasing order.")
if len(np.unique(self.t[k:n+1])) < 2:
raise ValueError("Need at least two internal knots.")
if not np.isfinite(self.t).all():
raise ValueError("Knots should not have nans or infs.")
if self.c.ndim < 1:
raise ValueError("Coefficients must be at least 1-dimensional.")
if self.c.shape[0] < n:
raise ValueError("Knots, coefficients and degree are inconsistent.")
dt = _get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dt)
@classmethod
def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
"""Construct a spline without making checks.
Accepts same parameters as the regular constructor. Input arrays
`t` and `c` must of correct shape and dtype.
"""
self = object.__new__(cls)
self.t, self.c, self.k = t, c, k
self.extrapolate = extrapolate
self.axis = axis
return self
@property
def tck(self):
"""Equivalent to ``(self.t, self.c, self.k)`` (read-only).
"""
return self.t, self.c, self.k
@classmethod
def basis_element(cls, t, extrapolate=True):
"""Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``.
Parameters
----------
t : ndarray, shape (k+1,)
internal knots
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval, ``t[0] .. t[k+1]``,
or to return nans.
If 'periodic', periodic extrapolation is used.
Default is True.
Returns
-------
basis_element : callable
A callable representing a B-spline basis element for the knot
vector `t`.
Notes
-----
The order of the b-spline, `k`, is inferred from the length of `t` as
``len(t)-2``. The knot vector is constructed by appending and prepending
``k+1`` elements to internal knots `t`.
Examples
--------
Construct a cubic b-spline:
>>> from scipy.interpolate import BSpline
>>> b = BSpline.basis_element([0, 1, 2, 3, 4])
>>> k = b.k
>>> b.t[k:-k]
array([ 0., 1., 2., 3., 4.])
>>> k
3
Construct a second order b-spline on ``[0, 1, 1, 2]``, and compare
to its explicit form:
>>> t = [-1, 0, 1, 1, 2]
>>> b = BSpline.basis_element(t[1:])
>>> def f(x):
... return np.where(x < 1, x*x, (2. - x)**2)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0, 2, 51)
>>> ax.plot(x, b(x), 'g', lw=3)
>>> ax.plot(x, f(x), 'r', lw=8, alpha=0.4)
>>> ax.grid(True)
>>> plt.show()
"""
k = len(t) - 2
t = _as_float_array(t)
t = np.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k]
c = np.zeros_like(t)
c[k] = 1.
return cls.construct_fast(t, c, k, extrapolate)
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate a spline function.
Parameters
----------
x : array_like
points to evaluate the spline at.
nu: int, optional
derivative to evaluate (default is 0).
extrapolate : bool or 'periodic', optional
whether to extrapolate based on the first and last intervals
or return nans. If 'periodic', periodic extrapolation is used.
Default is `self.extrapolate`.
Returns
-------
y : array_like
Shape is determined by replacing the interpolation axis
in the coefficient array with the shape of `x`.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.t[k], self.t[n]].
if extrapolate == 'periodic':
n = self.t.size - self.k - 1
x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -
self.t[self.k])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[1:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[1:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
def _evaluate(self, xp, nu, extrapolate, out):
_bspl.evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
self.k, xp, nu, extrapolate, out)
def _ensure_c_contiguous(self):
"""
c and t may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.t.flags.c_contiguous:
self.t = self.t.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def derivative(self, nu=1):
"""Return a b-spline representing the derivative.
Parameters
----------
nu : int, optional
Derivative order.
Default is 1.
Returns
-------
b : BSpline object
A new instance representing the derivative.
See Also
--------
splder, splantider
"""
c = self.c
# pad the c array if needed
ct = len(self.t) - len(c)
if ct > 0:
c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
tck = _fitpack_impl.splder((self.t, c, self.k), nu)
return self.construct_fast(*tck, extrapolate=self.extrapolate,
axis=self.axis)
def antiderivative(self, nu=1):
"""Return a b-spline representing the antiderivative.
Parameters
----------
nu : int, optional
Antiderivative order. Default is 1.
Returns
-------
b : BSpline object
A new instance representing the antiderivative.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
See Also
--------
splder, splantider
"""
c = self.c
# pad the c array if needed
ct = len(self.t) - len(c)
if ct > 0:
c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
tck = _fitpack_impl.splantider((self.t, c, self.k), nu)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(*tck, extrapolate=extrapolate,
axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""Compute a definite integral of the spline.
Parameters
----------
a : float
Lower limit of integration.
b : float
Upper limit of integration.
extrapolate : bool or 'periodic', optional
whether to extrapolate beyond the base interval,
``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
base interval. If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
I : array_like
Definite integral of the spline over the interval ``[a, b]``.
Examples
--------
Construct the linear spline ``x if x < 1 else 2 - x`` on the base
interval :math:`[0, 2]`, and integrate it
>>> from scipy.interpolate import BSpline
>>> b = BSpline.basis_element([0, 1, 2])
>>> b.integrate(0, 1)
array(0.5)
If the integration limits are outside of the base interval, the result
is controlled by the `extrapolate` parameter
>>> b.integrate(-1, 1)
array(0.0)
>>> b.integrate(-1, 1, extrapolate=False)
array(0.5)
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> ax.grid(True)
>>> ax.axvline(0, c='r', lw=5, alpha=0.5) # base interval
>>> ax.axvline(2, c='r', lw=5, alpha=0.5)
>>> xx = [-1, 1, 2]
>>> ax.plot(xx, b(xx))
>>> plt.show()
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Prepare self.t and self.c.
self._ensure_c_contiguous()
# Swap integration bounds if needed.
sign = 1
if b < a:
a, b = b, a
sign = -1
n = self.t.size - self.k - 1
if extrapolate != "periodic" and not extrapolate:
# Shrink the integration interval, if needed.
a = max(a, self.t[self.k])
b = min(b, self.t[n])
if self.c.ndim == 1:
# Fast path: use FITPACK's routine
# (cf _fitpack_impl.splint).
t, c, k = self.tck
integral, wrk = _dierckx._splint(t, c, k, a, b)
return integral * sign
out = np.empty((2, prod(self.c.shape[1:])), dtype=self.c.dtype)
# Compute the antiderivative.
c = self.c
ct = len(self.t) - len(c)
if ct > 0:
c = np.r_[c, np.zeros((ct,) + c.shape[1:])]
ta, ca, ka = _fitpack_impl.splantider((self.t, c, self.k), 1)
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
ts, te = self.t[self.k], self.t[n]
period = te - ts
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
# Evaluate the difference of antiderivatives.
x = np.asarray([ts, te], dtype=np.float_)
_bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, False, out)
integral = out[1] - out[0]
integral *= n_periods
else:
integral = np.zeros((1, prod(self.c.shape[1:])),
dtype=self.c.dtype)
# Map a to [ts, te], b is always a + left.
a = ts + (a - ts) % period
b = a + left
# If b <= te then we need to integrate over [a, b], otherwise
# over [a, te] and then from ts over the remainder.
if b <= te:
x = np.asarray([a, b], dtype=np.float_)
_bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, False, out)
integral += out[1] - out[0]
else:
x = np.asarray([a, te], dtype=np.float_)
_bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, False, out)
integral += out[1] - out[0]
x = np.asarray([ts, ts + b - te], dtype=np.float_)
_bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, False, out)
integral += out[1] - out[0]
else:
# Evaluate the difference of antiderivatives.
x = np.asarray([a, b], dtype=np.float_)
_bspl.evaluate_spline(ta, ca.reshape(ca.shape[0], -1),
ka, x, 0, extrapolate, out)
integral = out[1] - out[0]
integral *= sign
return integral.reshape(ca.shape[1:])
#################################
# Interpolating spline helpers #
#################################
def _not_a_knot(x, k):
"""Given data x, construct the knot vector w/ not-a-knot BC.
cf de Boor, XIII(12)."""
x = np.asarray(x)
if k % 2 != 1:
raise ValueError("Odd degree for now only. Got %s." % k)
m = (k - 1) // 2
t = x[m+1:-m-1]
t = np.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)]
return t
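# Illustrative (hypothetical) call: for k=3 and x = [0, 1, 2, 3, 4, 5],
# _not_a_knot drops x[1] and x[-2] and pads the boundary knots, giving
# [0, 0, 0, 0, 2, 3, 5, 5, 5, 5].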
def _augknt(x, k):
"""Construct a knot vector appropriate for the order-k interpolation."""
return np.r_[(x[0],)*k, x, (x[-1],)*k]
def _convert_string_aliases(deriv, target_shape):
if isinstance(deriv, string_types):
if deriv == "clamped":
deriv = [(1, np.zeros(target_shape))]
elif deriv == "natural":
deriv = [(2, np.zeros(target_shape))]
else:
raise ValueError("Unknown boundary condition : %s" % deriv)
return deriv
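# A quick illustration of the alias expansion above (hypothetical call,
# not part of the original module): _convert_string_aliases("clamped", ())
# returns [(1, array(0.))], i.e. a zero first derivative at that edge.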
def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0,
check_finite=True):
"""Compute the (coefficients of) interpolating B-spline.
Parameters
----------
x : array_like, shape (n,)
Abscissas.
y : array_like, shape (n, ...)
Ordinates.
k : int, optional
B-spline degree. Default is cubic, k=3.
t : array_like, shape (nt + k + 1,), optional.
Knots.
The number of knots needs to agree with the number of datapoints and
the number of derivatives at the edges. Specifically, ``nt - n`` must
equal ``len(deriv_l) + len(deriv_r)``.
bc_type : 2-tuple or None
Boundary conditions.
Default is None, which means choosing the boundary conditions
automatically. Otherwise, it must be a length-two tuple where the first
element sets the boundary conditions at ``x[0]`` and the second
element sets the boundary conditions at ``x[-1]``. Each of these must
be an iterable of pairs ``(order, value)`` which gives the values of
derivatives of specified orders at the given edge of the interpolation
interval.
Alternatively, the following string aliases are recognized:
* ``"clamped"``: The first derivatives at the ends are zero. This is
equivalent to ``bc_type=((1, 0.0), (1, 0.0))``.
* ``"natural"``: The second derivatives at ends are zero. This is
equivalent to ``bc_type=((2, 0.0), (2, 0.0))``.
* ``"not-a-knot"`` (default): The first and second segments are the same
polynomial. This is equivalent to having ``bc_type=None``.
axis : int, optional
Interpolation axis. Default is 0.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default is True.
Returns
-------
b : a BSpline object of degree ``k`` with knots ``t``.
Examples
--------
Use cubic interpolation on Chebyshev nodes:
>>> def cheb_nodes(N):
... jj = 2.*np.arange(N) + 1
... x = np.cos(np.pi * jj / 2 / N)[::-1]
... return x
>>> x = cheb_nodes(20)
>>> y = np.sqrt(1 - x**2)
>>> from scipy.interpolate import BSpline, make_interp_spline
>>> b = make_interp_spline(x, y)
>>> np.allclose(b(x), y)
True
Note that the default is a cubic spline with a not-a-knot boundary condition
>>> b.k
3
Here we use a 'natural' spline, with zero 2nd derivatives at edges:
>>> l, r = [(2, 0.0)], [(2, 0.0)]
>>> b_n = make_interp_spline(x, y, bc_type=(l, r)) # or, bc_type="natural"
>>> np.allclose(b_n(x), y)
True
>>> x0, x1 = x[0], x[-1]
>>> np.allclose([b_n(x0, 2), b_n(x1, 2)], [0, 0])
True
Interpolation of parametric curves is also supported. As an example, we
compute a discretization of a snail curve in polar coordinates
>>> phi = np.linspace(0, 2.*np.pi, 40)
>>> r = 0.3 + np.cos(phi)
>>> x, y = r*np.cos(phi), r*np.sin(phi) # convert to Cartesian coordinates
Build an interpolating curve, parameterizing it by the angle
>>> from scipy.interpolate import make_interp_spline
>>> spl = make_interp_spline(phi, np.c_[x, y])
Evaluate the interpolant on a finer grid (note that we transpose the result
to unpack it into a pair of x- and y-arrays)
>>> phi_new = np.linspace(0, 2.*np.pi, 100)
>>> x_new, y_new = spl(phi_new).T
Plot the result
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
>>> plt.plot(x_new, y_new, '-')
>>> plt.show()
See Also
--------
BSpline : base class representing the B-spline objects
CubicSpline : a cubic spline in the polynomial basis
make_lsq_spline : a similar factory function for spline fitting
UnivariateSpline : a wrapper over FITPACK spline fitting routines
splrep : a wrapper over FITPACK spline fitting routines
"""
# convert string aliases for the boundary conditions
if bc_type is None or bc_type == 'not-a-knot':
deriv_l, deriv_r = None, None
elif isinstance(bc_type, string_types):
deriv_l, deriv_r = bc_type, bc_type
else:
deriv_l, deriv_r = bc_type
# special-case k=0 right away
if k == 0:
if any(_ is not None for _ in (t, deriv_l, deriv_r)):
raise ValueError("Too much info for k=0: t and bc_type can only "
"be None.")
x = _as_float_array(x, check_finite)
t = np.r_[x, x[-1]]
c = np.asarray(y)
c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
return BSpline.construct_fast(t, c, k, axis=axis)
# special-case k=1 (e.g., Lyche and Morken, Eq.(2.16))
if k == 1 and t is None:
if not (deriv_l is None and deriv_r is None):
raise ValueError("Too much info for k=1: bc_type can only be None.")
x = _as_float_array(x, check_finite)
t = np.r_[x[0], x, x[-1]]
c = np.asarray(y)
c = np.ascontiguousarray(c, dtype=_get_dtype(c.dtype))
return BSpline.construct_fast(t, c, k, axis=axis)
x = _as_float_array(x, check_finite)
y = _as_float_array(y, check_finite)
k = int(k)
# come up with a sensible knot vector, if needed
if t is None:
if deriv_l is None and deriv_r is None:
if k == 2:
# OK, it's a bit ad hoc: Greville sites + omit
# 2nd and 2nd-to-last points, a la not-a-knot
t = (x[1:] + x[:-1]) / 2.
t = np.r_[(x[0],)*(k+1),
t[1:-1],
(x[-1],)*(k+1)]
else:
t = _not_a_knot(x, k)
else:
t = _augknt(x, k)
t = _as_float_array(t, check_finite)
axis = axis % y.ndim
y = np.rollaxis(y, axis) # now internally interp axis is zero
if x.ndim != 1 or np.any(x[1:] <= x[:-1]):
raise ValueError("Expect x to be a 1-D sorted array_like.")
if k < 0:
raise ValueError("Expect non-negative k.")
if t.ndim != 1 or np.any(t[1:] < t[:-1]):
raise ValueError("Expect t to be a 1-D sorted array_like.")
if x.size != y.shape[0]:
raise ValueError('x and y are incompatible.')
if t.size < x.size + k + 1:
raise ValueError('Got %d knots, need at least %d.' %
(t.size, x.size + k + 1))
if (x[0] < t[k]) or (x[-1] > t[-k]):
raise ValueError('Out of bounds w/ x = %s.' % x)
# Here : deriv_l, r = [(nu, value), ...]
deriv_l = _convert_string_aliases(deriv_l, y.shape[1:])
if deriv_l is not None:
deriv_l_ords, deriv_l_vals = zip(*deriv_l)
else:
deriv_l_ords, deriv_l_vals = [], []
deriv_l_ords, deriv_l_vals = np.atleast_1d(deriv_l_ords, deriv_l_vals)
nleft = deriv_l_ords.shape[0]
deriv_r = _convert_string_aliases(deriv_r, y.shape[1:])
if deriv_r is not None:
deriv_r_ords, deriv_r_vals = zip(*deriv_r)
else:
deriv_r_ords, deriv_r_vals = [], []
deriv_r_ords, deriv_r_vals = np.atleast_1d(deriv_r_ords, deriv_r_vals)
nright = deriv_r_ords.shape[0]
# have `n` conditions for `nt` coefficients; need nt-n derivatives
n = x.size
nt = t.size - k - 1
if nt - n != nleft + nright:
raise ValueError("number of derivatives at boundaries.")
# set up the LHS: the collocation matrix + derivatives at boundaries
kl = ku = k
ab = np.zeros((2*kl + ku + 1, nt), dtype=np.float_, order='F')
_bspl._colloc(x, t, k, ab, offset=nleft)
if nleft > 0:
_bspl._handle_lhs_derivatives(t, k, x[0], ab, kl, ku, deriv_l_ords)
if nright > 0:
_bspl._handle_lhs_derivatives(t, k, x[-1], ab, kl, ku, deriv_r_ords,
offset=nt-nright)
# set up the RHS: values to interpolate (+ derivative values, if any)
extradim = prod(y.shape[1:])
rhs = np.empty((nt, extradim), dtype=y.dtype)
if nleft > 0:
rhs[:nleft] = deriv_l_vals.reshape(-1, extradim)
rhs[nleft:nt - nright] = y.reshape(-1, extradim)
if nright > 0:
rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim)
# solve Ab @ x = rhs; this is the relevant part of linalg.solve_banded
if check_finite:
ab, rhs = map(np.asarray_chkfinite, (ab, rhs))
gbsv, = get_lapack_funcs(('gbsv',), (ab, rhs))
lu, piv, c, info = gbsv(kl, ku, ab, rhs,
overwrite_ab=True, overwrite_b=True)
if info > 0:
raise LinAlgError("Collocation matix is singular.")
elif info < 0:
raise ValueError('illegal value in %d-th argument of internal gbsv' % -info)
c = np.ascontiguousarray(c.reshape((nt,) + y.shape[1:]))
return BSpline.construct_fast(t, c, k, axis=axis)
def make_lsq_spline(x, y, t, k=3, w=None, axis=0, check_finite=True):
r"""Compute the (coefficients of) an LSQ B-spline.
The result is a linear combination
.. math::
S(x) = \sum_j c_j B_j(x; t)
of the B-spline basis elements, :math:`B_j(x; t)`, which minimizes
.. math::
\sum_{j} \left( w_j \times (S(x_j) - y_j) \right)^2
Parameters
----------
x : array_like, shape (m,)
Abscissas.
y : array_like, shape (m, ...)
Ordinates.
t : array_like, shape (n + k + 1,).
Knots.
Knots and data points must satisfy Schoenberg-Whitney conditions.
k : int, optional
B-spline degree. Default is cubic, k=3.
w : array_like, shape (m,), optional
Weights for spline fitting. Must be positive. If ``None``,
then weights are all equal.
Default is ``None``.
axis : int, optional
Interpolation axis. Default is zero.
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default is True.
Returns
-------
b : a BSpline object of degree `k` with knots `t`.
Notes
-----
The number of data points must be larger than the spline degree `k`.
Knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
Examples
--------
Generate some noisy data:
>>> x = np.linspace(-3, 3, 50)
>>> y = np.exp(-x**2) + 0.1 * np.random.randn(50)
Now fit a smoothing cubic spline with pre-defined internal knots.
Here we make the knot vector (k+1)-regular by adding boundary knots:
>>> from scipy.interpolate import make_lsq_spline, BSpline
>>> t = [-1, 0, 1]
>>> k = 3
>>> t = np.r_[(x[0],)*(k+1),
... t,
... (x[-1],)*(k+1)]
>>> spl = make_lsq_spline(x, y, t, k)
For comparison, we also construct an interpolating spline for the same
set of data:
>>> from scipy.interpolate import make_interp_spline
>>> spl_i = make_interp_spline(x, y)
Plot both:
>>> import matplotlib.pyplot as plt
>>> xs = np.linspace(-3, 3, 100)
>>> plt.plot(x, y, 'ro', ms=5)
>>> plt.plot(xs, spl(xs), 'g-', lw=3, label='LSQ spline')
>>> plt.plot(xs, spl_i(xs), 'b-', lw=3, alpha=0.7, label='interp spline')
>>> plt.legend(loc='best')
>>> plt.show()
**NaN handling**: If the input arrays contain ``nan`` values, the result is
not useful since the underlying spline fitting routines cannot deal with
``nan``. A workaround is to use zero weights for not-a-number data points:
>>> y[8] = np.nan
>>> w = np.isnan(y)
>>> y[w] = 0.
>>> tck = make_lsq_spline(x, y, t, w=~w)
Notice the need to replace a ``nan`` by a numerical value (the precise
value does not matter, as long as the corresponding weight is zero).
See Also
--------
BSpline : base class representing the B-spline objects
make_interp_spline : a similar factory function for interpolating splines
LSQUnivariateSpline : a FITPACK-based spline fitting routine
splrep : a FITPACK-based fitting routine
"""
x = _as_float_array(x, check_finite)
y = _as_float_array(y, check_finite)
t = _as_float_array(t, check_finite)
if w is not None:
w = _as_float_array(w, check_finite)
else:
w = np.ones_like(x)
k = int(k)
axis = axis % y.ndim
y = np.rollaxis(y, axis) # now internally interp axis is zero
if x.ndim != 1 or np.any(x[1:] - x[:-1] <= 0):
raise ValueError("Expect x to be a 1-D sorted array_like.")
if x.shape[0] < k+1:
raise ValueError("Need more x points.")
if k < 0:
raise ValueError("Expect non-negative k.")
if t.ndim != 1 or np.any(t[1:] - t[:-1] < 0):
raise ValueError("Expect t to be a 1-D sorted array_like.")
if x.size != y.shape[0]:
raise ValueError('x & y are incompatible.')
if k > 0 and np.any((x < t[k]) | (x > t[-k])):
raise ValueError('Out of bounds w/ x = %s.' % x)
if x.size != w.size:
raise ValueError('Incompatible weights.')
# number of coefficients
n = t.size - k - 1
# construct A.T @ A and rhs with A the collocation matrix, and
# rhs = A.T @ y for solving the LSQ problem ``A.T @ A @ c = A.T @ y``
lower = True
extradim = prod(y.shape[1:])
ab = np.zeros((k+1, n), dtype=np.float_, order='F')
rhs = np.zeros((n, extradim), dtype=y.dtype, order='F')
_bspl._norm_eq_lsq(x, t, k,
y.reshape(-1, extradim),
w,
ab, rhs)
rhs = rhs.reshape((n,) + y.shape[1:])
# have observation matrix & rhs, can solve the LSQ problem
cho_decomp = cholesky_banded(ab, overwrite_ab=True, lower=lower,
check_finite=check_finite)
c = cho_solve_banded((cho_decomp, lower), rhs, overwrite_b=True,
check_finite=check_finite)
c = np.ascontiguousarray(c)
return BSpline.construct_fast(t, c, k, axis=axis)
| py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/_fitpack_impl.py |
"""
fitpack (dierckx in netlib) --- A Python-C wrapper to FITPACK (by P. Dierckx).
FITPACK is a collection of FORTRAN programs for curve and surface
fitting with splines and tensor product splines.
See
http://www.cs.kuleuven.ac.be/cwis/research/nalag/research/topics/fitpack.html
or
http://www.netlib.org/dierckx/index.html
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
TODO: Make interfaces to the following fitpack functions:
For univariate splines: cocosp, concon, fourco, insert
For bivariate splines: profil, regrid, parsur, surev
"""
from __future__ import division, print_function, absolute_import
__all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde',
'bisplrep', 'bisplev', 'insert', 'splder', 'splantider']
import warnings
import numpy as np
from . import _fitpack
from numpy import (atleast_1d, array, ones, zeros, sqrt, ravel, transpose,
empty, iinfo, intc, asarray)
# Try to replace _fitpack interface with
# f2py-generated version
from . import dfitpack
def _intc_overflow(x, msg=None):
"""Cast the value to an intc and raise an OverflowError if the value
cannot fit.
"""
if x > iinfo(intc).max:
if msg is None:
msg = '%r cannot fit into an intc' % x
raise OverflowError(msg)
return intc(x)
_iermess = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree k.\n"
"fp gives the upper bound fp0 for the smoothing factor s", None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: data (x,y) size is too small or smoothing parameter"
"\ns is too small (fp>s).", ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable cause: s too small. (abs(fp-s)/s>0.001)",
ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
10: ["Error on input data", ValueError],
'unknown': ["An error occurred", TypeError]
}
_iermess2 = {
0: ["The spline has a residual sum of squares fp such that "
"abs(fp-s)/s<=0.001", None],
-1: ["The spline is an interpolating spline (fp=0)", None],
-2: ["The spline is weighted least-squares polynomial of degree kx and ky."
"\nfp gives the upper bound fp0 for the smoothing factor s", None],
-3: ["Warning. The coefficients of the spline have been computed as the\n"
"minimal norm least-squares solution of a rank deficient system.",
None],
1: ["The required storage space exceeds the available storage space.\n"
"Probable causes: nxest or nyest too small or s is too small. (fp>s)",
ValueError],
2: ["A theoretically impossible result when finding a smoothing spline\n"
"with fp = s. Probable causes: s too small or badly chosen eps.\n"
"(abs(fp-s)/s>0.001)", ValueError],
3: ["The maximal number of iterations (20) allowed for finding smoothing\n"
"spline with fp=s has been reached. Probable cause: s too small.\n"
"(abs(fp-s)/s>0.001)", ValueError],
4: ["No more knots can be added because the number of B-spline\n"
"coefficients already exceeds the number of data points m.\n"
"Probable causes: either s or m too small. (fp>s)", ValueError],
5: ["No more knots can be added because the additional knot would\n"
"coincide with an old one. Probable cause: s too small or too large\n"
"a weight to an inaccurate data point. (fp>s)", ValueError],
10: ["Error on input data", ValueError],
11: ["rwrk2 too small, i.e. there is not enough workspace for computing\n"
"the minimal least-squares solution of a rank deficient system of\n"
"linear equations.", ValueError],
'unknown': ["An error occurred", TypeError]
}
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
def splprep(x, w=None, u=None, ub=None, ue=None, k=3, task=0, s=None, t=None,
full_output=0, nest=None, per=0, quiet=1):
"""
Find the B-spline representation of an N-dimensional curve.
Given a list of N rank-1 arrays, `x`, which represent a curve in
N-dimensional space parametrized by `u`, find a smooth approximating
spline curve g(`u`). Uses the FORTRAN routine parcur from FITPACK.
Parameters
----------
x : array_like
A list of sample vector arrays representing the curve.
w : array_like, optional
Strictly positive rank-1 array of weights the same length as `x[0]`.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the `x` values have standard-deviation given by
the vector d, then `w` should be 1/d. Default is ``ones(len(x[0]))``.
u : array_like, optional
An array of parameter values. If not given, these values are
calculated automatically, with ``M = len(x[0])``:
v[0] = 0
v[i] = v[i-1] + distance(`x[i]`, `x[i-1]`)
u[i] = v[i] / v[M-1]
ub, ue : int, optional
The end-points of the parameters interval. Defaults to
u[0] and u[-1].
k : int, optional
Degree of the spline. Cubic splines are recommended.
Even values of `k` should be avoided especially with a small s-value.
``1 <= k <= 5``, default is 3.
task : int, optional
If task==0 (default), find t and c for a given smoothing factor, s.
If task==1, find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1
for the same set of data.
If task=-1 find the weighted least square spline for a given set of
knots, t.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: ``sum((w * (y - g))**2,axis=0) <= s``,
where g(x) is the smoothed interpolation of (x,y). The user can
use `s` to control the trade-off between closeness and smoothness
of fit. Larger `s` means more smoothing while smaller values of `s`
indicate less smoothing. Recommended values of `s` depend on the
weights, w. If the weights represent the inverse of the
standard-deviation of y, then a good `s` value should be found in
the range ``(m-sqrt(2*m),m+sqrt(2*m))``, where m is the number of
data points in x, y, and w.
t : int, optional
The knots needed for task=-1.
full_output : int, optional
If non-zero, then return optional outputs.
nest : int, optional
An over-estimate of the total number of knots of the spline to
help in determining the storage space. By default nest=m/2.
nest=m+k+1 is always large enough.
per : int, optional
If non-zero, data points are considered periodic with period
``x[m-1] - x[0]`` and a smooth periodic spline approximation is
returned. Values of ``y[m-1]`` and ``w[m-1]`` are not used.
quiet : int, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
u : array
An array of the values of the parameter.
fp : float
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about splprep success. Success is indicated
if ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splrep, splev, sproot, spalde, splint,
bisplrep, bisplev
UnivariateSpline, BivariateSpline
Notes
-----
See `splev` for evaluation of the spline and its derivatives.
The number of dimensions N must be smaller than 11.
References
----------
.. [1] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines, Computer Graphics and Image Processing",
20 (1982) 171-184.
.. [2] P. Dierckx, "Algorithms for smoothing data with periodic and
parametric splines", report tw55, Dept. Computer Science,
K.U.Leuven, 1981.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
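Examples
--------
A minimal sketch (the circle data is illustrative, not from the original
documentation): fit a parametric spline to a planar curve and resample it
on a finer parameter grid.
>>> import numpy as np
>>> from scipy.interpolate import splprep, splev
>>> theta = np.linspace(0, 2*np.pi, 40)
>>> tck, u = splprep([np.cos(theta), np.sin(theta)], s=0)
>>> x_new, y_new = splev(np.linspace(0, 1, 200), tck)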
"""
global _parcur_cache  # the cache is rebound below, so declare it global
if task <= 0:
_parcur_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc), 'u': array([], float),
'ub': 0, 'ue': 1}
x = atleast_1d(x)
idim, m = x.shape
if per:
for i in range(idim):
if x[i][0] != x[i][-1]:
if quiet < 2:
warnings.warn(RuntimeWarning('Setting x[%d][%d]=x[%d][0]' %
(i, m - 1, i)))
x[i][-1] = x[i][0]
if not 0 < idim < 11:
raise TypeError('0 < idim < 11 must hold')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
ipar = (u is not None)
if ipar:
_parcur_cache['u'] = u
if ub is None:
_parcur_cache['ub'] = u[0]
else:
_parcur_cache['ub'] = ub
if ue is None:
_parcur_cache['ue'] = u[-1]
else:
_parcur_cache['ue'] = ue
else:
_parcur_cache['u'] = zeros(m, float)
if not (1 <= k <= 5):
raise TypeError('1 <= k = %d <= 5 must hold' % k)
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if (not len(w) == m) or (ipar == 1 and (not len(u) == m)):
raise TypeError('Mismatch of input dimensions')
if s is None:
s = m - sqrt(2*m)
if t is None and task == -1:
raise TypeError('Knots must be given for task=-1')
if t is not None:
_parcur_cache['t'] = atleast_1d(t)
n = len(_parcur_cache['t'])
if task == -1 and n < 2*k + 2:
raise TypeError('There must be at least 2*k+2 knots for task=-1')
if m <= k:
raise TypeError('m > k must hold')
if nest is None:
nest = m + 2*k
if (task >= 0 and s == 0) or (nest < 0):
if per:
nest = m + 2*k
else:
nest = m + k + 1
nest = max(nest, 2*k + 3)
u = _parcur_cache['u']
ub = _parcur_cache['ub']
ue = _parcur_cache['ue']
t = _parcur_cache['t']
wrk = _parcur_cache['wrk']
iwrk = _parcur_cache['iwrk']
t, c, o = _fitpack._parcur(ravel(transpose(x)), w, u, ub, ue, k,
task, ipar, s, t, nest, wrk, iwrk, per)
_parcur_cache['u'] = o['u']
_parcur_cache['ub'] = o['ub']
_parcur_cache['ue'] = o['ue']
_parcur_cache['t'] = t
_parcur_cache['wrk'] = o['wrk']
_parcur_cache['iwrk'] = o['iwrk']
ier = o['ier']
fp = o['fp']
n = len(t)
u = o['u']
c.shape = idim, n - k - 1
tcku = [t, list(c), k], u
if ier <= 0 and not quiet:
warnings.warn(RuntimeWarning(_iermess[ier][0] +
"\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s)))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tcku, fp, ier, _iermess[ier][0]
except KeyError:
return tcku, fp, ier, _iermess['unknown'][0]
else:
return tcku
_curfit_cache = {'t': array([], float), 'wrk': array([], float),
'iwrk': array([], intc)}
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None,
full_output=0, per=0, quiet=1):
"""
Find the B-spline representation of 1-D curve.
Given the set of data points ``(x[i], y[i])`` determine a smooth spline
approximation of degree k on the interval ``xb <= x <= xe``.
Parameters
----------
x, y : array_like
The data points defining a curve y = f(x).
w : array_like, optional
Strictly positive rank-1 array of weights the same length as x and y.
The weights are used in computing the weighted least-squares spline
fit. If the errors in the y values have standard-deviation given by the
vector d, then w should be 1/d. Default is ones(len(x)).
xb, xe : float, optional
The interval to fit. If None, these default to x[0] and x[-1]
respectively.
k : int, optional
The order of the spline fit. It is recommended to use cubic splines.
Even order splines should be avoided especially with small s values.
1 <= k <= 5
task : {1, 0, -1}, optional
If task==0 find t and c for a given smoothing factor, s.
If task==1 find t and c for another value of the smoothing factor, s.
There must have been a previous call with task=0 or task=1 for the same
set of data (t will be stored and used internally).
If task=-1 find the weighted least square spline for a given set of
knots, t. These should be interior knots as knots on the ends will be
added automatically.
s : float, optional
A smoothing condition. The amount of smoothness is determined by
satisfying the conditions: sum((w * (y - g))**2,axis=0) <= s where g(x)
is the smoothed interpolation of (x,y). The user can use s to control
the tradeoff between closeness and smoothness of fit. Larger s means
more smoothing while smaller values of s indicate less smoothing.
Recommended values of s depend on the weights, w. If the weights
represent the inverse of the standard-deviation of y, then a good s
value should be found in the range (m-sqrt(2*m),m+sqrt(2*m)) where m is
the number of datapoints in x, y, and w. default : s=m-sqrt(2*m) if
weights are supplied. s = 0.0 (interpolating) if no weights are
supplied.
t : array_like, optional
The knots needed for task=-1. If given then task is automatically set
to -1.
full_output : bool, optional
If non-zero, then return optional outputs.
per : bool, optional
If non-zero, data points are considered periodic with period x[m-1] -
x[0] and a smooth periodic spline approximation is returned. Values of
y[m-1] and w[m-1] are not used.
quiet : bool, optional
Non-zero to suppress messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : tuple
(t,c,k) a tuple containing the vector of knots, the B-spline
coefficients, and the degree of the spline.
fp : array, optional
The weighted sum of squared residuals of the spline approximation.
ier : int, optional
An integer flag about splrep success. Success is indicated if ier<=0.
If ier in [1,2,3] an error occurred but was not raised. Otherwise an
error is raised.
msg : str, optional
A message corresponding to the integer flag, ier.
Notes
-----
See splev for evaluation of the spline and its derivatives. Uses the
FORTRAN routine curfit from FITPACK.
The user is responsible for assuring that the values of *x* are unique.
Otherwise, *splrep* will not return sensible results.
If provided, knots `t` must satisfy the Schoenberg-Whitney conditions,
i.e., there must be a subset of data points ``x[j]`` such that
``t[j] < x[j] < t[j+k+1]``, for ``j=0, 1,...,n-k-2``.
See Also
--------
UnivariateSpline, BivariateSpline
splprep, splev, sproot, spalde, splint
bisplrep, bisplev
References
----------
Based on algorithms described in [1]_, [2]_, [3]_, and [4]_:
.. [1] P. Dierckx, "An algorithm for smoothing, differentiation and
integration of experimental data using spline functions",
J.Comp.Appl.Maths 1 (1975) 165-184.
.. [2] P. Dierckx, "A fast algorithm for smoothing data on a rectangular
grid while using spline functions", SIAM J.Numer.Anal. 19 (1982)
1286-1304.
.. [3] P. Dierckx, "An improved algorithm for curve fitting with spline
functions", report tw54, Dept. Computer Science,K.U. Leuven, 1981.
.. [4] P. Dierckx, "Curve and surface fitting with splines", Monographs on
Numerical Analysis, Oxford University Press, 1993.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import splev, splrep
>>> x = np.linspace(0, 10, 10)
>>> y = np.sin(x)
>>> tck = splrep(x, y)
>>> x2 = np.linspace(0, 10, 200)
>>> y2 = splev(x2, tck)
>>> plt.plot(x, y, 'o', x2, y2)
>>> plt.show()
"""
global _curfit_cache  # the cache is rebound below, so declare it global
if task <= 0:
_curfit_cache = {}
x, y = map(atleast_1d, [x, y])
m = len(x)
if w is None:
w = ones(m, float)
if s is None:
s = 0.0
else:
w = atleast_1d(w)
if s is None:
s = m - sqrt(2*m)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if (m != len(y)) or (m != len(w)):
raise TypeError('Lengths of the first three arguments (x,y,w) must '
'be equal')
if not (1 <= k <= 5):
raise TypeError('Given degree of the spline (k=%d) is not supported. '
'(1<=k<=5)' % k)
if m <= k:
raise TypeError('m > k must hold')
if xb is None:
xb = x[0]
if xe is None:
xe = x[-1]
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if t is not None:
task = -1
if task == -1:
if t is None:
raise TypeError('Knots must be given for task=-1')
numknots = len(t)
_curfit_cache['t'] = empty((numknots + 2*k + 2,), float)
_curfit_cache['t'][k+1:-k-1] = t
nest = len(_curfit_cache['t'])
elif task == 0:
if per:
nest = max(m + 2*k, 2*k + 3)
else:
nest = max(m + k + 1, 2*k + 3)
t = empty((nest,), float)
_curfit_cache['t'] = t
if task <= 0:
if per:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(8 + 5*k),), float)
else:
_curfit_cache['wrk'] = empty((m*(k + 1) + nest*(7 + 3*k),), float)
_curfit_cache['iwrk'] = empty((nest,), intc)
try:
t = _curfit_cache['t']
wrk = _curfit_cache['wrk']
iwrk = _curfit_cache['iwrk']
except KeyError:
raise TypeError("must call with task=1 only after"
" call with task=0,-1")
if not per:
n, c, fp, ier = dfitpack.curfit(task, x, y, w, t, wrk, iwrk,
xb, xe, k, s)
else:
n, c, fp, ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s)
tck = (t[:n], c[:n], k)
if ier <= 0 and not quiet:
_mess = (_iermess[ier][0] + "\tk=%d n=%d m=%d fp=%f s=%f" %
(k, len(t), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ier > 0 and not full_output:
if ier in [1, 2, 3]:
warnings.warn(RuntimeWarning(_iermess[ier][0]))
else:
try:
raise _iermess[ier][1](_iermess[ier][0])
except KeyError:
raise _iermess['unknown'][1](_iermess['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess[ier][0]
except KeyError:
return tck, fp, ier, _iermess['unknown'][0]
else:
return tck
def splev(x, tck, der=0, ext=0):
"""
Evaluate a B-spline or its derivatives.
Given the knots and coefficients of a B-spline representation, evaluate
the value of the smoothing polynomial and its derivatives. This is a
wrapper around the FORTRAN routines splev and splder of FITPACK.
Parameters
----------
x : array_like
An array of points at which to return the value of the smoothed
spline or its derivatives. If `tck` was returned from `splprep`,
then the parameter values, ``u``, should be given.
tck : tuple
A sequence of length 3 returned by `splrep` or `splprep` containing
the knots, coefficients, and degree of the spline.
der : int, optional
The order of derivative of the spline to compute (must be less than
or equal to k).
ext : int, optional
Controls the value returned for elements of ``x`` not in the
interval defined by the knot sequence.
* if ext=0, return the extrapolated value.
* if ext=1, return 0
* if ext=2, raise a ValueError
* if ext=3, return the boundary value.
The default value is 0.
Returns
-------
y : ndarray or list of ndarrays
An array of values representing the spline function evaluated at
the points in ``x``. If `tck` was returned from `splprep`, then this
is a list of arrays representing the curve in N-dimensional space.
See Also
--------
splprep, splrep, sproot, spalde, splint
bisplrep, bisplev
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
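Examples
--------
A minimal sketch of the ``ext`` modes (the data are illustrative):
>>> import numpy as np
>>> from scipy.interpolate import splrep, splev
>>> x = np.linspace(0, 10, 30)
>>> tck = splrep(x, np.sin(x))
>>> y = splev([-1.0, 5.0], tck, ext=1)  # ext=1: zero outside the knots
>>> y[0]
0.0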
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k, der=der:
splev(x, [t, c, k], der, ext), c))
else:
if not (0 <= der <= k):
raise ValueError("0<=der=%d<=k=%d must hold" % (der, k))
if ext not in (0, 1, 2, 3):
raise ValueError("ext = %s not in (0, 1, 2, 3) " % ext)
x = asarray(x)
shape = x.shape
x = atleast_1d(x).ravel()
y, ier = _fitpack._spl_(x, der, t, c, k, ext)
if ier == 10:
raise ValueError("Invalid input data")
if ier == 1:
raise ValueError("Found x value not in the domain")
if ier:
raise TypeError("An error occurred")
return y.reshape(shape)
def splint(a, b, tck, full_output=0):
"""
Evaluate the definite integral of a B-spline.
Given the knots and coefficients of a B-spline, evaluate the definite
integral of the smoothing polynomial between two given points.
Parameters
----------
a, b : float
The end-points of the integration interval.
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the spline (see `splev`).
full_output : int, optional
Non-zero to return optional output.
Returns
-------
integral : float
The resulting integral.
wrk : ndarray
An array containing the integrals of the normalized B-splines
defined on the set of knots.
Notes
-----
splint silently assumes that the spline function is zero outside the data
interval (a, b).
See Also
--------
splprep, splrep, sproot, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] P.W. Gaffney, "The calculation of indefinite integrals of b-splines",
J. Inst. Maths Applics, 17, p.37-41, 1976.
.. [2] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
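Examples
--------
A minimal sketch (an interpolating spline of ``np.sin`` is used for
illustration):
>>> import numpy as np
>>> from scipy.interpolate import splrep, splint
>>> x = np.linspace(0, np.pi, 50)
>>> tck = splrep(x, np.sin(x))
>>> round(splint(0, np.pi, tck), 4)  # the exact integral is 2
2.0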
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, a=a, b=b, t=t, k=k:
splint(a, b, [t, c, k]), c))
else:
aint, wrk = _fitpack._splint(t, c, k, a, b)
if full_output:
return aint, wrk
else:
return aint
def sproot(tck, mest=10):
"""
Find the roots of a cubic B-spline.
Given the knots (>=8) and coefficients of a cubic B-spline return the
roots of the spline.
Parameters
----------
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
The number of knots must be >= 8, and the degree must be 3.
The knots must be a monotonically increasing sequence.
mest : int, optional
An estimate of the number of zeros (Default is 10).
Returns
-------
zeros : ndarray
An array giving the roots of the spline.
See also
--------
splprep, splrep, splint, spalde, splev
bisplrep, bisplev
UnivariateSpline, BivariateSpline
References
----------
.. [1] C. de Boor, "On calculating with b-splines", J. Approximation
Theory, 6, p.50-62, 1972.
.. [2] M.G. Cox, "The numerical evaluation of b-splines", J. Inst. Maths
Applics, 10, p.134-149, 1972.
.. [3] P. Dierckx, "Curve and surface fitting with splines", Monographs
on Numerical Analysis, Oxford University Press, 1993.
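Examples
--------
A minimal sketch (the cosine roots are used for illustration):
>>> import numpy as np
>>> from scipy.interpolate import splrep, sproot
>>> x = np.linspace(0, 10, 70)
>>> tck = splrep(x, np.cos(x))
>>> np.allclose(sproot(tck) / np.pi, [0.5, 1.5, 2.5])
True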
"""
t, c, k = tck
if k != 3:
raise ValueError("sproot works only for cubic (k=3) splines")
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, t=t, k=k, mest=mest:
sproot([t, c, k], mest), c))
else:
if len(t) < 8:
raise TypeError("The number of knots %d>=8" % len(t))
z, ier = _fitpack._sproot(t, c, k, mest)
if ier == 10:
raise TypeError("Invalid input data. "
"t1<=..<=t4<t5<..<tn-3<=..<=tn must hold.")
if ier == 0:
return z
if ier == 1:
warnings.warn(RuntimeWarning("The number of zeros exceeds mest"))
return z
raise TypeError("Unknown error")
def spalde(x, tck):
"""
Evaluate all derivatives of a B-spline.
Given the knots and coefficients of a cubic B-spline compute all
derivatives up to order k at a point (or set of points).
Parameters
----------
x : array_like
A point or a set of points at which to evaluate the derivatives.
Note that ``t(k) <= x <= t(n-k+1)`` must hold for each `x`.
tck : tuple
A tuple (t,c,k) containing the vector of knots,
the B-spline coefficients, and the degree of the spline.
Returns
-------
results : {ndarray, list of ndarrays}
An array (or a list of arrays) containing all derivatives
up to order k inclusive for each point `x`.
See Also
--------
splprep, splrep, splint, sproot, splev, bisplrep, bisplev,
UnivariateSpline, BivariateSpline
References
----------
.. [1] de Boor C : On calculating with b-splines, J. Approximation Theory
6 (1972) 50-62.
.. [2] Cox M.G. : The numerical evaluation of b-splines, J. Inst. Maths
applics 10 (1972) 134-149.
.. [3] Dierckx P. : Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
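Examples
--------
A minimal sketch (a cubic spline of ``np.sin`` is used for illustration):
>>> import numpy as np
>>> from scipy.interpolate import splrep, spalde
>>> x = np.linspace(0, 10, 30)
>>> tck = splrep(x, np.sin(x))
>>> d = spalde(2.5, tck)  # value and derivatives up to order k at x = 2.5
>>> len(d)
4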
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
return list(map(lambda c, x=x, t=t, k=k:
spalde(x, [t, c, k]), c))
else:
x = atleast_1d(x)
if len(x) > 1:
return list(map(lambda x, tck=tck: spalde(x, tck), x))
d, ier = _fitpack._spalde(t, c, k, x[0])
if ier == 0:
return d
if ier == 10:
raise TypeError("Invalid input data. t(k)<=x<=t(n-k+1) must hold.")
raise TypeError("Unknown error")
# def _curfit(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None,
# full_output=0,nest=None,per=0,quiet=1):
_surfit_cache = {'tx': array([], float), 'ty': array([], float),
'wrk': array([], float), 'iwrk': array([], intc)}
def bisplrep(x, y, z, w=None, xb=None, xe=None, yb=None, ye=None,
kx=3, ky=3, task=0, s=None, eps=1e-16, tx=None, ty=None,
full_output=0, nxest=None, nyest=None, quiet=1):
"""
Find a bivariate B-spline representation of a surface.
Given a set of data points (x[i], y[i], z[i]) representing a surface
z=f(x,y), compute a B-spline representation of the surface. Based on
the routine SURFIT from FITPACK.
Parameters
----------
x, y, z : ndarray
Rank-1 arrays of data points.
w : ndarray, optional
Rank-1 array of weights. By default ``w=np.ones(len(x))``.
xb, xe : float, optional
End points of approximation interval in `x`.
By default ``xb = x.min(), xe=x.max()``.
yb, ye : float, optional
End points of approximation interval in `y`.
By default ``yb=y.min(), ye = y.max()``.
kx, ky : int, optional
The degrees of the spline (1 <= kx, ky <= 5).
Third order (kx=ky=3) is recommended.
task : int, optional
If task=0, find knots in x and y and coefficients for a given
smoothing factor, s.
If task=1, find knots and coefficients for another value of the
smoothing factor, s. bisplrep must have been previously called
with task=0 or task=1.
If task=-1, find coefficients for a given set of knots tx, ty.
s : float, optional
A non-negative smoothing factor. If weights correspond
to the inverse of the standard-deviation of the errors in z,
then a good s-value should be found in the range
``(m-sqrt(2*m),m+sqrt(2*m))`` where m=len(x).
eps : float, optional
A threshold for determining the effective rank of an
over-determined linear system of equations (0 < eps < 1).
`eps` is not likely to need changing.
tx, ty : ndarray, optional
Rank-1 arrays of the knots of the spline for task=-1
full_output : int, optional
Non-zero to return optional outputs.
nxest, nyest : int, optional
Over-estimates of the total number of knots. If None then
``nxest = max(kx+sqrt(m/2),2*kx+3)``,
``nyest = max(ky+sqrt(m/2),2*ky+3)``.
quiet : int, optional
Non-zero to suppress printing of messages.
This parameter is deprecated; use standard Python warning filters
instead.
Returns
-------
tck : array_like
A list [tx, ty, c, kx, ky] containing the knots (tx, ty) and
coefficients (c) of the bivariate B-spline representation of the
surface along with the degree of the spline.
fp : ndarray
The weighted sum of squared residuals of the spline approximation.
ier : int
An integer flag about bisplrep success. Success is indicated if
ier<=0. If ier in [1,2,3] an error occurred but was not raised.
Otherwise an error is raised.
msg : str
A message corresponding to the integer flag, ier.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplev` to evaluate the value of the B-spline given its tck
representation.
References
----------
.. [1] Dierckx P.:An algorithm for surface fitting with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P.:An algorithm for surface fitting with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P.:Curve and surface fitting with splines, Monographs on
Numerical Analysis, Oxford University Press, 1993.
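Examples
--------
A minimal sketch (the surface below is illustrative, not from the
original documentation):
>>> import numpy as np
>>> from scipy.interpolate import bisplrep, bisplev
>>> x, y = np.mgrid[0:1:20j, 0:1:20j]
>>> z = np.sin(x + y)
>>> tck = bisplrep(x.ravel(), y.ravel(), z.ravel(), s=0)
>>> znew = bisplev(np.linspace(0, 1, 50), np.linspace(0, 1, 50), tck)
>>> znew.shape
(50, 50)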
"""
x, y, z = map(ravel, [x, y, z]) # ensure 1-d arrays.
m = len(x)
if not (m == len(y) == len(z)):
raise TypeError('len(x)==len(y)==len(z) must hold.')
if w is None:
w = ones(m, float)
else:
w = atleast_1d(w)
if not len(w) == m:
raise TypeError('len(w)=%d is not equal to m=%d' % (len(w), m))
if xb is None:
xb = x.min()
if xe is None:
xe = x.max()
if yb is None:
yb = y.min()
if ye is None:
ye = y.max()
if not (-1 <= task <= 1):
raise TypeError('task must be -1, 0 or 1')
if s is None:
s = m - sqrt(2*m)
if tx is None and task == -1:
raise TypeError('Knots_x must be given for task=-1')
if tx is not None:
_surfit_cache['tx'] = atleast_1d(tx)
nx = len(_surfit_cache['tx'])
if ty is None and task == -1:
raise TypeError('Knots_y must be given for task=-1')
if ty is not None:
_surfit_cache['ty'] = atleast_1d(ty)
ny = len(_surfit_cache['ty'])
if task == -1 and nx < 2*kx+2:
raise TypeError('There must be at least 2*kx+2 knots_x for task=-1')
if task == -1 and ny < 2*ky+2:
raise TypeError('There must be at least 2*ky+2 knots_y for task=-1')
if not ((1 <= kx <= 5) and (1 <= ky <= 5)):
raise TypeError('Given degree of the spline (kx,ky=%d,%d) is not '
'supported. (1<=k<=5)' % (kx, ky))
if m < (kx + 1)*(ky + 1):
raise TypeError('m >= (kx+1)(ky+1) must hold')
if nxest is None:
nxest = int(kx + sqrt(m/2))
if nyest is None:
nyest = int(ky + sqrt(m/2))
nxest, nyest = max(nxest, 2*kx + 3), max(nyest, 2*ky + 3)
if task >= 0 and s == 0:
nxest = int(kx + sqrt(3*m))
nyest = int(ky + sqrt(3*m))
if task == -1:
_surfit_cache['tx'] = atleast_1d(tx)
_surfit_cache['ty'] = atleast_1d(ty)
tx, ty = _surfit_cache['tx'], _surfit_cache['ty']
wrk = _surfit_cache['wrk']
u = nxest - kx - 1
v = nyest - ky - 1
km = max(kx, ky) + 1
ne = max(nxest, nyest)
bx, by = kx*v + ky + 1, ky*u + kx + 1
b1, b2 = bx, bx + v - ky
if bx > by:
b1, b2 = by, by + u - kx
msg = "Too many data points to interpolate"
lwrk1 = _intc_overflow(u*v*(2 + b1 + b2) +
2*(u + v + km*(m + ne) + ne - kx - ky) + b2 + 1,
msg=msg)
lwrk2 = _intc_overflow(u*v*(b2 + 1) + b2, msg=msg)
tx, ty, c, o = _fitpack._surfit(x, y, z, w, xb, xe, yb, ye, kx, ky,
task, s, eps, tx, ty, nxest, nyest,
wrk, lwrk1, lwrk2)
_surfit_cache['tx'] = tx
_surfit_cache['ty'] = ty
_surfit_cache['wrk'] = o['wrk']
ier, fp = o['ier'], o['fp']
tck = [tx, ty, c, kx, ky]
ierm = min(11, max(-3, ier))
if ierm <= 0 and not quiet:
_mess = (_iermess2[ierm][0] +
"\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_mess))
if ierm > 0 and not full_output:
if ier in [1, 2, 3, 4, 5]:
_mess = ("\n\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f" %
(kx, ky, len(tx), len(ty), m, fp, s))
warnings.warn(RuntimeWarning(_iermess2[ierm][0] + _mess))
else:
try:
raise _iermess2[ierm][1](_iermess2[ierm][0])
except KeyError:
raise _iermess2['unknown'][1](_iermess2['unknown'][0])
if full_output:
try:
return tck, fp, ier, _iermess2[ierm][0]
except KeyError:
return tck, fp, ier, _iermess2['unknown'][0]
else:
return tck
def bisplev(x, y, tck, dx=0, dy=0):
"""
Evaluate a bivariate B-spline and its derivatives.
Return a rank-2 array of spline function values (or spline derivative
values) at points given by the cross-product of the rank-1 arrays `x` and
`y`. In special cases, return an array or just a float if either `x` or
`y` or both are floats. Based on BISPEV from FITPACK.
Parameters
----------
x, y : ndarray
Rank-1 arrays specifying the domain over which to evaluate the
spline or its derivative.
tck : tuple
A sequence of length 5 returned by `bisplrep` containing the knot
locations, the coefficients, and the degree of the spline:
[tx, ty, c, kx, ky].
dx, dy : int, optional
The orders of the partial derivatives in `x` and `y` respectively.
Returns
-------
vals : ndarray
The B-spline or its derivative evaluated over the set formed by
the cross-product of `x` and `y`.
See Also
--------
splprep, splrep, splint, sproot, splev
UnivariateSpline, BivariateSpline
Notes
-----
See `bisplrep` to generate the `tck` representation.
References
----------
.. [1] Dierckx P. : An algorithm for surface fitting
with spline functions
Ima J. Numer. Anal. 1 (1981) 267-283.
.. [2] Dierckx P. : An algorithm for surface fitting
with spline functions
report tw50, Dept. Computer Science,K.U.Leuven, 1980.
.. [3] Dierckx P. : Curve and surface fitting with splines,
Monographs on Numerical Analysis, Oxford University Press, 1993.
"""
tx, ty, c, kx, ky = tck
if not (0 <= dx < kx):
raise ValueError("0 <= dx = %d < kx = %d must hold" % (dx, kx))
if not (0 <= dy < ky):
raise ValueError("0 <= dy = %d < ky = %d must hold" % (dy, ky))
x, y = map(atleast_1d, [x, y])
if (len(x.shape) != 1) or (len(y.shape) != 1):
raise ValueError("First two entries should be rank-1 arrays.")
z, ier = _fitpack._bispev(tx, ty, c, kx, ky, x, y, dx, dy)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
z.shape = len(x), len(y)
if len(z) > 1:
return z
if len(z[0]) > 1:
return z[0]
return z[0][0]
def dblint(xa, xb, ya, yb, tck):
"""Evaluate the integral of a spline over area [xa,xb] x [ya,yb].
Parameters
----------
xa, xb : float
The end-points of the x integration interval.
ya, yb : float
The end-points of the y integration interval.
tck : list [tx, ty, c, kx, ky]
A sequence of length 5 returned by bisplrep containing the knot
locations tx, ty, the coefficients c, and the degrees kx, ky
of the spline.
Returns
-------
integ : float
The value of the resulting integral.
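Examples
--------
A minimal sketch (a constant surface is fitted so the expected integral
is obvious; ``dblint`` is assumed importable from `scipy.interpolate`):
>>> import numpy as np
>>> from scipy.interpolate import bisplrep, dblint
>>> x, y = np.mgrid[0:1:20j, 0:1:20j]
>>> tck = bisplrep(x.ravel(), y.ravel(), np.ones(400), s=0)
>>> round(dblint(0, 1, 0, 1, tck), 6)  # integral of 1 over the unit square
1.0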
"""
tx, ty, c, kx, ky = tck
return dfitpack.dblint(tx, ty, c, kx, ky, xa, xb, ya, yb)
def insert(x, tck, m=1, per=0):
"""
Insert knots into a B-spline.
Given the knots and coefficients of a B-spline representation, create a
new B-spline with a knot inserted `m` times at point `x`.
This is a wrapper around the FORTRAN routine insert of FITPACK.
Parameters
----------
x (u) : array_like
The point(s) at which to insert new knot(s). If `tck` was returned
from ``splprep``, then the parameter values, ``u``, should be given.
The condition ``t(k+1) <= x <= t(n-k)`` must hold, where k is the
degree of the spline. In case of a periodic spline (``per != 0``)
there must be either at least k interior knots t(j) satisfying
``t(k+1)<t(j)<=x`` or at least k interior knots t(j) satisfying
``x<=t(j)<t(n-k)``.
tck : tuple
A tuple (t,c,k) returned by ``splrep`` or ``splprep`` containing
the vector of knots, the B-spline coefficients,
and the degree of the spline.
m : int, optional
The number of times to insert the given knot (its multiplicity).
Default is 1.
per : int, optional
If non-zero, the input spline is considered periodic.
Returns
-------
tck : tuple
A tuple (t,c,k) containing the vector of knots, the B-spline
coefficients, and the degree of the new spline.
Notes
-----
Based on algorithms from [1]_ and [2]_.
References
----------
.. [1] W. Boehm, "Inserting new knots into b-spline curves.",
Computer Aided Design, 12, p.199-201, 1980.
.. [2] P. Dierckx, "Curve and surface fitting with splines, Monographs on
Numerical Analysis", Oxford University Press, 1993.
"""
t, c, k = tck
try:
c[0][0]
parametric = True
except Exception:
parametric = False
if parametric:
cc = []
for c_vals in c:
tt, cc_val, kk = insert(x, [t, c_vals, k], m, per)
cc.append(cc_val)
return (tt, cc, kk)
else:
tt, cc, ier = _fitpack._insert(per, t, c, k, x, m)
if ier == 10:
raise ValueError("Invalid input data")
if ier:
raise TypeError("An error occurred")
return (tt, cc, k)
def splder(tck, n=1):
"""
Compute the spline representation of the derivative of a given spline
Parameters
----------
tck : tuple of (t, c, k)
Spline whose derivative to compute
n : int, optional
Order of derivative to evaluate. Default: 1
Returns
-------
tck_der : tuple of (t2, c2, k2)
Spline of order k2=k-n representing the derivative
of the input spline.
Notes
-----
.. versionadded:: 0.13.0
See Also
--------
splantider, splev, spalde
Examples
--------
This can be used for finding maxima of a curve:
>>> from scipy.interpolate import splrep, splder, sproot
>>> x = np.linspace(0, 10, 70)
>>> y = np.sin(x)
>>> spl = splrep(x, y, k=4)
Now, differentiate the spline and find the zeros of the
derivative. (NB: `sproot` only works for order 3 splines, so we
fit an order 4 spline):
>>> dspl = splder(spl)
>>> sproot(dspl) / np.pi
array([ 0.50000001, 1.5 , 2.49999998])
This agrees well with roots :math:`\\pi/2 + n\\pi` of
:math:`\\cos(x) = \\sin'(x)`.
"""
if n < 0:
return splantider(tck, -n)
t, c, k = tck
if n > k:
raise ValueError(("Order of derivative (n = %r) must be <= "
"order of spline (k = %r)") % (n, tck[2]))
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + ((None,)*len(c.shape[1:]))
with np.errstate(invalid='raise', divide='raise'):
try:
for j in range(n):
# See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5
# Compute the denominator in the differentiation formula.
# (and append trailing dims, if necessary)
dt = t[k+1:-1] - t[1:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = (c[1:-1-k] - c[:-2-k]) * k / dt
# Pad coefficient array to same size as knots (FITPACK
# convention)
c = np.r_[c, np.zeros((k,) + c.shape[1:])]
# Adjust knots
t = t[1:-1]
k -= 1
except FloatingPointError:
raise ValueError(("The spline has internal repeated knots "
"and is not differentiable %d times") % n)
return t, c, k
def splantider(tck, n=1):
"""
Compute the spline for the antiderivative (integral) of a given spline.
Parameters
----------
tck : tuple of (t, c, k)
Spline whose antiderivative to compute
n : int, optional
Order of antiderivative to evaluate. Default: 1
Returns
-------
tck_ader : tuple of (t2, c2, k2)
Spline of order k2=k+n representing the antiderivative of the input
spline.
See Also
--------
splder, splev, spalde
Notes
-----
The `splder` function is the inverse operation of this function.
Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo
rounding error.
.. versionadded:: 0.13.0
Examples
--------
>>> from scipy.interpolate import splrep, splder, splantider, splev
>>> x = np.linspace(0, np.pi/2, 70)
>>> y = 1 / np.sqrt(1 - 0.8*np.sin(x)**2)
>>> spl = splrep(x, y)
The derivative is the inverse operation of the antiderivative,
although some floating point error accumulates:
>>> splev(1.7, spl), splev(1.7, splder(splantider(spl)))
(array(2.1565429877197317), array(2.1565429877201865))
Antiderivative can be used to evaluate definite integrals:
>>> ispl = splantider(spl)
>>> splev(np.pi/2, ispl) - splev(0, ispl)
2.2572053588768486
This is indeed an approximation to the complete elliptic integral
:math:`K(m) = \\int_0^{\\pi/2} [1 - m\\sin^2 x]^{-1/2} dx`:
>>> from scipy.special import ellipk
>>> ellipk(0.8)
2.2572053268208538
"""
if n < 0:
return splder(tck, -n)
t, c, k = tck
# Extra axes for the trailing dims of the `c` array:
sh = (slice(None),) + (None,)*len(c.shape[1:])
for j in range(n):
# This is the inverse set of operations to splder.
# Compute the multiplier in the antiderivative formula.
dt = t[k+1:] - t[:-k-1]
dt = dt[sh]
# Compute the new coefficients
c = np.cumsum(c[:-k-1] * dt, axis=0) / (k + 1)
c = np.r_[np.zeros((1,) + c.shape[1:]),
c,
[c[-1]] * (k+2)]
# New knots
t = np.r_[t[0], t, t[-1]]
k += 1
return t, c, k
| py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/__init__.py |
"""========================================
Interpolation (:mod:`scipy.interpolate`)
========================================
.. currentmodule:: scipy.interpolate
Sub-package for objects used in interpolation.
As listed below, this sub-package contains spline functions and classes,
one-dimensional and multi-dimensional (univariate and multivariate)
interpolation classes, Lagrange and Taylor polynomial interpolators, and
wrappers for `FITPACK <http://www.netlib.org/dierckx/>`__
and DFITPACK functions.
Univariate interpolation
========================
.. autosummary::
:toctree: generated/
interp1d
BarycentricInterpolator
KroghInterpolator
PchipInterpolator
barycentric_interpolate
krogh_interpolate
pchip_interpolate
Akima1DInterpolator
CubicSpline
PPoly
BPoly
Multivariate interpolation
==========================
Unstructured data:
.. autosummary::
:toctree: generated/
griddata
LinearNDInterpolator
NearestNDInterpolator
CloughTocher2DInterpolator
Rbf
interp2d
For data on a grid:
.. autosummary::
:toctree: generated/
interpn
RegularGridInterpolator
RectBivariateSpline
.. seealso::
`scipy.ndimage.map_coordinates`
Tensor product polynomials:
.. autosummary::
:toctree: generated/
NdPPoly
1-D Splines
===========
.. autosummary::
:toctree: generated/
BSpline
make_interp_spline
make_lsq_spline
Functional interface to FITPACK routines:
.. autosummary::
:toctree: generated/
splrep
splprep
splev
splint
sproot
spalde
splder
splantider
insert
Object-oriented FITPACK interface:
.. autosummary::
:toctree: generated/
UnivariateSpline
InterpolatedUnivariateSpline
LSQUnivariateSpline
2-D Splines
===========
For data on a grid:
.. autosummary::
:toctree: generated/
RectBivariateSpline
RectSphereBivariateSpline
For unstructured data:
.. autosummary::
:toctree: generated/
BivariateSpline
SmoothBivariateSpline
SmoothSphereBivariateSpline
LSQBivariateSpline
LSQSphereBivariateSpline
Low-level interface to FITPACK functions:
.. autosummary::
:toctree: generated/
bisplrep
bisplev
Additional tools
================
.. autosummary::
:toctree: generated/
lagrange
approximate_taylor_polynomial
pade
.. seealso::
`scipy.ndimage.map_coordinates`,
`scipy.ndimage.spline_filter`,
`scipy.signal.resample`,
`scipy.signal.bspline`,
`scipy.signal.gauss_spline`,
`scipy.signal.qspline1d`,
`scipy.signal.cspline1d`,
`scipy.signal.qspline1d_eval`,
`scipy.signal.cspline1d_eval`,
`scipy.signal.qspline2d`,
`scipy.signal.cspline2d`.
Functions existing for backward compatibility (should not be used in
new code):
.. autosummary::
:toctree: generated/
spleval
spline
splmake
spltopp
pchip
"""
from __future__ import division, print_function, absolute_import
from .interpolate import *
from .fitpack import *
# New interface to fitpack library:
from .fitpack2 import *
from .rbf import Rbf
from .polyint import *
from ._cubic import *
from .ndgriddata import *
from ._bsplines import *
from ._pade import *
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/_cubic.py |
"""Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
are the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
:doi:`10.1137/0717021`.
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
:doi:`10.1137/1.9780898717952`
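    Examples
    --------
    A minimal sketch (values chosen here purely for illustration): PCHIP
    preserves the monotonicity of the data, so the interpolant of
    non-decreasing data never decreases between breakpoints:
    >>> import numpy as np
    >>> from scipy.interpolate import PchipInterpolator
    >>> x = np.array([0., 1., 2., 3.])
    >>> y = np.array([0., 1., 1., 2.])  # monotone data with a flat segment
    >>> p = PchipInterpolator(x, y)
    >>> xs = np.linspace(0., 3., 31)
    >>> np.all(np.diff(p(xs)) >= 0)  # no overshoot anywhere
    True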
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
        # Determine the derivatives d_k at the points x_k using the
        # PCHIP algorithm. The derivative at each point x_k is chosen
        # as follows.
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
    y : scalar or array_like
        The result, of length R or length M or M by R.
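    Examples
    --------
    A quick sketch with made-up data; at a breakpoint the interpolant
    reproduces the input value exactly:
    >>> import numpy as np
    >>> from scipy.interpolate import pchip_interpolate
    >>> xi = np.array([0., 1., 2., 3.])
    >>> yi = xi**2
    >>> float(pchip_interpolate(xi, yi, 1.0))
    1.0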
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
# Backwards compatibility
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points.
References
----------
    .. [1] Hiroshi Akima, "A new method of interpolation and smooth curve
           fitting based on local procedures", J. ACM, October 1970, 17(4),
           589-602.
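    Examples
    --------
    A minimal sketch (illustrative values): for step-like data the Akima
    slopes at the flat breakpoints vanish, so the middle segment is a
    symmetric cubic:
    >>> import numpy as np
    >>> from scipy.interpolate import Akima1DInterpolator
    >>> x = np.arange(6.)
    >>> y = np.array([0., 0., 0., 1., 1., 1.])
    >>> ak = Akima1DInterpolator(x, y)
    >>> float(ak(2.5))
    0.5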
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) < 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
        # This is the mask of where the slope at the breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
    * 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
    * 'clamped': The first derivative at curve ends is zero. Assuming
      a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
    * 'natural': The second derivative at curve ends is zero. Assuming
      a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
    tuple ``(order, deriv_values)`` allowing one to specify arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
    Parameters `bc_type` and `extrapolate` work independently, i.e. the former
    controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
    When 'not-a-knot' boundary conditions are applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
    You can see that the spline continuity property holds for the first and
    second derivatives, and is violated only by the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
    interval ``0 <= x <= 1``. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
y casted to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/interpnd_info.py
"""
Here we perform some symbolic computations required for the N-D
interpolation routines in `interpnd.pyx`.
"""
from __future__ import division, print_function, absolute_import
from sympy import symbols, binomial, Matrix
def _estimate_gradients_2d_global():
    #
    # Compute the matrices A and B of the quadratic form minimized by the
    # global 2-D gradient estimation in interpnd.pyx: the integral of the
    # squared second derivative of a cubic Bernstein segment, taken with
    # respect to the end-point derivatives df1 and df2.
    #
f1, f2, df1, df2, x = symbols(['f1', 'f2', 'df1', 'df2', 'x'])
c = [f1, (df1 + 3*f1)/3, (df2 + 3*f2)/3, f2]
w = 0
for k in range(4):
w += binomial(3, k) * c[k] * x**k*(1-x)**(3-k)
wpp = w.diff(x, 2).expand()
intwpp2 = (wpp**2).integrate((x, 0, 1)).expand()
A = Matrix([[intwpp2.coeff(df1**2), intwpp2.coeff(df1*df2)/2],
[intwpp2.coeff(df1*df2)/2, intwpp2.coeff(df2**2)]])
B = Matrix([[intwpp2.coeff(df1).subs(df2, 0)],
[intwpp2.coeff(df2).subs(df1, 0)]]) / 2
print("A")
print(A)
print("B")
print(B)
print("solution")
print(A.inv() * B)
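if __name__ == "__main__":
    # Minimal usage sketch: running this helper module directly prints the
    # symbolic matrices that were transcribed into interpnd.pyx.
    _estimate_gradients_2d_global()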
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/interpolate_wrapper.py
""" helper_funcs.py.
    scavenged from enthought.interpolate
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from . import _interpolate # C extension. Does all the real work.
def atleast_1d_and_contiguous(ary, dtype=np.float64):
return np.atleast_1d(np.ascontiguousarray(ary, dtype))
@np.deprecate(message="'nearest' is deprecated in SciPy 1.0.0")
def nearest(x, y, new_x):
"""
Rounds each new x to nearest input x and returns corresponding input y.
Parameters
----------
x : array_like
Independent values.
y : array_like
Dependent values.
new_x : array_like
The x values to return the interpolate y values.
Returns
-------
nearest : ndarray
Rounds each `new_x` to nearest `x` and returns the corresponding `y`.
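    Examples
    --------
    A small sketch (illustrative values only):
    >>> list(nearest([0., 1., 2.], [10., 20., 30.], [0.4, 1.6]))
    [10.0, 30.0]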
"""
shifted_x = np.concatenate((np.array([x[0]-1]), x[0:-1]))
midpoints_of_x = atleast_1d_and_contiguous(.5*(x + shifted_x))
new_x = atleast_1d_and_contiguous(new_x)
TINY = 1e-10
indices = np.searchsorted(midpoints_of_x, new_x+TINY)-1
indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(int))
new_y = np.take(y, indices, axis=-1)
return new_y
@np.deprecate(message="'linear' is deprecated in SciPy 1.0.0")
def linear(x, y, new_x):
"""
Linearly interpolates values in new_x based on the values in x and y
Parameters
----------
x : array_like
Independent values
y : array_like
Dependent values
new_x : array_like
The x values to return the interpolated y values.
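    Examples
    --------
    A small sketch (illustrative values only):
    >>> list(linear([0., 1.], [0., 2.], [0.5]))
    [1.0]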
"""
x = atleast_1d_and_contiguous(x, np.float64)
y = atleast_1d_and_contiguous(y, np.float64)
new_x = atleast_1d_and_contiguous(new_x, np.float64)
if y.ndim > 2:
raise ValueError("`linear` only works with 1-D or 2-D arrays.")
if len(y.shape) == 2:
new_y = np.zeros((y.shape[0], len(new_x)), np.float64)
for i in range(len(new_y)): # for each row
_interpolate.linear_dddd(x, y[i], new_x, new_y[i])
else:
new_y = np.zeros(len(new_x), np.float64)
_interpolate.linear_dddd(x, y, new_x, new_y)
return new_y
@np.deprecate(message="'logarithmic' is deprecated in SciPy 1.0.0")
def logarithmic(x, y, new_x):
"""
    Linearly interpolates values in new_x based on the log space of y.
Parameters
----------
x : array_like
Independent values.
y : array_like
Dependent values.
new_x : array_like
The x values to return interpolated y values at.
"""
x = atleast_1d_and_contiguous(x, np.float64)
y = atleast_1d_and_contiguous(y, np.float64)
new_x = atleast_1d_and_contiguous(new_x, np.float64)
if y.ndim > 2:
raise ValueError("`linear` only works with 1-D or 2-D arrays.")
if len(y.shape) == 2:
new_y = np.zeros((y.shape[0], len(new_x)), np.float64)
for i in range(len(new_y)):
_interpolate.loginterp_dddd(x, y[i], new_x, new_y[i])
else:
new_y = np.zeros(len(new_x), np.float64)
_interpolate.loginterp_dddd(x, y, new_x, new_y)
return new_y
@np.deprecate(message="'block_average_above' is deprecated in SciPy 1.0.0")
def block_average_above(x, y, new_x):
"""
    Block-average interpolation of values in new_x based on the values in x and y.
Parameters
----------
x : array_like
Independent values.
y : array_like
Dependent values.
new_x : array_like
The x values to interpolate y values.
"""
bad_index = None
x = atleast_1d_and_contiguous(x, np.float64)
y = atleast_1d_and_contiguous(y, np.float64)
new_x = atleast_1d_and_contiguous(new_x, np.float64)
if y.ndim > 2:
raise ValueError("`linear` only works with 1-D or 2-D arrays.")
if len(y.shape) == 2:
new_y = np.zeros((y.shape[0], len(new_x)), np.float64)
for i in range(len(new_y)):
            bad_index = _interpolate.block_average_above_dddd(x, y[i],
                                                              new_x, new_y[i])
if bad_index is not None:
break
else:
new_y = np.zeros(len(new_x), np.float64)
bad_index = _interpolate.block_average_above_dddd(x, y, new_x, new_y)
if bad_index is not None:
msg = "block_average_above cannot extrapolate and new_x[%d]=%f "\
"is out of the x range (%f, %f)" % \
(bad_index, new_x[bad_index], x[0], x[-1])
raise ValueError(msg)
return new_y
@np.deprecate(message="'block' is deprecated in SciPy 1.0.0")
def block(x, y, new_x):
"""
Essentially a step function.
    For each element ``new_x[i]``, finds the largest ``j`` such that
    ``x[j] < new_x[i]``, and returns ``y[j]``.
Parameters
----------
x : array_like
Independent values.
y : array_like
Dependent values.
new_x : array_like
The x values used to calculate the interpolated y.
Returns
-------
    block : ndarray
        The interpolated values; an array of the same length as `new_x`.
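    Examples
    --------
    A small sketch (illustrative values only):
    >>> list(block([0., 1., 2.], [10., 20., 30.], [0.5, 1.5]))
    [10.0, 20.0]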
"""
    # find indices of the values in x that precede the values in new_x
# This code is a little strange -- we really want a routine that
# returns the index of values where x[j] < x[index]
TINY = 1e-10
indices = np.searchsorted(x, new_x+TINY)-1
# If the value is at the front of the list, it'll have -1.
# In this case, we will use the first (0), element in the array.
# take requires the index array to be an Int
indices = np.atleast_1d(np.clip(indices, 0, np.Inf).astype(int))
new_y = np.take(y, indices, axis=-1)
return new_y
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/interpolate.py
""" Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
dot, ravel, poly1d, asarray, intp)
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
r"""
Return a Lagrange interpolating polynomial.
    Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : `numpy.poly1d` instance
The Lagrange interpolating polynomial.
Examples
--------
Interpolate :math:`f(x) = x^3` by 3 points.
>>> from scipy.interpolate import lagrange
>>> x = np.array([0, 1, 2])
>>> y = x**3
>>> poly = lagrange(x, y)
    Since there are only 3 points, the Lagrange polynomial has degree 2. Explicitly,
it is given by
.. math::
\begin{aligned}
L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
&= x (-2 + 3x)
\end{aligned}
>>> from numpy.polynomial.polynomial import Polynomial
>>> Polynomial(poly).coef
array([ 3., -2., 0.])
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic'
refer to a spline interpolation of zeroth, first, second or third
order; 'previous' and 'next' simply return the previous or next value
of the point) or as an integer specifying the order of the spline
interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless `fill_value="extrapolate"`.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
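    A sketch of one of the step-like kinds (``kind='previous'`` holds the
    most recent value; illustrative check):
    >>> f2 = interpolate.interp1d(x, y, kind='previous')
    >>> float(f2(2.5)) == float(y[2])
    True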
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest', 'previous', 'next'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest', 'previous', 'next'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
elif kind == 'previous':
# Side for np.searchsorted and index for clipping
self._side = 'left'
self._ind = 0
# Move x by one floating point value to the left
self._x_shift = np.nextafter(self.x, -np.inf)
self._call = self.__class__._call_previousnext
elif kind == 'next':
self._side = 'right'
self._ind = 1
# Move x by one floating point value to the right
self._x_shift = np.nextafter(self.x, np.inf)
self._call = self.__class__._call_previousnext
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
if np.isnan(self.x).any():
xx = np.linspace(min(self.x), max(self.x), len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data, the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_previousnext(self, x_new):
"""Use previous/next neighbour of x_new, y_new = f(x_new)."""
# 1. Get index of left/right value
x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
# 2. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(1-self._ind,
len(self.x)-self._ind).astype(intp)
# 3. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices+self._ind-1]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
        # or return boolean arrays masking the out-of-bounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if self.c.ndim < 2:
raise ValueError("Coefficients array must be at least "
"2-dimensional.")
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("axis=%s must be between 0 and %s" %
(axis, self.c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
`self.x` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
`self.x` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
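        Examples
        --------
        A minimal sketch appending one constant piece to a one-piece linear
        `PPoly` (illustrative values):
        >>> import numpy as np
        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1.], [0.]]), np.array([0., 1.]))  # x on [0, 1]
        >>> pp.extend(np.array([[0.], [1.]]), np.array([2.]))  # value 1 on [1, 2]
        >>> list(pp.x)
        [0.0, 1.0, 2.0]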
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - nu representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
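Examples
--------
A quick sketch for illustration (differentiating ``x**2`` on ``[0, 1]``
gives ``2*x``):
>>> from scipy.interpolate import PPoly
>>> dp = PPoly([[1.], [0.], [0.]], [0., 1.]).derivative()
>>> float(dp(0.5))
1.0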
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
The antiderivative is the indefinite integral of the function, and
differentiation is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + nu representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order nu-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
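Examples
--------
A quick sketch for illustration (the antiderivative of the constant
``2`` on ``[0, 1]``, with zero integration constant, is ``2*x``):
>>> from scipy.interpolate import PPoly
>>> ip = PPoly([[2.]], [0., 1.]).antiderivative()
>>> float(ip(0.5))
1.0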
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
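Examples
--------
A quick sketch for illustration (integrating ``2*x`` over ``[0, 1]``
gives ``1``):
>>> from scipy.interpolate import PPoly
>>> pp = PPoly([[2.], [0.]], [0., 1.])
>>> float(pp.integrate(0, 1))
1.0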
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what remains.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array, each element of which is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array, each element of which is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
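Examples
--------
A minimal sketch for illustration (an interpolating spline converted
to the power basis should reproduce the data points; ``np`` is NumPy):
>>> from scipy.interpolate import splrep, PPoly
>>> x = np.linspace(0, 1, 11)
>>> tck = splrep(x, np.sin(x), s=0)
>>> pp = PPoly.from_spline(tck)
>>> bool(np.allclose(pp(x), np.sin(x)))
True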
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
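Examples
--------
A minimal sketch for illustration (the ad-hoc Bernstein coefficients
below encode the line ``1 + 2*x`` on ``[0, 1]``):
>>> from scipy.interpolate import BPoly, PPoly
>>> bp = BPoly([[1.], [2.], [3.]], [0., 1.])
>>> pp = PPoly.from_bernstein_basis(bp)
>>> float(pp(0.5))
2.0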
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829543, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
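Examples
--------
A quick sketch for illustration (the coefficients below encode
``1 + 2*x`` on ``[0, 1]``, whose derivative is the constant ``2``):
>>> from scipy.interpolate import BPoly
>>> bp = BPoly([[1.], [2.], [3.]], [0., 1.])
>>> float(bp.derivative()(0.5))
2.0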
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
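Examples
--------
A quick sketch for illustration (integrating ``1 + 2*x`` over
``[0, 1]`` gives ``2``):
>>> from scipy.interpolate import BPoly
>>> bp = BPoly([[1.], [2.], [3.]], [0., 1.])
>>> float(bp.integrate(0, 1))
2.0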
"""
# XXX: can probably use instead the fact that
# \int_0^1 B_{j, n}(x) dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
# ib.extrapolate shouldn't be 'periodic'; it is converted to
# False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what remains.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
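Examples
--------
A quick sketch for illustration (the power-basis line ``2*x`` on
``[0, 1]`` converted to the Bernstein basis):
>>> from scipy.interpolate import BPoly, PPoly
>>> pp = PPoly([[2.], [0.]], [0., 1.])
>>> float(BPoly.from_power_basis(pp)(0.5))
1.0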
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
r"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} comb(q, j) c_{j+a}
This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
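Examples
--------
A quick sketch for illustration (matching only the endpoint values
``1`` and ``3``, with no derivatives, gives a straight line whose two
Bernstein coefficients are just the endpoint values):
>>> c = BPoly._construct_from_derivatives(0., 1., [1.], [3.])
>>> [float(v) for v in c]
[1.0, 3.0]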
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial of degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
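Examples
--------
A quick sketch for illustration (raising the constant polynomial ``1``
from degree 0 to degree 1 leaves every coefficient equal to 1, since
the Bernstein polynomials sum to one; ``np`` is NumPy):
>>> [float(v) for v in BPoly._raise_degree(np.array([1.]), 1)]
[1.0, 1.0]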
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point `xp = (x', y', z', ...)` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
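Examples
--------
A minimal sketch for illustration (the constant polynomial 1 on the
unit square, i.e. degree 0 and one interval in each dimension):
>>> from scipy.interpolate import NdPPoly
>>> c = np.ones((1, 1, 1, 1))
>>> x = (np.array([0., 1.]), np.array([0., 1.]))
>>> p = NdPPoly(c, x)
>>> float(p(np.array([[0.5, 0.5]]))[0])
1.0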
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[tuple(sl)]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[tuple(sl)]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
The antiderivative is the indefinite integral of the function, and
differentiation is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] + nu[0], ..., k[n] + nu[n])
representing the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute the NdPPoly representation of a one-dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
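Examples
--------
A quick sketch for illustration (integrating the constant polynomial
1 over the unit square gives its area):
>>> from scipy.interpolate import NdPPoly
>>> p = NdPPoly(np.ones((1, 1, 1, 1)),
...             (np.array([0., 1.]), np.array([0., 1.])))
>>> float(p.integrate(((0, 1), (0, 1))))
1.0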
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing, however, may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
If any of `points` have a dimension of size 1, linear interpolation will
return an array of `nan` values. Nearest-neighbor interpolation will work
as usual in this case.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[tuple(idx_res)]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
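Examples
--------
A minimal sketch for illustration (linear interpolation reproduces the
plane ``f(x, y) = x + y`` exactly; ``np`` is NumPy):
>>> from scipy.interpolate import interpn
>>> x = np.linspace(0, 1, 5)
>>> y = np.linspace(0, 1, 5)
>>> values = x[:, None] + y[None, :]
>>> float(interpn((x, y), values, [0.5, 0.25])[0])
0.75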
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class _ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("_ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
# The 3 private functions below can be called by splmake().
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
"use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
dimension is assumed to be the interpolating dimension and has the same
length as `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
conds : optional
Extra conditions passed to the selected fitting routine; the
interpretation depends on `kind`.
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except NameError:
raise NotImplementedError("unknown kind: %r" % kind)
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
"use BSpline instead.")
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
Curvature
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
Order of the derivative to evaluate
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj, cvals, k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),) + index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv)
res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv)
else:
res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
res.shape = oldshape + sh
return res
# When `spltopp` gets removed, also remove the _ppform class.
@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
"use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple."""
return _ppform.fromspline(xk, cvals, k)
@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
"use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
conds : optional
Boundary conditions passed through to `splmake`.
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew)
| 103,293 | 34.374658 | 111 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/ndgriddata.py
|
"""
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(x, y)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
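Examples
--------
A minimal sketch for illustration (each query point is assigned the
value of its nearest data point; the two data points are ad hoc):
>>> from scipy.interpolate import NearestNDInterpolator
>>> pts = np.array([[0., 0.], [1., 1.]])
>>> vals = np.array([10., 20.])
>>> interp = NearestNDInterpolator(pts, vals)
>>> float(interp(0.1, 0.2))
10.0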
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : 2-D ndarray of float or tuple of 1-D array, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tessellate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Returns
-------
ndarray
Array of interpolated values.
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
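# fill_value is documented to have no effect for 'nearest'; always
# extrapolate so the 1-D path matches NearestNDInterpolator behaviour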
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
#------------------------------------------------------------------------------
# cba-pipeline-public:
# cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_interpolate.py
#------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
import itertools
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_allclose)
from pytest import raises as assert_raises
import pytest
from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
import numpy as np
from scipy._lib.six import xrange
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
RegularGridInterpolator, LinearNDInterpolator, NearestNDInterpolator,
RectBivariateSpline, interpn, NdPPoly, BSpline)
from scipy.special import poch, gamma
from scipy.interpolate import _ppoly
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
from scipy.integrate import nquad
from scipy.special import binom
class TestInterp2D(object):
def test_interp2d(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x+0.5*y)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
v,u = ogrid[0:2:24j, 0:pi:25j]
assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2)
def test_interp2d_meshgrid_input(self):
# Ticket #703
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
def test_interp2d_meshgrid_input_unsorted(self):
np.random.seed(1234)
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
z = sin(x[None,:] + y[:,None]/2.)
ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
np.random.shuffle(y)
z = sin(x[None,:] + y[:,None]/2.)
ip3 = interp2d(x, y, z, kind='cubic')
x = linspace(0, 2, 31)
y = linspace(0, pi, 30)
assert_equal(ip1(x, y), ip2(x, y))
assert_equal(ip1(x, y), ip3(x, y))
def test_interp2d_eval_unsorted(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x + 0.5*y)
func = interp2d(x, y, z)
xe = np.array([3, 4, 5])
ye = np.array([5.3, 7.1])
assert_allclose(func(xe, ye), func(xe, ye[::-1]))
assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
def test_interp2d_linear(self):
# Ticket #898
a = np.zeros([5, 5])
a[2, 2] = 1.0
x = y = np.arange(5)
b = interp2d(x, y, a, 'linear')
assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
def test_interp2d_bounds(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 2, 7)
z = x[None, :]**2 + y[:, None]
ix = np.linspace(-1, 3, 31)
iy = np.linspace(-1, 3, 33)
b = interp2d(x, y, z, bounds_error=True)
assert_raises(ValueError, b, ix, iy)
b = interp2d(x, y, z, fill_value=np.nan)
iz = b(ix, iy)
mx = (ix < 0) | (ix > 1)
my = (iy < 0) | (iy > 2)
assert_(np.isnan(iz[my,:]).all())
assert_(np.isnan(iz[:,mx]).all())
assert_(np.isfinite(iz[~my,:][:,~mx]).all())
class TestInterp1D(object):
def setup_method(self):
self.x5 = np.arange(5.)
self.x10 = np.arange(10.)
self.y10 = np.arange(10.)
self.x25 = self.x10.reshape((2,5))
self.x2 = np.arange(2.)
self.y2 = np.arange(2.)
self.x1 = np.array([0.])
self.y1 = np.array([0.])
self.y210 = np.arange(20.).reshape((2, 10))
self.y102 = np.arange(20.).reshape((10, 2))
self.y225 = np.arange(20.).reshape((2, 2, 5))
self.y25 = np.arange(10.).reshape((2, 5))
self.y235 = np.arange(30.).reshape((2, 3, 5))
self.y325 = np.arange(30.).reshape((3, 2, 5))
self.fill_value = -100.0
def test_validation(self):
# Make sure that appropriate exceptions are raised when invalid values
# are given to the constructor.
# These should all work.
for kind in ('nearest', 'zero', 'linear', 'slinear', 'quadratic',
'cubic', 'previous', 'next'):
interp1d(self.x10, self.y10, kind=kind)
interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate")
interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1))
interp1d(self.x10, self.y10, kind='linear',
fill_value=np.array([-1]))
interp1d(self.x10, self.y10, kind='linear',
fill_value=(-1,))
interp1d(self.x10, self.y10, kind='linear',
fill_value=-1)
interp1d(self.x10, self.y10, kind='linear',
fill_value=(-1, -1))
interp1d(self.x10, self.y10, kind=0)
interp1d(self.x10, self.y10, kind=1)
interp1d(self.x10, self.y10, kind=2)
interp1d(self.x10, self.y10, kind=3)
interp1d(self.x10, self.y210, kind='linear', axis=-1,
fill_value=(-1, -1))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=np.ones(10))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=(np.ones(10), np.ones(10)))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=(np.ones(10), -1))
# x array must be 1D.
assert_raises(ValueError, interp1d, self.x25, self.y10)
# y array cannot be a scalar.
assert_raises(ValueError, interp1d, self.x10, np.array(0))
# Check for x and y arrays having the same length.
assert_raises(ValueError, interp1d, self.x10, self.y2)
assert_raises(ValueError, interp1d, self.x2, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y102)
interp1d(self.x10, self.y210)
interp1d(self.x10, self.y102, axis=0)
# Check for x and y having at least 1 element.
assert_raises(ValueError, interp1d, self.x1, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y1)
assert_raises(ValueError, interp1d, self.x1, self.y1)
# Bad fill values
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=(-1, -1, -1)) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[-1, -1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=np.array((-1, -1, -1))) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[[-1]]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[-1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=np.array([])) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=()) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
axis=0, fill_value=[-1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
axis=0, fill_value=(0., [-1, -1])) # above doesn't bc
def test_init(self):
# Check that the attributes are initialized appropriately by the
# constructor.
assert_(interp1d(self.x10, self.y10).copy)
assert_(not interp1d(self.x10, self.y10, copy=False).copy)
assert_(interp1d(self.x10, self.y10).bounds_error)
assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
3.0)
assert_equal(interp1d(self.x10, self.y10, fill_value=(1.0, 2.0)).fill_value,
(1.0, 2.0))
assert_equal(interp1d(self.x10, self.y10).axis, 0)
assert_equal(interp1d(self.x10, self.y210).axis, 1)
assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
def test_assume_sorted(self):
# Check for unsorted arrays
interp10 = interp1d(self.x10, self.y10)
interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
interp10([2.4, 5.6, 6.0]))
# Check assume_sorted keyword (defaults to False)
interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
assume_sorted=False)
assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
assume_sorted=True)
# Should raise an error for unsorted input if assume_sorted=True
assert_raises(ValueError, interp10_assume_kw2, self.x10)
# Check that if y is a 2-D array, things are still consistent
interp10_y_2d = interp1d(self.x10, self.y210)
interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
assert_array_almost_equal(interp10_y_2d(self.x10),
interp10_y_2d_unsorted(self.x10))
def test_linear(self):
for kind in ['linear', 'slinear']:
self._check_linear(kind)
def _check_linear(self, kind):
# Check the actual implementation of linear interpolation.
interp10 = interp1d(self.x10, self.y10, kind=kind)
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array([1.2]))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2.4, 5.6, 6.0]))
# test fill_value="extrapolate"
extrapolator = interp1d(self.x10, self.y10, kind=kind,
fill_value='extrapolate')
assert_allclose(extrapolator([-1., 0, 9, 11]),
[-1, 0, 9, 11], rtol=1e-14)
opts = dict(kind=kind,
fill_value='extrapolate',
bounds_error=True)
assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
def test_linear_dtypes(self):
# regression test for gh-5898, where 1D linear interpolation has been
# delegated to numpy.interp for all float dtypes, and the latter was
# not handling e.g. np.float128.
for dtyp in np.sctypes["float"]:
x = np.arange(8, dtype=dtyp)
y = x
yp = interp1d(x, y, kind='linear')(x)
assert_equal(yp.dtype, dtyp)
assert_allclose(yp, y, atol=1e-15)
def test_slinear_dtypes(self):
# regression test for gh-7273: 1D slinear interpolation fails with
# float32 inputs
dt_r = [np.float16, np.float32, np.float64]
dt_rc = dt_r + [np.complex64, np.complex128]
spline_kinds = ['slinear', 'zero', 'quadratic', 'cubic']
for dtx in dt_r:
x = np.arange(0, 10, dtype=dtx)
for dty in dt_rc:
y = np.exp(-x/3.0).astype(dty)
for dtn in dt_r:
xnew = x.astype(dtn)
for kind in spline_kinds:
f = interp1d(x, y, kind=kind, bounds_error=False)
assert_allclose(f(xnew), y, atol=1e-7,
err_msg="%s, %s %s" % (dtx, dty, dtn))
def test_cubic(self):
# Check the actual implementation of spline interpolation.
interp10 = interp1d(self.x10, self.y10, kind='cubic')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array([1.2]))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2.4, 5.6, 6.0]),)
def test_nearest(self):
# Check the actual implementation of nearest-neighbour interpolation.
interp10 = interp1d(self.x10, self.y10, kind='nearest')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 6., 6.]),)
# test fill_value="extrapolate"
extrapolator = interp1d(self.x10, self.y10, kind='nearest',
fill_value='extrapolate')
assert_allclose(extrapolator([-1., 0, 9, 11]),
[0, 0, 9, 9], rtol=1e-14)
opts = dict(kind='nearest',
fill_value='extrapolate',
bounds_error=True)
assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
def test_previous(self):
# Check the actual implementation of previous interpolation.
interp10 = interp1d(self.x10, self.y10, kind='previous')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 5., 6.]),)
# test fill_value="extrapolate"
extrapolator = interp1d(self.x10, self.y10, kind='previous',
fill_value='extrapolate')
assert_allclose(extrapolator([-1., 0, 9, 11]),
[0, 0, 9, 9], rtol=1e-14)
opts = dict(kind='previous',
fill_value='extrapolate',
bounds_error=True)
assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
def test_next(self):
# Check the actual implementation of next interpolation.
interp10 = interp1d(self.x10, self.y10, kind='next')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(2.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([3., 6., 6.]),)
# test fill_value="extrapolate"
extrapolator = interp1d(self.x10, self.y10, kind='next',
fill_value='extrapolate')
assert_allclose(extrapolator([-1., 0, 9, 11]),
[0, 0, 9, 9], rtol=1e-14)
opts = dict(kind='next',
fill_value='extrapolate',
bounds_error=True)
assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
def test_zero(self):
# Check the actual implementation of zero-order spline interpolation.
interp10 = interp1d(self.x10, self.y10, kind='zero')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 5., 6.]))
def _bounds_check(self, kind='linear'):
# Test that our handling of out-of-bounds input is correct.
extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
bounds_error=False, kind=kind)
assert_array_equal(extrap10(11.2), np.array(self.fill_value))
assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
np.array(self.fill_value),)
assert_array_equal(extrap10._check_bounds(
np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
np.array([[True, False, False, False, False],
[False, False, False, False, True]]))
raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
kind=kind)
assert_raises(ValueError, raises_bounds_error, -1.0)
assert_raises(ValueError, raises_bounds_error, 11.0)
raises_bounds_error([0.0, 5.0, 9.0])
def _bounds_check_int_nan_fill(self, kind='linear'):
x = np.arange(10).astype(np.int_)
y = np.arange(10).astype(np.int_)
c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
yi = c(x - 1)
assert_(np.isnan(yi[0]))
assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
def test_bounds(self):
for kind in ('linear', 'cubic', 'nearest', 'previous', 'next',
'slinear', 'zero', 'quadratic'):
self._bounds_check(kind)
self._bounds_check_int_nan_fill(kind)
def _check_fill_value(self, kind):
interp = interp1d(self.x10, self.y10, kind=kind,
fill_value=(-100, 100), bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), -100)
assert_array_almost_equal(interp([-10, 10]), [-100, 100])
# Proper broadcasting:
# interp along axis of length 5
# other dim=(2, 3), (3, 2), (2, 2), or (2,)
# one singleton fill_value (works for all)
for y in (self.y235, self.y325, self.y225, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=100, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), 100)
assert_array_almost_equal(interp([-10, 10]), 100)
# singleton lower, singleton upper
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=(-100, 100), bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), -100)
if y.ndim == 3:
result = [[[-100, 100]] * y.shape[1]] * y.shape[0]
else:
result = [[-100, 100]] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# one broadcastable (3,) fill_value
fill_value = [100, 200, 300]
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp(-10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
[200, 200],
[300, 300]]] * 2)
# one broadcastable (2,) fill_value
fill_value = [100, 200]
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y225, self.y325, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
result = [100, 200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(10), result)
assert_array_almost_equal(interp(-10), result)
result = [[100, 100], [200, 200]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# broadcastable (3,) lower, singleton upper
fill_value = (np.array([-100, -200, -300]), 100)
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 100],
[-300, 100]]] * 2)
# broadcastable (2,) lower, singleton upper
fill_value = (np.array([-100, -200]), 100)
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y225, self.y325, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
result = [-100, -200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(-10), result)
result = [[-100, 100], [-200, 100]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# broadcastable (3,) lower, broadcastable (3,) upper
fill_value = ([-100, -200, -300], [100, 200, 300])
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2): # check ndarray as well as list here
if ii == 1:
fill_value = tuple(np.array(f) for f in fill_value)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 200],
[-300, 300]]] * 2)
# broadcastable (2,) lower, broadcastable (2,) upper
fill_value = ([-100, -200], [100, 200])
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y325, self.y225, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
result = [100, 200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(10), result)
result = [-100, -200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(-10), result)
result = [[-100, 100], [-200, 200]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# one broadcastable (2, 2) array-like
fill_value = [[100, 200], [1000, 2000]]
for y in (self.y235, self.y325, self.y25):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2):
if ii == 1:
fill_value = np.array(fill_value)
interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp(-10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
[200, 200]],
[[1000, 1000],
[2000, 2000]]])
# broadcastable (2, 2) lower, broadcastable (2, 2) upper
fill_value = ([[-100, -200], [-1000, -2000]],
[[100, 200], [1000, 2000]])
for y in (self.y235, self.y325, self.y25):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2):
if ii == 1:
fill_value = (np.array(fill_value[0]), np.array(fill_value[1]))
interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp(-10), [[-100, -200],
[-1000, -2000]])
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 200]],
[[-1000, 1000],
[-2000, 2000]]])
def test_fill_value(self):
# test that two-element fill value works
for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
'zero', 'previous', 'next'):
self._check_fill_value(kind)
def test_fill_value_writeable(self):
# backwards compat: fill_value is a public writeable attribute
interp = interp1d(self.x10, self.y10, fill_value=123.0)
assert_equal(interp.fill_value, 123.0)
interp.fill_value = 321.0
assert_equal(interp.fill_value, 321.0)
def _nd_check_interp(self, kind='linear'):
# Check the behavior when the inputs and outputs are multidimensional.
# Multidimensional input.
interp10 = interp1d(self.x10, self.y10, kind=kind)
assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
np.array([[3., 5.], [2., 7.]]))
# Scalar input -> 0-dim scalar array output
assert_(isinstance(interp10(1.2), np.ndarray))
assert_equal(interp10(1.2).shape, ())
# Multidimensional outputs.
interp210 = interp1d(self.x10, self.y210, kind=kind)
assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
assert_array_almost_equal(interp210(np.array([1., 2.])),
np.array([[1., 2.], [11., 12.]]))
interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
assert_array_almost_equal(interp102(np.array([1., 3.])),
np.array([[2., 3.], [6., 7.]]))
# Both at the same time!
x_new = np.array([[3., 5.], [2., 7.]])
assert_array_almost_equal(interp210(x_new),
np.array([[[3., 5.], [2., 7.]],
[[13., 15.], [12., 17.]]]))
assert_array_almost_equal(interp102(x_new),
np.array([[[6., 7.], [10., 11.]],
[[4., 5.], [14., 15.]]]))
def _nd_check_shape(self, kind='linear'):
# Check large ndim output shape
a = [4, 5, 6, 7]
y = np.arange(np.prod(a)).reshape(*a)
for n, s in enumerate(a):
x = np.arange(s)
z = interp1d(x, y, axis=n, kind=kind)
assert_array_almost_equal(z(x), y, err_msg=kind)
x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
b = list(a)
b[n:n+1] = [2,3,1]
assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
def test_nd(self):
for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest',
'zero', 'previous', 'next'):
self._nd_check_interp(kind)
self._nd_check_shape(kind)
def _check_complex(self, dtype=np.complex_, kind='linear'):
x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
y = x * x ** (1 + 2j)
y = y.astype(dtype)
# simple test
c = interp1d(x, y, kind=kind)
assert_array_almost_equal(y[:-1], c(x)[:-1])
# check against interpolating real+imag separately
xi = np.linspace(1, 10, 31)
cr = interp1d(x, y.real, kind=kind)
ci = interp1d(x, y.imag, kind=kind)
assert_array_almost_equal(c(xi).real, cr(xi))
assert_array_almost_equal(c(xi).imag, ci(xi))
def test_complex(self):
for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
'zero', 'previous', 'next'):
self._check_complex(np.complex64, kind)
self._check_complex(np.complex128, kind)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_circular_refs(self):
# Test interp1d can be automatically garbage collected
x = np.linspace(0, 1)
y = np.linspace(0, 1)
# Confirm interp can be released from memory after use
with assert_deallocated(interp1d, x, y) as interp:
new_y = interp([0.1, 0.2])
del interp
def test_overflow_nearest(self):
# Test that the x range doesn't overflow when given integers as input
for kind in ('nearest', 'previous', 'next'):
x = np.array([0, 50, 127], dtype=np.int8)
ii = interp1d(x, x, kind=kind)
assert_array_almost_equal(ii(x), x)
def test_local_nans(self):
# check that for local interpolation kinds (slinear, zero) a single nan
# only affects its local neighborhood
x = np.arange(10).astype(float)
y = x.copy()
y[6] = np.nan
for kind in ('zero', 'slinear'):
ir = interp1d(x, y, kind=kind)
vals = ir([4.9, 7.0])
assert_(np.isfinite(vals).all())
def test_spline_nans(self):
# Backwards compat: a single nan makes the whole spline interpolation
# return nans in an array of the correct shape; no exception is raised,
# just quiet nans.
x = np.arange(8).astype(float)
y = x.copy()
yn = y.copy()
yn[3] = np.nan
for kind in ['quadratic', 'cubic']:
ir = interp1d(x, y, kind=kind)
irn = interp1d(x, yn, kind=kind)
for xnew in (6, [1, 6], [[1, 6], [3, 5]]):
xnew = np.asarray(xnew)
out, outn = ir(xnew), irn(xnew)
assert_(np.isnan(outn).all())
assert_equal(out.shape, outn.shape)
class TestLagrange(object):
def test_lagrange(self):
p = poly1d([5,2,1,4,3])
xs = np.arange(len(p.coeffs))
ys = p(xs)
pl = lagrange(xs,ys)
assert_array_almost_equal(p.coeffs,pl.coeffs)
class TestAkima1DInterpolator(object):
def test_eval(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344, 5.9803623910336236590978842,
5.5067291516462386624652936, 5.2031367459745245795943447,
4.1796554159017080820603951, 3.4110386597938129327189927,
3.])
assert_allclose(ak(xi), yi)
def test_eval_2d(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
y = np.column_stack((y, 2. * y))
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344,
5.9803623910336236590978842,
5.5067291516462386624652936,
5.2031367459745245795943447,
4.1796554159017080820603951,
3.4110386597938129327189927, 3.])
yi = np.column_stack((yi, 2. * yi))
assert_allclose(ak(xi), yi)
def test_eval_3d(self):
x = np.arange(0., 11.)
y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
y = np.empty((11, 2, 2))
y[:, 0, 0] = y_
y[:, 1, 0] = 2. * y_
y[:, 0, 1] = 3. * y_
y[:, 1, 1] = 4. * y_
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.empty((13, 2, 2))
yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344,
5.9803623910336236590978842,
5.5067291516462386624652936,
5.2031367459745245795943447,
4.1796554159017080820603951,
3.4110386597938129327189927, 3.])
yi[:, 0, 0] = yi_
yi[:, 1, 0] = 2. * yi_
yi[:, 0, 1] = 3. * yi_
yi[:, 1, 1] = 4. * yi_
assert_allclose(ak(xi), yi)
def test_degenerate_case_multidimensional(self):
# This test is for issue #5683.
x = np.array([0, 1, 2])
y = np.vstack((x, x**2)).T
ak = Akima1DInterpolator(x, y)
x_eval = np.array([0.5, 1.5])
y_eval = ak(x_eval)
assert_allclose(y_eval, np.vstack((x_eval, x_eval**2)).T)
def test_extend(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
# Extending an Akima interpolator is not implemented; check that the
# documented NotImplementedError is raised with the expected message.
with assert_raises(NotImplementedError, match="Extending a 1D Akima "
                   "interpolator is not yet implemented"):
    ak.extend(None, None)
class TestPPolyCommon(object):
# test basic functionality for PPoly and BPoly
def test_sort_check(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 1, 0.5])
assert_raises(ValueError, PPoly, c, x)
assert_raises(ValueError, BPoly, c, x)
def test_ctor_c(self):
# wrong shape: `c` must be at least 2-dimensional
with assert_raises(ValueError):
PPoly([1, 2], [0, 1])
def test_extend(self):
# Test adding new points to the piecewise polynomial
np.random.seed(1234)
order = 3
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
for cls in (PPoly, BPoly):
pp = cls(c[:,:9], x[:10])
pp.extend(c[:,9:], x[10:])
pp2 = cls(c[:, 10:], x[10:])
pp2.extend(c[:, :10], x[:10])
pp3 = cls(c, x)
assert_array_equal(pp.c, pp3.c)
assert_array_equal(pp.x, pp3.x)
assert_array_equal(pp2.c, pp3.c)
assert_array_equal(pp2.x, pp3.x)
def test_extend_diff_orders(self):
# Test extending a polynomial with one of a different order
np.random.seed(1234)
x = np.linspace(0, 1, 6)
c = np.random.rand(2, 5)
x2 = np.linspace(1, 2, 6)
c2 = np.random.rand(4, 5)
for cls in (PPoly, BPoly):
pp1 = cls(c, x)
pp2 = cls(c2, x2)
pp_comb = cls(c, x)
pp_comb.extend(c2, x2[1:])
# NB: pp_comb doesn't match pp1 at the endpoint, because pp1 is not
# continuous with pp2 (the coefs are random).
xi1 = np.linspace(0, 1, 300, endpoint=False)
xi2 = np.linspace(1, 2, 300)
assert_allclose(pp1(xi1), pp_comb(xi1))
assert_allclose(pp2(xi2), pp_comb(xi2))
def test_extend_descending(self):
np.random.seed(0)
order = 3
x = np.sort(np.random.uniform(0, 10, 20))
c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3)
for cls in (PPoly, BPoly):
p = cls(c, x)
p1 = cls(c[:, :9], x[:10])
p1.extend(c[:, 9:], x[10:])
p2 = cls(c[:, 10:], x[10:])
p2.extend(c[:, :10], x[:10])
assert_array_equal(p1.c, p.c)
assert_array_equal(p1.x, p.x)
assert_array_equal(p2.c, p.c)
assert_array_equal(p2.x, p.x)
def test_shape(self):
np.random.seed(1234)
c = np.random.rand(8, 12, 5, 6, 7)
x = np.sort(np.random.rand(13))
xp = np.random.rand(3, 4)
for cls in (PPoly, BPoly):
p = cls(c, x)
assert_equal(p(xp).shape, (3, 4, 5, 6, 7))
# 'scalars'
for cls in (PPoly, BPoly):
p = cls(c[..., 0, 0, 0], x)
assert_equal(np.shape(p(0.5)), ())
assert_equal(np.shape(p(np.array(0.5))), ())
# can't use dtype=object (with any numpy; what fails is
# constructing the object array here for old numpy)
assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]]))
def test_complex_coef(self):
np.random.seed(12345)
x = np.sort(np.random.random(13))
c = np.random.random((8, 12)) * (1. + 0.3j)
c_re, c_im = c.real, c.imag
xp = np.random.random(5)
for cls in (PPoly, BPoly):
p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
for nu in [0, 1, 2]:
assert_allclose(p(xp, nu).real, p_re(xp, nu))
assert_allclose(p(xp, nu).imag, p_im(xp, nu))
def test_axis(self):
np.random.seed(12345)
c = np.random.rand(3, 4, 5, 6, 7, 8)
c_s = c.shape
xp = np.random.random((1, 2))
for axis in (0, 1, 2, 3):
k, m = c.shape[axis], c.shape[axis+1]
x = np.sort(np.random.rand(m+1))
for cls in (PPoly, BPoly):
p = cls(c, x, axis=axis)
assert_equal(p.c.shape,
c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:])
res = p(xp)
targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:]
assert_equal(res.shape, targ_shape)
# deriv/antideriv does not drop the axis
for p1 in [cls(c, x, axis=axis).derivative(),
cls(c, x, axis=axis).derivative(2),
cls(c, x, axis=axis).antiderivative(),
cls(c, x, axis=axis).antiderivative(2)]:
assert_equal(p1.axis, p.axis)
# c array needs two axes for the coefficients and intervals, so
# 0 <= axis < c.ndim-1; raise otherwise
for axis in (-1, 4, 5, 6):
for cls in (BPoly, PPoly):
assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
class TestPolySubclassing(object):
class P(PPoly):
pass
class B(BPoly):
pass
def _make_polynomials(self):
np.random.seed(1234)
x = np.sort(np.random.random(3))
c = np.random.random((4, 2))
return self.P(c, x), self.B(c, x)
def test_derivative(self):
pp, bp = self._make_polynomials()
for p in (pp, bp):
pd = p.derivative()
assert_equal(p.__class__, pd.__class__)
ppa = pp.antiderivative()
assert_equal(pp.__class__, ppa.__class__)
def test_from_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = self.P.from_spline(spl)
assert_equal(pp.__class__, self.P)
def test_conversions(self):
pp, bp = self._make_polynomials()
pp1 = self.P.from_bernstein_basis(bp)
assert_equal(pp1.__class__, self.P)
bp1 = self.B.from_power_basis(pp)
assert_equal(bp1.__class__, self.B)
def test_from_derivatives(self):
x = [0, 1, 2]
y = [[1], [2], [3]]
bp = self.B.from_derivatives(x, y)
assert_equal(bp.__class__, self.B)
class TestPPoly(object):
def test_simple(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
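# PPoly uses the local power basis: on [x[i], x[i+1]] the value is
# sum(c[m, i] * (xp - x[i])**(k-m)), which the two checks below expand
# by hand for each interval.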
assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
def test_periodic(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 0.5, 1])
p = PPoly(c, x, extrapolate='periodic')
assert_allclose(p(1.3), 1 * 0.3 ** 2 + 2 * 0.3 + 3)
assert_allclose(p(-0.3), 4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6)
assert_allclose(p(1.3, 1), 2 * 0.3 + 2)
assert_allclose(p(-0.3, 1), 8 * (0.7 - 0.5) + 5)
def test_descending(self):
def binom_matrix(power):
n = np.arange(power + 1).reshape(-1, 1)
k = np.arange(power + 1)
B = binom(n, k)
return B[::-1, ::-1]
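# The flipped binomial matrix re-expands each local polynomial about
# the opposite endpoint of its interval (a shift by the interval
# length); it is used below to convert ascending-breakpoint
# coefficients into the equivalent descending-breakpoint ones.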
np.random.seed(0)
power = 3
for m in [10, 20, 30]:
x = np.sort(np.random.uniform(0, 10, m + 1))
ca = np.random.uniform(-2, 2, size=(power + 1, m))
h = np.diff(x)
h_powers = h[None, :] ** np.arange(power + 1)[::-1, None]
B = binom_matrix(power)
cap = ca * h_powers
cdp = np.dot(B.T, cap)
cd = cdp / h_powers
pa = PPoly(ca, x, extrapolate=True)
pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True)
x_test = np.random.uniform(-10, 20, 100)
assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
pa_d = pa.derivative()
pd_d = pd.derivative()
assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
# Antiderivatives won't be equal because fixing continuity is
# done in the reverse order, but the differences (definite
# integrals) should be equal.
pa_i = pa.antiderivative()
pd_i = pd.antiderivative()
for a, b in np.random.uniform(-10, 20, (5, 2)):
int_a = pa.integrate(a, b)
int_d = pd.integrate(a, b)
assert_allclose(int_a, int_d, rtol=1e-13)
assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
rtol=1e-13)
roots_d = pd.roots()
roots_a = pa.roots()
assert_allclose(roots_a, np.sort(roots_d), rtol=1e-12)
def test_multi_shape(self):
c = np.random.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
assert_equal(p.x.shape, x.shape)
assert_equal(p.c.shape, c.shape)
assert_equal(p(0.3).shape, c.shape[2:])
assert_equal(p(np.random.rand(5, 6)).shape, (5, 6) + c.shape[2:])
dp = p.derivative()
assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
ip = p.antiderivative()
assert_equal(ip.c.shape, (7, 2, 1, 2, 3))
def test_construct_fast(self):
np.random.seed(1234)
c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
x = np.array([0, 0.5, 1])
p = PPoly.construct_fast(c, x)
assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
def test_vs_alternative_implementations(self):
np.random.seed(1234)
c = np.random.rand(3, 12, 22)
x = np.sort(np.r_[0, np.random.rand(11), 1])
p = PPoly(c, x)
xp = np.r_[0.3, 0.5, 0.33, 0.6]
expected = _ppoly_eval_1(c, x, xp)
assert_allclose(p(xp), expected)
expected = _ppoly_eval_2(c[:,:,0], x, xp)
assert_allclose(p(xp)[:,0], expected)
def test_from_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
assert_allclose(pp(xi), splev(xi, spl))
# make sure .from_spline accepts BSpline objects
b = BSpline(*spl)
ppp = PPoly.from_spline(b)
assert_allclose(ppp(xi), b(xi))
# BSpline's extrapolate attribute propagates unless overridden
t, c, k = spl
for extrap in (None, True, False):
b = BSpline(t, c, k, extrapolate=extrap)
p = PPoly.from_spline(b)
assert_equal(p.extrapolate, b.extrapolate)
def test_derivative_simple(self):
np.random.seed(1234)
c = np.array([[4, 3, 2, 1]]).T
dc = np.array([[3*4, 2*3, 2]]).T
ddc = np.array([[2*3*4, 1*2*3]]).T
x = np.array([0, 1])
pp = PPoly(c, x)
dpp = PPoly(dc, x)
ddpp = PPoly(ddc, x)
assert_allclose(pp.derivative().c, dpp.c)
assert_allclose(pp.derivative(2).c, ddpp.c)
def test_derivative_eval(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 3):
assert_allclose(pp(xi, dx), splev(xi, spl, dx))
def test_derivative(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 10):
assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
err_msg="dx=%d" % (dx,))
def test_antiderivative_of_constant(self):
# https://github.com/scipy/scipy/issues/4216
p = PPoly([[1.]], [0, 1])
assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)
def test_antiderivative_regression_4355(self):
# https://github.com/scipy/scipy/issues/4355
p = PPoly([[1., 0.5]], [0, 1, 2])
q = p.antiderivative()
assert_equal(q.c, [[1, 0.5], [0, 1]])
assert_equal(q.x, [0, 1, 2])
assert_allclose(p.integrate(0, 2), 1.5)
assert_allclose(q(2) - q(0), 1.5)
def test_antiderivative_simple(self):
np.random.seed(1234)
# [ p1(x) = 3*x**2 + 2*x + 1,
# p2(x) = 1.6875]
c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
# [ pp1(x) = x**3 + x**2 + x,
# pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
# [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
# ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
iic = np.array([[1/4, 1/3, 1/2, 0, 0],
[0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
x = np.array([0, 0.25, 1])
pp = PPoly(c, x)
ipp = pp.antiderivative()
iipp = pp.antiderivative(2)
iipp2 = ipp.antiderivative()
assert_allclose(ipp.x, x)
assert_allclose(ipp.c.T, ic.T)
assert_allclose(iipp.c.T, iic.T)
assert_allclose(iipp2.c.T, iic.T)
def test_antiderivative_vs_derivative(self):
np.random.seed(1234)
x = np.linspace(0, 1, 30)**2
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
ipp = pp.antiderivative(dx)
# check that derivative is inverse op
pp2 = ipp.derivative(dx)
assert_allclose(pp.c, pp2.c)
# check continuity
for k in range(dx):
pp2 = ipp.derivative(k)
r = 1e-13
endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]
assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))
def test_antiderivative_vs_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
pp2 = pp.antiderivative(dx)
spl2 = splantider(spl, dx)
xi = np.linspace(0, 1, 200)
assert_allclose(pp2(xi), splev(xi, spl2),
rtol=1e-7)
def test_antiderivative_continuity(self):
c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
ip = p.antiderivative()
# check continuity
assert_allclose(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8)
# check that only lowest order coefficients were changed
p2 = ip.derivative()
assert_allclose(p2.c, p.c)
def test_integrate(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
a, b = 0.3, 0.9
ig = pp.integrate(a, b)
ipp = pp.antiderivative()
assert_allclose(ig, ipp(b) - ipp(a))
assert_allclose(ig, splint(a, b, spl))
a, b = -0.3, 0.9
ig = pp.integrate(a, b, extrapolate=True)
assert_allclose(ig, ipp(b) - ipp(a))
assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())
def test_integrate_periodic(self):
x = np.array([1, 2, 4])
c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
P = PPoly(c, x, extrapolate='periodic')
I = P.antiderivative()
period_int = I(4) - I(1)
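# Any integration window reduces to a whole number of periods plus a
# remainder inside the base interval [1, 4), so the checks below only
# evaluate the antiderivative I within that interval.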
assert_allclose(P.integrate(1, 4), period_int)
assert_allclose(P.integrate(-10, -7), period_int)
assert_allclose(P.integrate(-10, -4), 2 * period_int)
assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5 + 12, 5 + 12),
I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5, 5 + 12),
I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
assert_allclose(P.integrate(0, -1), I(2) - I(3))
assert_allclose(P.integrate(-9, -10), I(2) - I(3))
assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
def test_roots(self):
x = np.linspace(0, 1, 31)**2
y = np.sin(30*x)
spl = splrep(x, y, s=0, k=3)
pp = PPoly.from_spline(spl)
r = pp.roots()
r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
assert_allclose(r, sproot(spl), atol=1e-15)
def test_roots_idzero(self):
# Roots for piecewise polynomials with identically zero
# sections.
c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
x = np.array([0, 0.4, 0.6, 1.0])
pp = PPoly(c, x)
assert_array_equal(pp.roots(),
[0.25, 0.4, np.nan, 0.6 + 0.25])
# ditto for p.solve(const) with sections identically equal const
const = 2.
c1 = c.copy()
c1[1, :] += const
pp1 = PPoly(c1, x)
assert_array_equal(pp1.solve(const),
[0.25, 0.4, np.nan, 0.6 + 0.25])
def test_roots_all_zero(self):
# test the code path for the polynomial being identically zero everywhere
c = [[0], [0]]
x = [0, 1]
p = PPoly(c, x)
assert_array_equal(p.roots(), [0, np.nan])
assert_array_equal(p.solve(0), [0, np.nan])
assert_array_equal(p.solve(1), [])
c = [[0, 0], [0, 0]]
x = [0, 1, 2]
p = PPoly(c, x)
assert_array_equal(p.roots(), [0, np.nan, 1, np.nan])
assert_array_equal(p.solve(0), [0, np.nan, 1, np.nan])
assert_array_equal(p.solve(1), [])
def test_roots_repeated(self):
# Check roots repeated in multiple sections are reported only
# once.
# [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
c = np.array([[1, 0, -1], [-1, 0, 0]]).T
x = np.array([-1, 0, 1])
pp = PPoly(c, x)
assert_array_equal(pp.roots(), [-2, 0])
assert_array_equal(pp.roots(extrapolate=False), [0])
def test_roots_discont(self):
# Check that a discontinuity across zero is reported as root
c = np.array([[1], [-1]]).T
x = np.array([0, 0.5, 1])
pp = PPoly(c, x)
assert_array_equal(pp.roots(), [0.5])
assert_array_equal(pp.roots(discontinuity=False), [])
# ditto for a discontinuity across y:
assert_array_equal(pp.solve(0.5), [0.5])
assert_array_equal(pp.solve(0.5, discontinuity=False), [])
assert_array_equal(pp.solve(1.5), [])
assert_array_equal(pp.solve(1.5, discontinuity=False), [])
def test_roots_random(self):
# Check high-order polynomials with random coefficients
np.random.seed(1234)
num = 0
for extrapolate in (True, False):
for order in range(0, 20):
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
pp = PPoly(c, x)
for y in [0, np.random.random()]:
r = pp.solve(y, discontinuity=False, extrapolate=extrapolate)
for i in range(2):
for j in range(3):
rr = r[i,j]
if rr.size > 0:
# Check that the reported roots indeed are roots
num += rr.size
val = pp(rr, extrapolate=extrapolate)[:,i,j]
cmpval = pp(rr, nu=1,
extrapolate=extrapolate)[:,i,j]
msg = "(%r) r = %s" % (extrapolate, repr(rr),)
assert_allclose((val-y) / cmpval, 0, atol=1e-7,
err_msg=msg)
# Check that we checked a number of roots
assert_(num > 100, repr(num))
def test_roots_croots(self):
# Test the complex root finding algorithm
np.random.seed(1234)
for k in range(1, 15):
c = np.random.rand(k, 1, 130)
if k == 3:
# add a case with zero discriminant
c[:,0,0] = 1, 2, 1
for y in [0, np.random.random()]:
w = np.empty(c.shape, dtype=complex)
_ppoly._croots_poly1(c, w)
if k == 1:
assert_(np.isnan(w).all())
continue
res = 0
cres = 0
for i in range(k):
res += c[i,None] * w**(k-1-i)
cres += abs(c[i,None] * w**(k-1-i))
with np.errstate(invalid='ignore'):
res /= cres
res = res.ravel()
res = res[~np.isnan(res)]
assert_allclose(res, 0, atol=1e-10)
def test_extrapolate_attr(self):
# [ 1 - x**2 ]
c = np.array([[-1, 0, 1]]).T
x = np.array([0, 1])
for extrapolate in [True, False, None]:
pp = PPoly(c, x, extrapolate=extrapolate)
pp_d = pp.derivative()
pp_i = pp.antiderivative()
if extrapolate is False:
assert_(np.isnan(pp([-0.1, 1.1])).all())
assert_(np.isnan(pp_i([-0.1, 1.1])).all())
assert_(np.isnan(pp_d([-0.1, 1.1])).all())
assert_equal(pp.roots(), [1])
else:
assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
assert_allclose(pp.roots(), [1, -1])
class TestBPoly(object):
def test_simple(self):
x = [0, 1]
c = [[3]]
bp = BPoly(c, x)
assert_allclose(bp(0.1), 3.)
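# The tests below expand the Bernstein basis by hand: on [0, 1],
# bp(x) = sum_a c[a] * binom(k, a) * x**a * (1-x)**(k-a)
# for degree k = len(c) - 1.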
def test_simple2(self):
x = [0, 1]
c = [[3], [1]]
bp = BPoly(c, x) # 3*(1-x) + 1*x
assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)
def test_simple3(self):
x = [0, 1]
c = [[3], [1], [4]]
bp = BPoly(c, x) # 3*(1-x)**2 + 1 * 2*x*(1-x) + 4*x**2
assert_allclose(bp(0.2),
3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)
def test_simple4(self):
x = [0, 1]
c = [[1], [1], [1], [2]]
bp = BPoly(c, x)
assert_allclose(bp(0.3), 0.7**3 +
3 * 0.7**2 * 0.3 +
3 * 0.7 * 0.3**2 +
2 * 0.3**3)
def test_simple5(self):
x = [0, 1]
c = [[1], [1], [8], [2], [1]]
bp = BPoly(c, x)
assert_allclose(bp(0.3), 0.7**4 +
4 * 0.7**3 * 0.3 +
8 * 6 * 0.7**2 * 0.3**2 +
2 * 4 * 0.7 * 0.3**3 +
0.3**4)
def test_periodic(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
# [3*(1-x)**2, 2*((x-1)/2)**2]
bp = BPoly(c, x, extrapolate='periodic')
assert_allclose(bp(3.4), 3 * 0.6**2)
assert_allclose(bp(-1.3), 2 * (0.7/2)**2)
assert_allclose(bp(3.4, 1), -6 * 0.6)
assert_allclose(bp(-1.3, 1), 2 * (0.7/2))
def test_descending(self):
np.random.seed(0)
power = 3
for m in [10, 20, 30]:
x = np.sort(np.random.uniform(0, 10, m + 1))
ca = np.random.uniform(-0.1, 0.1, size=(power + 1, m))
# We need only to flip coefficients to get it right!
cd = ca[::-1].copy()
pa = BPoly(ca, x, extrapolate=True)
pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True)
x_test = np.random.uniform(-10, 20, 100)
assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
pa_d = pa.derivative()
pd_d = pd.derivative()
assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
# Antiderivatives won't be equal because fixing continuity is
# done in the reverse order, but the differences (definite
# integrals) should be equal.
pa_i = pa.antiderivative()
pd_i = pd.antiderivative()
for a, b in np.random.uniform(-10, 20, (5, 2)):
int_a = pa.integrate(a, b)
int_d = pd.integrate(a, b)
assert_allclose(int_a, int_d, rtol=1e-12)
assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
rtol=1e-12)
def test_multi_shape(self):
c = np.random.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = BPoly(c, x)
assert_equal(p.x.shape, x.shape)
assert_equal(p.c.shape, c.shape)
assert_equal(p(0.3).shape, c.shape[2:])
assert_equal(p(np.random.rand(5,6)).shape,
(5,6)+c.shape[2:])
dp = p.derivative()
assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
def test_interval_length(self):
x = [0, 2]
c = [[3], [1], [4]]
bp = BPoly(c, x)
xval = 0.1
s = xval / 2 # s = (x - xa) / (xb - xa)
assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)
def test_two_intervals(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
assert_allclose(bp(0.4), 3 * 0.6*0.6)
assert_allclose(bp(1.7), 2 * (0.7/2)**2)
def test_extrapolate_attr(self):
x = [0, 2]
c = [[3], [1], [4]]
bp = BPoly(c, x)
for extrapolate in (True, False, None):
bp = BPoly(c, x, extrapolate=extrapolate)
bp_d = bp.derivative()
if extrapolate is False:
assert_(np.isnan(bp([-0.1, 2.1])).all())
assert_(np.isnan(bp_d([-0.1, 2.1])).all())
else:
assert_(not np.isnan(bp([-0.1, 2.1])).any())
assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
class TestBPolyCalculus(object):
def test_derivative(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
bp_der = bp.derivative()
assert_allclose(bp_der(0.4), -6*(0.6))
assert_allclose(bp_der(1.7), 0.7)
# derivatives in-place
assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
[-6*(1-0.4), 6., 0.])
assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
[0.7, 1., 0])
def test_derivative_ppoly(self):
# make sure it's consistent w/ power basis
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
for d in range(k):
bp = bp.derivative()
pp = pp.derivative()
xp = np.linspace(x[0], x[-1], 21)
assert_allclose(bp(xp), pp(xp))
def test_deriv_inplace(self):
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
# test both real and complex coefficients
for cc in [c.copy(), c*(1. + 2.j)]:
bp = BPoly(cc, x)
xp = np.linspace(x[0], x[-1], 21)
for i in range(k):
assert_allclose(bp(xp, i), bp.derivative(i)(xp))
def test_antiderivative_simple(self):
# f(x) = x for x \in [0, 1),
# (x-1)/2 for x \in [1, 3]
#
# antiderivative is then
# F(x) = x**2 / 2 for x \in [0, 1),
# 0.5*x*(x/2 - 1) + A for x \in [1, 3]
# where A = 3/4 for continuity at x = 1.
x = [0, 1, 3]
c = [[0, 0], [1, 1]]
bp = BPoly(c, x)
bi = bp.antiderivative()
xx = np.linspace(0, 3, 11)
assert_allclose(bi(xx),
np.where(xx < 1, xx**2 / 2.,
0.5 * xx * (xx/2. - 1) + 3./4),
atol=1e-12, rtol=1e-12)
def test_der_antider(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10, 2, 3))
bp = BPoly(c, x)
xx = np.linspace(x[0], x[-1], 100)
assert_allclose(bp.antiderivative().derivative()(xx),
bp(xx), atol=1e-12, rtol=1e-12)
def test_antider_ppoly(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10, 2, 3))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
xx = np.linspace(x[0], x[-1], 10)
assert_allclose(bp.antiderivative(2)(xx),
pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)
def test_antider_continuous(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10))
bp = BPoly(c, x).antiderivative()
xx = bp.x[1:-1]
assert_allclose(bp(xx - 1e-14),
bp(xx + 1e-14), atol=1e-12, rtol=1e-12)
def test_integrate(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
assert_allclose(bp.integrate(0, 1),
pp.integrate(0, 1), atol=1e-12, rtol=1e-12)
def test_integrate_extrap(self):
c = [[1]]
x = [0, 1]
b = BPoly(c, x)
# default is extrapolate=True
assert_allclose(b.integrate(0, 2), 2., atol=1e-14)
# .integrate argument overrides self.extrapolate
b1 = BPoly(c, x, extrapolate=False)
assert_(np.isnan(b1.integrate(0, 2)))
assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)
def test_integrate_periodic(self):
x = np.array([1, 2, 4])
c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
P = BPoly.from_power_basis(PPoly(c, x), extrapolate='periodic')
I = P.antiderivative()
period_int = I(4) - I(1)
assert_allclose(P.integrate(1, 4), period_int)
assert_allclose(P.integrate(-10, -7), period_int)
assert_allclose(P.integrate(-10, -4), 2 * period_int)
assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5 + 12, 5 + 12),
I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5, 5 + 12),
I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
assert_allclose(P.integrate(0, -1), I(2) - I(3))
assert_allclose(P.integrate(-9, -10), I(2) - I(3))
assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
def test_antider_neg(self):
# .derivative(-nu) ==> .antiderivative(nu) and vice versa
c = [[1]]
x = [0, 1]
b = BPoly(c, x)
xx = np.linspace(0, 1, 21)
assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
atol=1e-12, rtol=1e-12)
assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
atol=1e-12, rtol=1e-12)
class TestPolyConversions(object):
def test_bp_from_pp(self):
x = [0, 1, 3]
c = [[3, 2], [1, 8], [4, 3]]
pp = PPoly(c, x)
bp = BPoly.from_power_basis(pp)
pp1 = PPoly.from_bernstein_basis(bp)
xp = [0.1, 1.4]
assert_allclose(pp(xp), bp(xp))
assert_allclose(pp(xp), pp1(xp))
def test_bp_from_pp_random(self):
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
pp = PPoly(c, x)
bp = BPoly.from_power_basis(pp)
pp1 = PPoly.from_bernstein_basis(bp)
xp = np.linspace(x[0], x[-1], 21)
assert_allclose(pp(xp), bp(xp))
assert_allclose(pp(xp), pp1(xp))
def test_pp_from_bp(self):
x = [0, 1, 3]
c = [[3, 3], [1, 1], [4, 2]]
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
bp1 = BPoly.from_power_basis(pp)
xp = [0.1, 1.4]
assert_allclose(bp(xp), pp(xp))
assert_allclose(bp(xp), bp1(xp))
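# A minimal BPoly.from_derivatives sketch (illustrative): specifying value 0
# and slope 1 at x=0 plus value 1 at x=1 yields the quadratic that
# degenerates to f(x) = x.
#
#   >>> bp = BPoly.from_derivatives([0, 1], [[0, 1], [1]])
#   >>> float(bp(0)), float(bp(0, nu=1)), float(bp(1))
#   (0.0, 1.0, 1.0)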
class TestBPolyFromDerivatives(object):
def test_make_poly_1(self):
c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
assert_allclose(c1, [2., 3.])
def test_make_poly_2(self):
c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
assert_allclose(c1, [1., 1., 1.])
# f'(0) = 3
c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
assert_allclose(c2, [2., 7./2, 1.])
# f'(1) = 3
c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
assert_allclose(c3, [2., -0.5, 1.])
def test_make_poly_3(self):
# f'(0)=2, f''(0)=3
c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
assert_allclose(c1, [1., 5./3, 17./6, 4.])
# f'(1)=2, f''(1)=3
c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
assert_allclose(c2, [1., 19./6, 10./3, 4.])
# f'(0)=2, f'(1)=3
c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
assert_allclose(c3, [1., 5./3, 3., 4.])
def test_make_poly_12(self):
np.random.seed(12345)
ya = np.r_[0, np.random.random(5)]
yb = np.r_[0, np.random.random(5)]
c = BPoly._construct_from_derivatives(0, 1, ya, yb)
pp = BPoly(c[:, None], [0, 1])
for j in range(6):
assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
pp = pp.derivative()
def test_raise_degree(self):
np.random.seed(12345)
x = [0, 1]
k, d = 8, 5
c = np.random.random((k, 1, 2, 3, 4))
bp = BPoly(c, x)
c1 = BPoly._raise_degree(c, d)
bp1 = BPoly(c1, x)
xp = np.linspace(0, 1, 11)
assert_allclose(bp(xp), bp1(xp))
def test_xi_yi(self):
assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])
def test_coords_order(self):
xi = [0, 0, 1]
yi = [[0], [0], [0]]
assert_raises(ValueError, BPoly.from_derivatives, xi, yi)
def test_zeros(self):
xi = [0, 1, 2, 3]
yi = [[0, 0], [0], [0, 0], [0, 0]] # NB: will have to raise the degree
pp = BPoly.from_derivatives(xi, yi)
assert_(pp.c.shape == (4, 3))
ppd = pp.derivative()
for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
assert_allclose([pp(xp), ppd(xp)], [0., 0.])
def _make_random_mk(self, m, k):
# k derivatives at each breakpoint
np.random.seed(1234)
xi = np.asarray([1. * j**2 for j in range(m+1)])
yi = [np.random.random(k) for j in range(m+1)]
return xi, yi
def test_random_12(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
pp = BPoly.from_derivatives(xi, yi)
for order in range(k//2):
assert_allclose(pp(xi), [yy[order] for yy in yi])
pp = pp.derivative()
def test_order_zero(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
assert_raises(ValueError, BPoly.from_derivatives,
**dict(xi=xi, yi=yi, orders=0))
def test_orders_too_high(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
pp = BPoly.from_derivatives(xi, yi, orders=2*k-1) # this is still ok
assert_raises(ValueError, BPoly.from_derivatives, # but this is not
**dict(xi=xi, yi=yi, orders=2*k))
def test_orders_global(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
        # Local polynomials will be of order 5, which means that up to the
        # 2nd derivatives will be used at each breakpoint
order = 5
pp = BPoly.from_derivatives(xi, yi, orders=order)
for j in range(order//2+1):
assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
        # now repeat with `order` being even: on each interval, it uses
        # order//2 'derivatives' at the right-hand endpoint and
        # order//2+1 'derivatives' at the left-hand endpoint
order = 6
pp = BPoly.from_derivatives(xi, yi, orders=order)
for j in range(order//2):
assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
def test_orders_local(self):
m, k = 7, 12
xi, yi = self._make_random_mk(m, k)
orders = [o + 1 for o in range(m)]
for i, x in enumerate(xi[1:-1]):
pp = BPoly.from_derivatives(xi, yi, orders=orders)
for j in range(orders[i] // 2 + 1):
assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
def test_yi_trailing_dims(self):
m, k = 7, 5
xi = np.sort(np.random.random(m+1))
yi = np.random.random((m+1, k, 6, 7, 8))
pp = BPoly.from_derivatives(xi, yi)
assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
def test_gh_5430(self):
# At least one of these raises an error unless gh-5430 is
# fixed. In py2k an int is implemented using a C long, so
# which one fails depends on your system. In py3k there is only
# one arbitrary precision integer type, so both should fail.
orders = np.int32(1)
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
orders = np.int64(1)
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
orders = 1
# This worked before; make sure it still works
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
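# Illustrative sketch (added for exposition; not part of the original suite):
# with yi = [[f(x0), f'(x0)], [f(x1), f'(x1)]], from_derivatives builds the
# cubic Hermite interpolant; for f(x) = x the result is f itself.
def _demo_from_derivatives_hermite():
    pp = BPoly.from_derivatives([0, 1], [[0, 1], [1, 1]])
    assert_allclose(pp(0.5), 0.5)
    assert_allclose(pp.derivative()(0.5), 1.0)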
class TestNdPPoly(object):
def test_simple_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5)
x = np.linspace(0, 1, 5+1)
xi = np.random.rand(200)
p = NdPPoly(c, (x,))
v1 = p((xi,))
v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
assert_allclose(v1, v2)
def test_simple_2d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7)
x = np.linspace(0, 1, 6+1)
y = np.linspace(0, 1, 7+1)**2
xi = np.random.rand(200)
yi = np.random.rand(200)
v1 = np.empty([len(xi), 1], dtype=c.dtype)
v1.fill(np.nan)
_ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
(x, y),
np.array([4, 5], dtype=np.intc),
np.c_[xi, yi],
np.array([0, 0], dtype=np.intc),
1,
v1)
v1 = v1.ravel()
v2 = _ppoly2d_eval(c, (x, y), xi, yi)
assert_allclose(v1, v2)
p = NdPPoly(c, (x, y))
for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
v1 = p(np.c_[xi, yi], nu=nu)
v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
assert_allclose(v1, v2, err_msg=repr(nu))
def test_simple_3d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9)
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
xi = np.random.rand(40)
yi = np.random.rand(40)
zi = np.random.rand(40)
p = NdPPoly(c, (x, y, z))
for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
(6, 0, 2)):
v1 = p((xi, yi, zi), nu=nu)
v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
assert_allclose(v1, v2, err_msg=repr(nu))
def test_simple_4d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9, 10, 11)
x = np.linspace(0, 1, 8+1)
y = np.linspace(0, 1, 9+1)**2
z = np.linspace(0, 1, 10+1)**3
u = np.linspace(0, 1, 11+1)**4
xi = np.random.rand(20)
yi = np.random.rand(20)
zi = np.random.rand(20)
ui = np.random.rand(20)
p = NdPPoly(c, (x, y, z, u))
v1 = p((xi, yi, zi, ui))
v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui)
assert_allclose(v1, v2)
def test_deriv_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5)
x = np.linspace(0, 1, 5+1)
p = NdPPoly(c, (x,))
# derivative
dp = p.derivative(nu=[1])
p1 = PPoly(c, x)
dp1 = p1.derivative()
assert_allclose(dp.c, dp1.c)
# antiderivative
dp = p.antiderivative(nu=[2])
p1 = PPoly(c, x)
dp1 = p1.antiderivative(2)
assert_allclose(dp.c, dp1.c)
def test_deriv_3d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9)
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
p = NdPPoly(c, (x, y, z))
# differentiate vs x
p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x)
dp = p.derivative(nu=[2])
dp1 = p1.derivative(2)
assert_allclose(dp.c,
dp1.c.transpose(0, 2, 3, 1, 4, 5))
# antidifferentiate vs y
p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y)
dp = p.antiderivative(nu=[0, 1, 0])
dp1 = p1.antiderivative(1)
assert_allclose(dp.c,
dp1.c.transpose(2, 0, 3, 4, 1, 5))
# differentiate vs z
p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z)
dp = p.derivative(nu=[0, 0, 3])
dp1 = p1.derivative(3)
assert_allclose(dp.c,
dp1.c.transpose(2, 3, 0, 4, 5, 1))
def test_deriv_3d_simple(self):
# Integrate to obtain function x y**2 z**4 / (2! 4!)
c = np.ones((1, 1, 1, 3, 4, 5))
x = np.linspace(0, 1, 3+1)**1
y = np.linspace(0, 1, 4+1)**2
z = np.linspace(0, 1, 5+1)**3
p = NdPPoly(c, (x, y, z))
ip = p.antiderivative((1, 0, 4))
ip = ip.antiderivative((0, 2, 0))
xi = np.random.rand(20)
yi = np.random.rand(20)
zi = np.random.rand(20)
assert_allclose(ip((xi, yi, zi)),
xi * yi**2 * zi**4 / (gamma(3)*gamma(5)))
def test_integrate_2d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 16, 17)
x = np.linspace(0, 1, 16+1)**1
y = np.linspace(0, 1, 17+1)**2
# make continuously differentiable so that nquad() has an
# easier time
c = c.transpose(0, 2, 1, 3)
cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
_ppoly.fix_continuity(cx, x, 2)
c = cx.reshape(c.shape)
c = c.transpose(0, 2, 1, 3)
c = c.transpose(1, 3, 0, 2)
cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
_ppoly.fix_continuity(cx, y, 2)
c = cx.reshape(c.shape)
c = c.transpose(2, 0, 3, 1).copy()
# Check integration
p = NdPPoly(c, (x, y))
for ranges in [[(0, 1), (0, 1)],
[(0, 0.5), (0, 1)],
[(0, 1), (0, 0.5)],
[(0.3, 0.7), (0.6, 0.2)]]:
ig = p.integrate(ranges)
ig2, err2 = nquad(lambda x, y: p((x, y)), ranges,
opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2)
assert_allclose(ig, ig2, rtol=1e-5, atol=1e-5,
err_msg=repr(ranges))
def test_integrate_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 16, 17, 18)
x = np.linspace(0, 1, 16+1)**1
y = np.linspace(0, 1, 17+1)**2
z = np.linspace(0, 1, 18+1)**3
# Check 1D integration
p = NdPPoly(c, (x, y, z))
u = np.random.rand(200)
v = np.random.rand(200)
a, b = 0.2, 0.7
px = p.integrate_1d(a, b, axis=0)
pax = p.antiderivative((1, 0, 0))
assert_allclose(px((u, v)), pax((b, u, v)) - pax((a, u, v)))
py = p.integrate_1d(a, b, axis=1)
pay = p.antiderivative((0, 1, 0))
assert_allclose(py((u, v)), pay((u, b, v)) - pay((u, a, v)))
pz = p.integrate_1d(a, b, axis=2)
paz = p.antiderivative((0, 0, 1))
assert_allclose(pz((u, v)), paz((u, v, b)) - paz((u, v, a)))
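# Layout sketch (added for exposition; `_demo_ndppoly_layout` is not part of
# the original suite): for an NdPPoly in d dimensions, the first d axes of `c`
# hold polynomial coefficients (highest degree first) and the next d axes
# index the intervals, so a degree-(0, 0) array of ones is the constant 1.
def _demo_ndppoly_layout():
    c = np.ones((1, 1, 4, 5))
    xs = (np.linspace(0, 1, 5), np.linspace(0, 1, 6))
    p = NdPPoly(c, xs)
    assert_allclose(p((np.array([0.3]), np.array([0.7]))), [1.0])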
def _ppoly_eval_1(c, x, xps):
"""Evaluate piecewise polynomial manually"""
out = np.zeros((len(xps), c.shape[2]))
for i, xp in enumerate(xps):
if xp < 0 or xp > 1:
out[i,:] = np.nan
continue
j = np.searchsorted(x, xp) - 1
d = xp - x[j]
assert_(x[j] <= xp < x[j+1])
r = sum(c[k,j] * d**(c.shape[0]-k-1)
for k in range(c.shape[0]))
out[i,:] = r
return out
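# Spot check (added for exposition; `_demo_ppoly_eval_1` is not part of the
# original suite): for a single quadratic piece s**2 on [0, 1], the manual
# evaluator returns 0.5**2 == 0.25 at xp = 0.5.
def _demo_ppoly_eval_1():
    c = np.array([[1.], [0.], [0.]])[:, :, None]   # s**2 on one interval
    x = np.array([0., 1.])
    assert_allclose(_ppoly_eval_1(c, x, [0.5]).ravel(), [0.25])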
def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
"""Evaluate piecewise polynomial manually (another way)"""
a = breaks[0]
b = breaks[-1]
K = coeffs.shape[0]
saveshape = np.shape(xnew)
xnew = np.ravel(xnew)
res = np.empty_like(xnew)
mask = (xnew >= a) & (xnew <= b)
res[~mask] = fill
xx = xnew.compress(mask)
indxs = np.searchsorted(breaks, xx)-1
indxs = indxs.clip(0, len(breaks))
pp = coeffs
diff = xx - breaks.take(indxs)
V = np.vander(diff, N=K)
values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in xrange(len(xx))])
res[mask] = values
res.shape = saveshape
return res
def _dpow(x, y, n):
"""
d^n (x**y) / dx^n
"""
if n < 0:
raise ValueError("invalid derivative order")
elif n > y:
return 0
else:
return poch(y - n + 1, n) * x**(y - n)
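# Worked check (added for exposition; `_demo_dpow` is not part of the original
# suite): d(x**3)/dx = 3*x**2 and poch(3, 1) == 3, so _dpow(2.0, 3, 1) should
# equal 3 * 2.0**2 == 12; for n > y the derivative vanishes.
def _demo_dpow():
    assert_allclose(_dpow(2.0, 3, 1), 12.0)
    assert_allclose(_dpow(2.0, 1, 2), 0.0)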
def _ppoly2d_eval(c, xs, xnew, ynew, nu=None):
"""
Straightforward evaluation of 2D piecewise polynomial
"""
if nu is None:
nu = (0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
nx, ny = c.shape[:2]
for jout, (x, y) in enumerate(zip(xnew, ynew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
val += (c[nx-k1-1,ny-k2-1,j1,j2]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1]))
out[jout] = val
return out
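# Spot check (added for exposition; `_demo_ppoly2d_eval` is not part of the
# original suite): an all-ones degree-(0, 0) coefficient array makes every
# piece the constant 1, so the reference evaluator returns 1 in range.
def _demo_ppoly2d_eval():
    c = np.ones((1, 1, 2, 2))
    xs = (np.array([0., .5, 1.]), np.array([0., .5, 1.]))
    assert_allclose(_ppoly2d_eval(c, xs, [0.25], [0.75]), [1.0])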
def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None):
"""
Straightforward evaluation of 3D piecewise polynomial
"""
if nu is None:
nu = (0, 0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
nx, ny, nz = c.shape[:3]
for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1]) and
(xs[2][0] <= z <= xs[2][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
j3 = np.searchsorted(xs[2], z) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
s3 = z - xs[2][j3]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
for k3 in range(c.shape[2]):
val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1])
* _dpow(s3, k3, nu[2]))
out[jout] = val
return out
def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None):
"""
Straightforward evaluation of 4D piecewise polynomial
"""
if nu is None:
nu = (0, 0, 0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
mx, my, mz, mu = c.shape[:4]
for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1]) and
(xs[2][0] <= z <= xs[2][-1]) and
(xs[3][0] <= u <= xs[3][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
j3 = np.searchsorted(xs[2], z) - 1
j4 = np.searchsorted(xs[3], u) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
s3 = z - xs[2][j3]
s4 = u - xs[3][j4]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
for k3 in range(c.shape[2]):
for k4 in range(c.shape[3]):
val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1])
* _dpow(s3, k3, nu[2])
* _dpow(s4, k4, nu[3]))
out[jout] = val
return out
class TestRegularGridInterpolator(object):
def _get_sample_4d(self):
# create a 4d grid of 3 points in each dimension
points = [(0., .5, 1.)] * 4
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def _get_sample_4d_2(self):
# create another 4d grid of 3 points in each dimension
points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def test_list_input(self):
points, values = self._get_sample_4d()
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
for method in ['linear', 'nearest']:
interp = RegularGridInterpolator(points,
values.tolist(),
method=method)
v1 = interp(sample.tolist())
interp = RegularGridInterpolator(points,
values,
method=method)
v2 = interp(sample)
assert_allclose(v1, v2)
def test_complex(self):
points, values = self._get_sample_4d()
values = values - 2j*values
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
for method in ['linear', 'nearest']:
interp = RegularGridInterpolator(points, values,
method=method)
rinterp = RegularGridInterpolator(points, values.real,
method=method)
iinterp = RegularGridInterpolator(points, values.imag,
method=method)
v1 = interp(sample)
v2 = rinterp(sample) + 1j*iinterp(sample)
assert_allclose(v1, v2)
def test_linear_xi1d(self):
points, values = self._get_sample_4d_2()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([0.1, 0.1, 10., 9.])
wanted = 1001.1
assert_array_almost_equal(interp(sample), wanted)
def test_linear_xi3d(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
wanted = np.asarray([1001.1, 846.2, 555.5])
assert_array_almost_equal(interp(sample), wanted)
def test_nearest(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, method="nearest")
sample = np.asarray([0.1, 0.1, .9, .9])
wanted = 1100.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0.1, 0.1, 0.1, 0.1])
wanted = 0.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0., 0., 0., 0.])
wanted = 0.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([1., 1., 1., 1.])
wanted = 1111.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0.1, 0.4, 0.6, 0.9])
wanted = 1055.
assert_array_almost_equal(interp(sample), wanted)
def test_linear_edges(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
wanted = np.asarray([0., 1111.])
assert_array_almost_equal(interp(sample), wanted)
def test_valid_create(self):
# create a 2d grid of 3 points in each dimension
points = [(0., .5, 1.), (0., 1., .5)]
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis]
values1 = values[np.newaxis, :]
values = (values0 + values1 * 10)
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [((0., .5, 1.), ), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, .75, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values,
method="undefmethod")
def test_valid_call(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
assert_raises(ValueError, interp, sample, "undefmethod")
sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
assert_raises(ValueError, interp, sample)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
assert_raises(ValueError, interp, sample)
def test_out_of_bounds_extrap(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=None)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([0., 1111., 11., 11.])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
assert_array_almost_equal(interp(sample, method="linear"), wanted)
def test_out_of_bounds_extrap2(self):
points, values = self._get_sample_4d_2()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=None)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([0., 11., 11., 11.])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
assert_array_almost_equal(interp(sample, method="linear"), wanted)
def test_out_of_bounds_fill(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=np.nan)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([np.nan, np.nan, np.nan])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
assert_array_almost_equal(interp(sample, method="linear"), wanted)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
wanted = np.asarray([1001.1, 846.2, 555.5])
assert_array_almost_equal(interp(sample), wanted)
def test_nearest_compare_qhull(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, method="nearest")
points_qhull = itertools.product(*points)
points_qhull = [p for p in points_qhull]
points_qhull = np.asarray(points_qhull)
values_qhull = values.reshape(-1)
interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
assert_array_almost_equal(interp(sample), interp_qhull(sample))
def test_linear_compare_qhull(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
points_qhull = itertools.product(*points)
points_qhull = [p for p in points_qhull]
points_qhull = np.asarray(points_qhull)
values_qhull = values.reshape(-1)
interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
assert_array_almost_equal(interp(sample), interp_qhull(sample))
def test_duck_typed_values(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = MyValue((5, 7))
for method in ('nearest', 'linear'):
interp = RegularGridInterpolator((x, y), values,
method=method)
v1 = interp([0.4, 0.7])
interp = RegularGridInterpolator((x, y), values._v,
method=method)
v2 = interp([0.4, 0.7])
assert_allclose(v1, v2)
def test_invalid_fill_value(self):
np.random.seed(1234)
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = np.random.rand(5, 7)
# integers can be cast to floats
RegularGridInterpolator((x, y), values, fill_value=1)
# complex values cannot
assert_raises(ValueError, RegularGridInterpolator,
(x, y), values, fill_value=1+2j)
def test_fillvalue_type(self):
# from #3703; test that interpolator object construction succeeds
values = np.ones((10, 20, 30), dtype='>f4')
points = [np.arange(n) for n in values.shape]
xi = [(1, 1, 1)]
interpolator = RegularGridInterpolator(points, values)
interpolator = RegularGridInterpolator(points, values, fill_value=0.)
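# Usage sketch (added for exposition; `_demo_rgi_fill_value` is not part of
# the original suite): with bounds_error=False, out-of-range queries return
# fill_value, while fill_value=None switches to extrapolation instead.
def _demo_rgi_fill_value():
    x = np.linspace(0., 1., 3)
    vals = x[:, None] + 10*x[None, :]
    interp = RegularGridInterpolator((x, x), vals,
                                     bounds_error=False, fill_value=-1.)
    assert_allclose(interp([[2., 2.]]), [-1.])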
class MyValue(object):
"""
Minimal indexable object
"""
def __init__(self, shape):
self.ndim = 2
self.shape = shape
self._v = np.arange(np.prod(shape)).reshape(shape)
def __getitem__(self, idx):
return self._v[idx]
def __array_interface__(self):
return None
def __array__(self):
raise RuntimeError("No array representation")
class TestInterpN(object):
def _sample_2d_data(self):
        x = np.array([.5, 2., 3., 4., 5.5])
        y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
return x, y, z
def test_spline_2d(self):
x, y, z = self._sample_2d_data()
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
lut.ev(xi[:, 0], xi[:, 1]))
def test_list_input(self):
x, y, z = self._sample_2d_data()
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
for method in ['nearest', 'linear', 'splinef2d']:
v1 = interpn((x, y), z, xi, method=method)
v2 = interpn((x.tolist(), y.tolist()), z.tolist(),
xi.tolist(), method=method)
assert_allclose(v1, v2, err_msg=method)
def test_spline_2d_outofbounds(self):
x = np.array([.5, 2., 3., 4., 5.5])
y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
actual = interpn((x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=999.99)
expected = lut.ev(xi[:, 0], xi[:, 1])
expected[2:4] = 999.99
assert_array_almost_equal(actual, expected)
# no extrapolation for splinef2d
assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=None)
def _sample_4d_data(self):
points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def test_linear_4d(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values)
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="linear")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_linear_outofbounds(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = 999.99
actual = interpn(points, values, sample, method="linear",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_nearest_4d(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values, method="nearest")
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="nearest")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_nearest_outofbounds(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = 999.99
actual = interpn(points, values, sample, method="nearest",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_xi_1d(self):
# verify that 1D xi works as expected
points, values = self._sample_4d_data()
sample = np.asarray([0.1, 0.1, 10., 9.])
v1 = interpn(points, values, sample, bounds_error=False)
v2 = interpn(points, values, sample[None,:], bounds_error=False)
assert_allclose(v1, v2)
def test_xi_nd(self):
# verify that higher-d xi works as expected
points, values = self._sample_4d_data()
np.random.seed(1234)
sample = np.random.rand(2, 3, 4)
v1 = interpn(points, values, sample, method='nearest',
bounds_error=False)
assert_equal(v1.shape, (2, 3))
v2 = interpn(points, values, sample.reshape(-1, 4),
method='nearest', bounds_error=False)
assert_allclose(v1, v2.reshape(v1.shape))
def test_xi_broadcast(self):
# verify that the interpolators broadcast xi
x, y, values = self._sample_2d_data()
points = (x, y)
xi = np.linspace(0, 1, 2)
yi = np.linspace(0, 3, 3)
for method in ['nearest', 'linear', 'splinef2d']:
sample = (xi[:,None], yi[None,:])
v1 = interpn(points, values, sample, method=method,
bounds_error=False)
assert_equal(v1.shape, (2, 3))
xx, yy = np.meshgrid(xi, yi)
sample = np.c_[xx.T.ravel(), yy.T.ravel()]
v2 = interpn(points, values, sample,
method=method, bounds_error=False)
assert_allclose(v1, v2.reshape(v1.shape))
def test_nonscalar_values(self):
# Verify that non-scalar valued values also works
points, values = self._sample_4d_data()
np.random.seed(1234)
values = np.random.rand(3, 3, 3, 3, 6)
sample = np.random.rand(7, 11, 4)
for method in ['nearest', 'linear']:
v = interpn(points, values, sample, method=method,
bounds_error=False)
assert_equal(v.shape, (7, 11, 6), err_msg=method)
vs = [interpn(points, values[...,j], sample, method=method,
bounds_error=False)
for j in range(6)]
v2 = np.array(vs).transpose(1, 2, 0)
assert_allclose(v, v2, err_msg=method)
        # Vector-valued splines are not supported with fitpack
assert_raises(ValueError, interpn, points, values, sample,
method='splinef2d')
def test_complex(self):
x, y, values = self._sample_2d_data()
points = (x, y)
values = values - 2j*values
sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
for method in ['linear', 'nearest']:
v1 = interpn(points, values, sample, method=method)
v2r = interpn(points, values.real, sample, method=method)
v2i = interpn(points, values.imag, sample, method=method)
v2 = v2r + 1j*v2i
assert_allclose(v1, v2)
        # Complex-valued data not supported by splinef2d
_assert_warns(np.ComplexWarning, interpn, points, values,
sample, method='splinef2d')
def test_duck_typed_values(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = MyValue((5, 7))
for method in ('nearest', 'linear'):
v1 = interpn((x, y), values, [0.4, 0.7], method=method)
v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
assert_allclose(v1, v2)
def test_matrix_input(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = np.matrix(np.random.rand(5, 7))
sample = np.random.rand(3, 7, 2)
for method in ('nearest', 'linear', 'splinef2d'):
v1 = interpn((x, y), values, sample, method=method)
v2 = interpn((x, y), np.asarray(values), sample, method=method)
assert_allclose(v1, np.asmatrix(v2))
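# Cross-check sketch (added for exposition; `_demo_interpn_matches_rgi` is not
# part of the original suite): interpn is the functional counterpart of
# RegularGridInterpolator, so for the default 'linear' method the two agree.
def _demo_interpn_matches_rgi():
    x = np.linspace(0., 1., 4)
    vals = 2*x[:, None] + x[None, :]
    sample = np.array([[0.25, 0.75]])
    assert_allclose(interpn((x, x), vals, sample),
                    RegularGridInterpolator((x, x), vals)(sample))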
| 104,326 | 36.772266 | 84 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_regression.py |
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.interpolate as interp
from numpy.testing import assert_almost_equal
class TestRegression(object):
def test_spalde_scalar_input(self):
"""Ticket #629"""
x = np.linspace(0,10)
y = x**3
tck = interp.splrep(x, y, k=3, t=[5])
res = interp.spalde(np.float64(1), tck)
des = np.array([1., 3., 6., 6.])
assert_almost_equal(res, des)
| 484 | 27.529412 | 64 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_pade.py |
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_array_equal, assert_array_almost_equal)
from scipy.interpolate import pade
def test_pade_trivial():
nump, denomp = pade([1.0], 0)
assert_array_equal(nump.c, [1.0])
assert_array_equal(denomp.c, [1.0])
def test_pade_4term_exp():
# First four Taylor coefficients of exp(x).
# Unlike poly1d, the first array element is the zero-order term.
an = [1.0, 1.0, 0.5, 1.0/6]
nump, denomp = pade(an, 0)
assert_array_almost_equal(nump.c, [1.0/6, 0.5, 1.0, 1.0])
assert_array_almost_equal(denomp.c, [1.0])
nump, denomp = pade(an, 1)
assert_array_almost_equal(nump.c, [1.0/6, 2.0/3, 1.0])
assert_array_almost_equal(denomp.c, [-1.0/3, 1.0])
nump, denomp = pade(an, 2)
assert_array_almost_equal(nump.c, [1.0/3, 1.0])
assert_array_almost_equal(denomp.c, [1.0/6, -2.0/3, 1.0])
nump, denomp = pade(an, 3)
assert_array_almost_equal(nump.c, [1.0])
assert_array_almost_equal(denomp.c, [-1.0/6, 0.5, -1.0, 1.0])
| 1,068 | 31.393939 | 73 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_interpnd.py
|
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_almost_equal
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
import scipy.interpolate.interpnd as interpnd
import scipy.spatial.qhull as qhull
import pickle
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
class TestLinearNDInterpolation(object):
def test_smoketest(self):
# Test at single points
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
yi = interpnd.LinearNDInterpolator(x, y)(x)
assert_almost_equal(y, yi)
def test_smoketest_alternate(self):
# Test at single points, alternate calling convention
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
yi = interpnd.LinearNDInterpolator((x[:,0], x[:,1]), y)(x[:,0], x[:,1])
assert_almost_equal(y, yi)
def test_complex_smoketest(self):
# Test at single points
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 3j*y
yi = interpnd.LinearNDInterpolator(x, y)(x)
assert_almost_equal(y, yi)
def test_tri_input(self):
# Test at single points
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 3j*y
tri = qhull.Delaunay(x)
yi = interpnd.LinearNDInterpolator(tri, y)(x)
assert_almost_equal(y, yi)
def test_square(self):
# Test barycentric interpolation on a square against a manual
# implementation
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
values = np.array([1., 2., -3., 5.], dtype=np.double)
# NB: assume triangles (0, 1, 3) and (1, 2, 3)
#
# 1----2
# | \ |
# | \ |
# 0----3
def ip(x, y):
t1 = (x + y <= 1)
t2 = ~t1
x1 = x[t1]
y1 = y[t1]
x2 = x[t2]
y2 = y[t2]
z = 0*x
z[t1] = (values[0]*(1 - x1 - y1)
+ values[1]*y1
+ values[3]*x1)
z[t2] = (values[2]*(x2 + y2 - 1)
+ values[1]*(1 - x2)
+ values[3]*(1 - y2))
return z
xx, yy = np.broadcast_arrays(np.linspace(0, 1, 14)[:,None],
np.linspace(0, 1, 14)[None,:])
xx = xx.ravel()
yy = yy.ravel()
xi = np.array([xx, yy]).T.copy()
zi = interpnd.LinearNDInterpolator(points, values)(xi)
assert_almost_equal(zi, ip(xx, yy))
def test_smoketest_rescale(self):
# Test at single points
x = np.array([(0, 0), (-5, -5), (-5, 5), (5, 5), (2.5, 3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
yi = interpnd.LinearNDInterpolator(x, y, rescale=True)(x)
assert_almost_equal(y, yi)
def test_square_rescale(self):
# Test barycentric interpolation on a rectangle with rescaling
        # against the same implementation without rescaling
points = np.array([(0,0), (0,100), (10,100), (10,0)], dtype=np.double)
values = np.array([1., 2., -3., 5.], dtype=np.double)
xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
np.linspace(0, 100, 14)[None,:])
xx = xx.ravel()
yy = yy.ravel()
xi = np.array([xx, yy]).T.copy()
zi = interpnd.LinearNDInterpolator(points, values)(xi)
zi_rescaled = interpnd.LinearNDInterpolator(points, values,
rescale=True)(xi)
assert_almost_equal(zi, zi_rescaled)
def test_tripoints_input_rescale(self):
# Test at single points
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 3j*y
tri = qhull.Delaunay(x)
yi = interpnd.LinearNDInterpolator(tri.points, y)(x)
yi_rescale = interpnd.LinearNDInterpolator(tri.points, y,
rescale=True)(x)
assert_almost_equal(yi, yi_rescale)
def test_tri_input_rescale(self):
# Test at single points
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 3j*y
tri = qhull.Delaunay(x)
        with assert_raises(ValueError, match="Rescaling is not supported "
                           "when passing a Delaunay triangulation"):
            interpnd.LinearNDInterpolator(tri, y, rescale=True)(x)
def test_pickle(self):
# Test at single points
np.random.seed(1234)
x = np.random.rand(30, 2)
y = np.random.rand(30) + 1j*np.random.rand(30)
ip = interpnd.LinearNDInterpolator(x, y)
ip2 = pickle.loads(pickle.dumps(ip))
assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
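# Usage sketch (added for exposition; `_demo_linearnd_affine` is not part of
# the original suite): a linear (barycentric) interpolant reproduces an affine
# function exactly inside the convex hull of the sites.
def _demo_linearnd_affine():
    pts = np.array([(0, 0), (1, 0), (0, 1), (1, 1)], dtype=np.double)
    z = 2*pts[:, 0] + 3*pts[:, 1] + 1
    ip = interpnd.LinearNDInterpolator(pts, z)
    assert_almost_equal(ip(0.25, 0.5), 2*0.25 + 3*0.5 + 1)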
class TestEstimateGradients2DGlobal(object):
def test_smoketest(self):
x = np.array([(0, 0), (0, 2),
(1, 0), (1, 2), (0.25, 0.75), (0.6, 0.8)], dtype=float)
tri = qhull.Delaunay(x)
# Should be exact for linear functions, independent of triangulation
funcs = [
(lambda x, y: 0*x + 1, (0, 0)),
(lambda x, y: 0 + x, (1, 0)),
(lambda x, y: -2 + y, (0, 1)),
(lambda x, y: 3 + 3*x + 14.15*y, (3, 14.15))
]
for j, (func, grad) in enumerate(funcs):
z = func(x[:,0], x[:,1])
dz = interpnd.estimate_gradients_2d_global(tri, z, tol=1e-6)
assert_equal(dz.shape, (6, 2))
assert_allclose(dz, np.array(grad)[None,:] + 0*dz,
rtol=1e-5, atol=1e-5, err_msg="item %d" % j)
def test_regression_2359(self):
# Check regression --- for certain point sets, gradient
# estimation could end up in an infinite loop
points = np.load(data_file('estimate_gradients_hang.npy'))
values = np.random.rand(points.shape[0])
tri = qhull.Delaunay(points)
# This should not hang
with suppress_warnings() as sup:
sup.filter(interpnd.GradientEstimationWarning,
"Gradient estimation did not converge")
interpnd.estimate_gradients_2d_global(tri, values, maxiter=1)
class TestCloughTocher2DInterpolator(object):
def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False, rescale=False, **kw):
np.random.seed(1234)
if x is None:
x = np.array([(0, 0), (0, 1),
(1, 0), (1, 1), (0.25, 0.75), (0.6, 0.8),
(0.5, 0.2)],
dtype=float)
        if not alternate:
            ip = interpnd.CloughTocher2DInterpolator(x, func(x[:,0], x[:,1]),
                                                     tol=tol, rescale=rescale)
        else:
            ip = interpnd.CloughTocher2DInterpolator((x[:,0], x[:,1]),
                                                     func(x[:,0], x[:,1]),
                                                     tol=tol, rescale=rescale)
p = np.random.rand(50, 2)
if not alternate:
a = ip(p)
else:
a = ip(p[:,0], p[:,1])
b = func(p[:,0], p[:,1])
try:
assert_allclose(a, b, **kw)
except AssertionError:
print(abs(a - b))
print(ip.grad)
raise
def test_linear_smoketest(self):
# Should be exact for linear functions, independent of triangulation
funcs = [
lambda x, y: 0*x + 1,
lambda x, y: 0 + x,
lambda x, y: -2 + y,
lambda x, y: 3 + 3*x + 14.15*y,
]
for j, func in enumerate(funcs):
self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
err_msg="Function %d" % j)
self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
alternate=True,
err_msg="Function (alternate) %d" % j)
# check rescaling
self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
err_msg="Function (rescaled) %d" % j, rescale=True)
self._check_accuracy(func, tol=1e-13, atol=1e-7, rtol=1e-7,
alternate=True, rescale=True,
err_msg="Function (alternate, rescaled) %d" % j)
def test_quadratic_smoketest(self):
# Should be reasonably accurate for quadratic functions
funcs = [
lambda x, y: x**2,
lambda x, y: y**2,
lambda x, y: x**2 - y**2,
lambda x, y: x*y,
]
for j, func in enumerate(funcs):
self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
err_msg="Function %d" % j)
self._check_accuracy(func, tol=1e-9, atol=0.22, rtol=0,
err_msg="Function %d" % j, rescale=True)
def test_tri_input(self):
# Test at single points
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 3j*y
tri = qhull.Delaunay(x)
yi = interpnd.CloughTocher2DInterpolator(tri, y)(x)
assert_almost_equal(y, yi)
def test_tri_input_rescale(self):
# Test at single points
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 3j*y
tri = qhull.Delaunay(x)
        with assert_raises(ValueError, match="Rescaling is not supported "
                           "when passing a Delaunay triangulation"):
            interpnd.CloughTocher2DInterpolator(tri, y, rescale=True)(x)
def test_tripoints_input_rescale(self):
# Test at single points
x = np.array([(0,0), (-5,-5), (-5,5), (5, 5), (2.5, 3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 3j*y
tri = qhull.Delaunay(x)
yi = interpnd.CloughTocher2DInterpolator(tri.points, y)(x)
yi_rescale = interpnd.CloughTocher2DInterpolator(tri.points, y, rescale=True)(x)
assert_almost_equal(yi, yi_rescale)
def test_dense(self):
# Should be more accurate for dense meshes
funcs = [
lambda x, y: x**2,
lambda x, y: y**2,
lambda x, y: x**2 - y**2,
lambda x, y: x*y,
lambda x, y: np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
]
np.random.seed(4321) # use a different seed than the check!
grid = np.r_[np.array([(0,0), (0,1), (1,0), (1,1)], dtype=float),
np.random.rand(30*30, 2)]
for j, func in enumerate(funcs):
self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
err_msg="Function %d" % j)
self._check_accuracy(func, x=grid, tol=1e-9, atol=5e-3, rtol=1e-2,
err_msg="Function %d" % j, rescale=True)
def test_wrong_ndim(self):
x = np.random.randn(30, 3)
y = np.random.randn(30)
assert_raises(ValueError, interpnd.CloughTocher2DInterpolator, x, y)
def test_pickle(self):
# Test at single points
np.random.seed(1234)
x = np.random.rand(30, 2)
y = np.random.rand(30) + 1j*np.random.rand(30)
ip = interpnd.CloughTocher2DInterpolator(x, y)
ip2 = pickle.loads(pickle.dumps(ip))
assert_almost_equal(ip(0.5, 0.5), ip2(0.5, 0.5))
| 12,648 | 34.431373 | 92 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_interpolate_wrapper.py
|
""" module to test interpolate_wrapper.py
"""
from __future__ import division, print_function, absolute_import
from numpy import arange, allclose, ones, isnan
import numpy as np
from numpy.testing import (assert_, assert_allclose)
from scipy._lib._numpy_compat import suppress_warnings
# functionality to be tested
from scipy.interpolate.interpolate_wrapper import (linear, logarithmic,
block_average_above, nearest)
class Test(object):
def assertAllclose(self, x, y, rtol=1.0e-5):
for i, xi in enumerate(x):
assert_(allclose(xi, y[i], rtol) or (isnan(xi) and isnan(y[i])))
def test_nearest(self):
N = 5
x = arange(N)
y = arange(N)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`nearest` is deprecated")
assert_allclose(y, nearest(x, y, x+.1))
assert_allclose(y, nearest(x, y, x-.1))
def test_linear(self):
N = 3000.
x = arange(N)
y = arange(N)
new_x = arange(N)+0.5
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`linear` is deprecated")
new_y = linear(x, y, new_x)
assert_allclose(new_y[:5], [0.5, 1.5, 2.5, 3.5, 4.5])
def test_block_average_above(self):
N = 3000
x = arange(N, dtype=float)
y = arange(N, dtype=float)
new_x = arange(N // 2) * 2
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`block_average_above` is deprecated")
new_y = block_average_above(x, y, new_x)
assert_allclose(new_y[:5], [0.0, 0.5, 2.5, 4.5, 6.5])
def test_linear2(self):
N = 3000
x = arange(N, dtype=float)
y = ones((100,N)) * arange(N)
new_x = arange(N) + 0.5
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`linear` is deprecated")
new_y = linear(x, y, new_x)
assert_allclose(new_y[:5,:5],
[[0.5, 1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5, 4.5]])
def test_logarithmic(self):
N = 4000.
x = arange(N)
y = arange(N)
new_x = arange(N)+0.5
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "`logarithmic` is deprecated")
new_y = logarithmic(x, y, new_x)
correct_y = [np.NaN, 1.41421356, 2.44948974, 3.46410162, 4.47213595]
assert_allclose(new_y[:5], correct_y)
    def runTest(self):
        test_list = [name for name in dir(self) if name.startswith('test_')]
        for test_name in test_list:
            getattr(self, test_name)()
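# Worked check (added for exposition; `_demo_logarithmic_midpoint` is not part
# of the original suite): `logarithmic` interpolates linearly in log space, so
# between y = 1 and y = 2 the midpoint value is the geometric mean sqrt(2).
def _demo_logarithmic_midpoint():
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, "`logarithmic` is deprecated")
        mid = logarithmic(np.array([1., 2.]), np.array([1., 2.]),
                          np.array([1.5]))
    assert_allclose(mid, [np.sqrt(2.)])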
| 2,850 | 33.768293 | 81 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_rbf.py |
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
check_rbf1d_interpolation(function)
check_rbf2d_interpolation(function)
check_rbf3d_interpolation(function)
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
check_rbf1d_regularity(function, tolerances.get(function, 1e-2))
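# Usage sketch (added for exposition; `_demo_rbf_between_nodes` is not part of
# the original suite): an Rbf fit is a global interpolant, so it can be
# evaluated between the nodes; for the default multiquadric basis the error on
# sin(x) stays within the ~0.1 tolerance asserted above.
def _demo_rbf_between_nodes():
    x = linspace(0, 10, 9)
    rbf = Rbf(x, sin(x))
    xi = linspace(0, 10, 100)
    assert_(allclose(rbf(xi), sin(xi), atol=0.1))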
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
    # subtract the linear trend and make sure there are no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
check_rbf1d_stability(function)
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
def test_rbf_epsilon_none_collinear():
# Check that collinear points in one dimension doesn't cause an error
# due to epsilon = 0
x = [1, 2, 3]
y = [4, 4, 4]
z = [5, 6, 7]
rbf = Rbf(x, y, z, epsilon=None)
assert_(rbf.epsilon > 0)
| 4,505 | 27.884615 | 74 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_polyint.py |
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
assert_allclose, assert_equal, assert_)
from pytest import raises as assert_raises
from scipy.interpolate import (
KroghInterpolator, krogh_interpolate,
BarycentricInterpolator, barycentric_interpolate,
approximate_taylor_polynomial, pchip, PchipInterpolator,
pchip_interpolate, Akima1DInterpolator, CubicSpline, make_interp_spline)
from scipy._lib.six import xrange
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
extra_args={}):
np.random.seed(1234)
x = [-1, 0, 1, 2, 3, 4]
s = list(range(1, len(y_shape)+1))
s.insert(axis % (len(y_shape)+1), 0)
y = np.random.rand(*((6,) + y_shape)).transpose(s)
# Cython code chokes on y.shape = (0, 3) etc, skip them
if y.size == 0:
return
xi = np.zeros(x_shape)
yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
target_shape = ((deriv_shape or ()) + y.shape[:axis]
+ x_shape + y.shape[axis:][1:])
assert_equal(yi.shape, target_shape)
# check it works also with lists
if x_shape and y.size > 0:
interpolator_cls(list(x), list(y), axis=axis, **extra_args)(list(xi))
# check also values
if xi.size > 0 and deriv_shape is None:
bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
yv = yv.reshape(bs_shape)
yi, y = np.broadcast_arrays(yi, yv)
assert_allclose(yi, y)
SHAPES = [(), (0,), (1,), (6, 2, 5)]
def test_shapes():
def spl_interp(x, y, axis):
return make_interp_spline(x, y, axis=axis)
for ip in [KroghInterpolator, BarycentricInterpolator, pchip,
Akima1DInterpolator, CubicSpline, spl_interp]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
if ip != CubicSpline:
check_shape(ip, s1, s2, None, axis)
else:
for bc in ['natural', 'clamped']:
extra = {'bc_type': bc}
check_shape(ip, s1, s2, None, axis, extra)
def test_derivs_shapes():
def krogh_derivs(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivatives
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
check_shape(krogh_derivs, s1, s2, (6,), axis)
def test_deriv_shapes():
def krogh_deriv(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivative
def pchip_deriv(x, y, axis=0):
return pchip(x, y, axis).derivative()
def pchip_deriv2(x, y, axis=0):
return pchip(x, y, axis).derivative(2)
    def pchip_antideriv(x, y, axis=0):
        return pchip(x, y, axis).antiderivative()
    def pchip_antideriv2(x, y, axis=0):
        return pchip(x, y, axis).antiderivative(2)
def pchip_deriv_inplace(x, y, axis=0):
class P(PchipInterpolator):
def __call__(self, x):
return PchipInterpolator.__call__(self, x, 1)
return P(x, y, axis)
def akima_deriv(x, y, axis=0):
return Akima1DInterpolator(x, y, axis).derivative()
def akima_antideriv(x, y, axis=0):
return Akima1DInterpolator(x, y, axis).antiderivative()
def cspline_deriv(x, y, axis=0):
return CubicSpline(x, y, axis).derivative()
def cspline_antideriv(x, y, axis=0):
return CubicSpline(x, y, axis).antiderivative()
def bspl_deriv(x, y, axis=0):
return make_interp_spline(x, y, axis=axis).derivative()
def bspl_antideriv(x, y, axis=0):
return make_interp_spline(x, y, axis=axis).antiderivative()
for ip in [krogh_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
check_shape(ip, s1, s2, (), axis)
def _check_complex(ip):
x = [1, 2, 3, 4]
y = [1, 2, 1j, 3]
p = ip(x, y)
assert_allclose(y, p(x))
def test_complex():
for ip in [KroghInterpolator, BarycentricInterpolator, pchip, CubicSpline]:
_check_complex(ip)
class TestKrogh(object):
def setup_method(self):
self.true_poly = np.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
def test_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_low_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs,len(self.xs)+2)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
m = 10
r = P.derivatives(self.test_xs,m)
for i in xrange(m):
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
def test_high_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
for i in xrange(len(self.xs),2*len(self.xs)):
assert_almost_equal(P.derivative(self.test_xs,i),
np.zeros(len(self.test_xs)))
def test_hermite(self):
xs = [0,0,0,1,1,1,2]
ys = [self.true_poly(0),
self.true_poly.deriv(1)(0),
self.true_poly.deriv(2)(0),
self.true_poly(1),
self.true_poly.deriv(1)(1),
self.true_poly.deriv(2)(1),
self.true_poly(2)]
        P = KroghInterpolator(xs,ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = KroghInterpolator(xs,ys)
Pi = [KroghInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
assert_almost_equal(P.derivatives(test_xs),
np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
(1,2,0)))
def test_empty(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(P([]), [])
def test_shapes_scalarvalue(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P(np.array(0))), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_scalarvalue_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,))
assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
assert_array_equal(np.shape(P.derivatives([0])), (n,1))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
def test_shapes_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_1d_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_shapes_vectorvalue_derivative(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,3))
assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
def test_wrapper(self):
P = KroghInterpolator(self.xs, self.ys)
ki = krogh_interpolate
assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
assert_almost_equal(P.derivative(self.test_xs, 2),
ki(self.xs, self.ys, self.test_xs, der=2))
assert_almost_equal(P.derivatives(self.test_xs, 2),
ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
def test_int_inputs(self):
# Check input args are cast correctly to floats, gh-3669
x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
13104, 60000]
offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
-0.48002351, -0.34925329, -0.26503107,
-0.13148093, -0.12988833, -0.12979296,
-0.12973574, -0.08582937, 0.05])
f = KroghInterpolator(x, offset_cdf)
assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
0, atol=1e-10)
def test_derivatives_complex(self):
# regression test for gh-7381: krogh.derivatives(0) fails complex y
x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
func = KroghInterpolator(x, y)
cmplx = func.derivatives(0)
cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
1j*KroghInterpolator(x, y.imag).derivatives(0))
assert_allclose(cmplx, cmplx2, atol=1e-15)
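# Usage sketch (added for exposition; `_demo_krogh_hermite` is not part of the
# original suite): repeating an abscissa makes the next y-value a derivative
# specification, so xs = [0, 0, 1] with ys = [0, 1, 1] encodes f(0)=0,
# f'(0)=1, f(1)=1, whose unique quadratic fit is f(x) = x.
def _demo_krogh_hermite():
    P = KroghInterpolator([0, 0, 1], [0, 1, 1])
    assert_almost_equal(P(0.5), 0.5)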
class TestTaylor(object):
def test_exponential(self):
degree = 5
p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
for i in xrange(degree+1):
assert_almost_equal(p(0),1)
p = p.deriv()
assert_almost_equal(p(0),0)
class TestBarycentric(object):
def setup_method(self):
self.true_poly = np.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
assert_almost_equal(self.true_poly(np.array(7)),P(np.array(7)))
def test_delayed(self):
P = BarycentricInterpolator(self.xs)
P.set_yi(self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_append(self):
P = BarycentricInterpolator(self.xs[:3],self.ys[:3])
P.add_xi(self.xs[3:],self.ys[3:])
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = BarycentricInterpolator(xs,ys)
Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
def test_shapes_scalarvalue(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P(np.array(0))), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_vectorvalue(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_1d_vectorvalue(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,[1]))
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_wrapper(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(P(self.test_xs),barycentric_interpolate(self.xs,self.ys,self.test_xs))
class TestPCHIP(object):
def _make_random(self, npts=20):
np.random.seed(1234)
xi = np.sort(np.random.random(npts))
yi = np.random.random(npts)
return pchip(xi, yi), xi, yi
def test_overshoot(self):
# PCHIP should not overshoot
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
if y1 > y2:
y1, y2 = y2, y1
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert_(((y1 <= yp) & (yp <= y2)).all())
def test_monotone(self):
        # PCHIP should preserve monotonicity
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
xp = np.linspace(x1, x2, 10)
yp = p(xp)
            assert_(((y2-y1) * (yp[1:] - yp[:-1]) >= 0).all())
def test_cast(self):
# regression test for integer input data, see gh-3453
data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
[-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
xx = np.arange(100)
curve = pchip(data[0], data[1])(xx)
data1 = data * 1.0
curve1 = pchip(data1[0], data1[1])(xx)
assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
def test_nag(self):
# Example from NAG C implementation,
# http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
# suggested in gh-5326 as a smoke test for the way the derivatives
# are computed (see also gh-3453)
from scipy._lib.six import StringIO
dataStr = '''
7.99 0.00000E+0
8.09 0.27643E-4
8.19 0.43750E-1
8.70 0.16918E+0
9.20 0.46943E+0
10.00 0.94374E+0
12.00 0.99864E+0
15.00 0.99992E+0
20.00 0.99999E+0
'''
data = np.loadtxt(StringIO(dataStr))
pch = pchip(data[:,0], data[:,1])
resultStr = '''
7.9900 0.0000
9.1910 0.4640
10.3920 0.9645
11.5930 0.9965
12.7940 0.9992
13.9950 0.9998
15.1960 0.9999
16.3970 1.0000
17.5980 1.0000
18.7990 1.0000
20.0000 1.0000
'''
result = np.loadtxt(StringIO(resultStr))
assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
def test_endslopes(self):
# this is a smoke test for gh-3453: PCHIP interpolator should not
# set edge slopes to zero if the data do not suggest zero edge derivatives
x = np.array([0.0, 0.1, 0.25, 0.35])
y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
for pp in (pchip(x, y1), pchip(x, y2)):
for t in (x[0], x[-1]):
assert_(pp(t, 1) != 0)
def test_all_zeros(self):
x = np.arange(10)
y = np.zeros_like(x)
# this should work and not generate any warnings
with warnings.catch_warnings():
warnings.filterwarnings('error')
pch = pchip(x, y)
xx = np.linspace(0, 9, 101)
assert_equal(pch(xx), 0.)
def test_two_points(self):
# regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
# it tries to use a three-point scheme to estimate edge derivatives,
# while there are only two points available.
# Instead, it should construct a linear interpolator.
x = np.linspace(0, 1, 11)
p = pchip([0, 1], [0, 2])
assert_allclose(p(x), 2*x, atol=1e-15)
def test_pchip_interpolate(self):
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1),
[1.])
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0),
[3.5])
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]),
[[3.5], [1]])
def test_roots(self):
# regression test for gh-6357: .roots method should work
p = pchip([0, 1], [-1, 1])
r = p.roots()
assert_allclose(r, 0.5)
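# Illustrative sketch (not part of the original test suite): the shape-
# preserving behaviour that ``test_overshoot``/``test_monotone`` above verify.
# PCHIP fits step-like monotone data without leaving the data range, while a
# not-a-knot cubic spline through the same points typically over/undershoots.
def _demo_pchip_shape_preservation():
    import numpy as np
    from scipy.interpolate import pchip, CubicSpline
    x = np.array([0., 1., 2., 3., 4.])
    y = np.array([0., 0., 1., 1., 1.])          # monotone, step-like data
    xq = np.linspace(0., 4., 201)
    yp = pchip(x, y)(xq)
    assert ((yp >= 0.) & (yp <= 1.)).all()      # PCHIP stays inside [0, 1]
    yc = CubicSpline(x, y)(xq)
    print("cubic spline range:", yc.min(), yc.max())   # expect values outside [0, 1]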
class TestCubicSpline(object):
@staticmethod
def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
tol=1e-14):
"""Check that spline coefficients satisfy the continuity and boundary
conditions."""
x = S.x
c = S.c
dx = np.diff(x)
dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
dxi = dx[:-1]
# Check C2 continuity.
assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
rtol=tol, atol=tol)
# Check that we found a parabola, the third derivative is 0.
if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
assert_allclose(c[0], 0, rtol=tol, atol=tol)
return
# Check periodic boundary conditions.
if bc_start == 'periodic':
assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
return
# Check other boundary conditions.
if bc_start == 'not-a-knot':
if x.size == 2:
slope = (S(x[1]) - S(x[0])) / dx[0]
assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)
else:
assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)
elif bc_start == 'clamped':
assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)
elif bc_start == 'natural':
assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)
else:
order, value = bc_start
assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)
if bc_end == 'not-a-knot':
if x.size == 2:
slope = (S(x[1]) - S(x[0])) / dx[0]
assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)
else:
assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)
elif bc_end == 'clamped':
assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)
elif bc_end == 'natural':
assert_allclose(S(x[-1], 2), 0, rtol=tol, atol=tol)
else:
order, value = bc_end
assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)
def check_all_bc(self, x, y, axis):
deriv_shape = list(y.shape)
del deriv_shape[axis]
first_deriv = np.empty(deriv_shape)
first_deriv.fill(2)
second_deriv = np.empty(deriv_shape)
second_deriv.fill(-1)
bc_all = [
'not-a-knot',
'natural',
'clamped',
(1, first_deriv),
(2, second_deriv)
]
for bc in bc_all[:3]:
S = CubicSpline(x, y, axis=axis, bc_type=bc)
self.check_correctness(S, bc, bc)
for bc_start in bc_all:
for bc_end in bc_all:
S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
self.check_correctness(S, bc_start, bc_end, tol=2e-14)
def test_general(self):
x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
for n in [2, 3, x.size]:
self.check_all_bc(x[:n], y[:n], 0)
Y = np.empty((2, n, 2))
Y[0, :, 0] = y[:n]
Y[0, :, 1] = y[:n] - 1
Y[1, :, 0] = y[:n] + 2
Y[1, :, 1] = y[:n] + 3
self.check_all_bc(x[:n], Y, 1)
def test_periodic(self):
for n in [2, 3, 5]:
x = np.linspace(0, 2 * np.pi, n)
y = np.cos(x)
S = CubicSpline(x, y, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
Y = np.empty((2, n, 2))
Y[0, :, 0] = y
Y[0, :, 1] = y + 2
Y[1, :, 0] = y - 1
Y[1, :, 1] = y + 5
S = CubicSpline(x, Y, axis=1, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
def test_periodic_eval(self):
x = np.linspace(0, 2 * np.pi, 10)
y = np.cos(x)
S = CubicSpline(x, y, bc_type='periodic')
assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
def test_dtypes(self):
x = np.array([0, 1, 2, 3], dtype=int)
y = np.array([-5, 2, 3, 1], dtype=int)
S = CubicSpline(x, y)
self.check_correctness(S)
y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
S = CubicSpline(x, y)
self.check_correctness(S)
S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
self.check_correctness(S, "natural", (1, 2j))
y = np.array([-5, 2, 3, 1])
S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
def test_small_dx(self):
rng = np.random.RandomState(0)
x = np.sort(rng.uniform(size=100))
y = 1e4 + rng.uniform(size=100)
S = CubicSpline(x, y)
self.check_correctness(S, tol=1e-13)
def test_incorrect_inputs(self):
x = np.array([1, 2, 3, 4])
y = np.array([1, 2, 3, 4])
xc = np.array([1 + 1j, 2, 3, 4])
xn = np.array([np.nan, 2, 3, 4])
xo = np.array([2, 1, 3, 4])
yn = np.array([np.nan, 2, 3, 4])
y3 = [1, 2, 3]
x1 = [1]
y1 = [1]
assert_raises(ValueError, CubicSpline, xc, y)
assert_raises(ValueError, CubicSpline, xn, y)
assert_raises(ValueError, CubicSpline, x, yn)
assert_raises(ValueError, CubicSpline, xo, y)
assert_raises(ValueError, CubicSpline, x, y3)
assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
assert_raises(ValueError, CubicSpline, x1, y1)
wrong_bc = [('periodic', 'clamped'),
((2, 0), (3, 10)),
((1, 0), ),
(0., 0.),
'not-a-typo']
for bc_type in wrong_bc:
assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
# Shapes mismatch when giving arbitrary derivative values:
Y = np.c_[y, y]
bc1 = ('clamped', (1, 0))
bc2 = ('clamped', (1, [0, 0, 0]))
bc3 = ('clamped', (1, [[0, 0]]))
assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
# periodic condition, y[-1] must be equal to y[0]:
assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
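# Illustrative sketch (not part of the original test suite): the ``bc_type``
# variants that ``check_correctness`` above validates, on a small example.
def _demo_cubicspline_bc_types():
    import numpy as np
    from scipy.interpolate import CubicSpline
    x = np.linspace(0, 2 * np.pi, 9)
    y = np.sin(x)
    # 'clamped' zeroes the first derivative at both ends, 'natural' the second
    s_cl = CubicSpline(x, y, bc_type='clamped')
    s_na = CubicSpline(x, y, bc_type='natural')
    assert abs(s_cl(x[0], 1)) < 1e-12 and abs(s_cl(x[-1], 1)) < 1e-12
    assert abs(s_na(x[0], 2)) < 1e-12 and abs(s_na(x[-1], 2)) < 1e-12
    # arbitrary end derivatives are given as (order, value) pairs
    s_mx = CubicSpline(x, y, bc_type=((1, 1.0), (2, 0.0)))
    assert abs(s_mx(x[0], 1) - 1.0) < 1e-12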
| 24,248 | 35.574661 | 98 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_fitpack.py
|
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_,
assert_almost_equal, assert_array_almost_equal)
from pytest import raises as assert_raises
from numpy import array, asarray, pi, sin, cos, arange, dot, ravel, sqrt, round
from scipy import interpolate
from scipy.interpolate.fitpack import (splrep, splev, bisplrep, bisplev,
sproot, splprep, splint, spalde, splder, splantider, insert, dblint)
from scipy.interpolate.dfitpack import regrid_smth
def data_file(basename):
return os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', basename)
def norm2(x):
return sqrt(dot(x.T,x))
def f1(x,d=0):
if d is None:
return "sin"
if x is None:
return "sin(x)"
if d % 4 == 0:
return sin(x)
if d % 4 == 1:
return cos(x)
if d % 4 == 2:
return -sin(x)
if d % 4 == 3:
return -cos(x)
def f2(x,y=0,dx=0,dy=0):
if x is None:
return "sin(x+y)"
d = dx+dy
if d % 4 == 0:
return sin(x+y)
if d % 4 == 1:
return cos(x+y)
if d % 4 == 2:
return -sin(x+y)
if d % 4 == 3:
return -cos(x+y)
def makepairs(x, y):
"""Helper function to create an array of pairs of x and y."""
# Or itertools.product (>= python 2.6)
xy = array([[a, b] for a in asarray(x) for b in asarray(y)])
return xy.T
def put(*a):
"""Produce some output if file run directly"""
import sys
if hasattr(sys.modules['__main__'], '__put_prints'):
sys.stderr.write("".join(map(str, a)) + "\n")
class TestSmokeTests(object):
"""
Smoke tests (with a few asserts) for fitpack routines -- mostly
check that they are runnable
"""
def check_1(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,at=0,xb=None,xe=None):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
x1 = a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
v,v1 = f(x),f(x1)
nk = []
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0/float(N)
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
for k in range(1,6):
tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
if at:
t = tck[0][k:-k]
else:
t = x1
nd = []
for d in range(k+1):
tol = err_est(k, d)
err = norm2(f(t,d)-splev(t,tck,d)) / norm2(f(t,d))
assert_(err < tol, (k, d, err, tol))
nd.append((err, tol))
nk.append(nd)
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb,3)),repr(round(xe,3)),
repr(round(a,3)),repr(round(b,3))))
if at:
str = "at knots"
else:
str = "at the middle of nodes"
put(" per=%d s=%s Evaluation %s" % (per,repr(s),str))
put(" k : |f-s|^2 |f'-s'| |f''-.. |f'''-. |f''''- |f'''''")
k = 1
for l in nk:
put(' %d : ' % k)
for r in l:
put(' %.1e %.1e' % r)
put('\n')
k = k+1
def check_2(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
v = f(x)
def err_est(k, d):
# Assume f has all derivatives < 1
h = 1.0/float(N)
tol = 5 * h**(.75*(k-d))
if s > 0:
tol += 1e5*s
return tol
nk = []
for k in range(1,6):
tck = splrep(x,v,s=s,per=per,k=k,xe=xe)
nk.append([splint(ia,ib,tck),spalde(dx,tck)])
put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]" % (f(None),
repr(round(xb,3)),repr(round(xe,3)),
repr(round(a,3)),repr(round(b,3))))
put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s" % (per,repr(s),N,repr(round(ia,3)),repr(round(ib,3)),repr(round(dx,3))))
put(" k : int(s,[a,b]) Int.Error Rel. error of s^(d)(dx) d = 0, .., k")
k = 1
for r in nk:
if r[0] < 0:
sr = '-'
else:
sr = ' '
put(" %d %s%.8f %.1e " % (k,sr,abs(r[0]),
abs(r[0]-(f(ib,-1)-f(ia,-1)))))
d = 0
for dr in r[1]:
err = abs(1-dr/f(dx,d))
tol = err_est(k, d)
assert_(err < tol, (k, d))
put(" %.1e %.1e" % (err, tol))
d = d+1
put("\n")
k = k+1
def check_3(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
v = f(x)
put(" k : Roots of s(x) approx %s x in [%s,%s]:" %
(f(None),repr(round(a,3)),repr(round(b,3))))
for k in range(1,6):
tck = splrep(x, v, s=s, per=per, k=k, xe=xe)
if k == 3:
roots = sproot(tck)
assert_allclose(splev(roots, tck), 0, atol=1e-10, rtol=1e-10)
assert_allclose(roots, pi*array([1, 2, 3, 4]), rtol=1e-3)
put(' %d : %s' % (k, repr(roots.tolist())))
else:
assert_raises(ValueError, sproot, tck)
def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None,
ia=0,ib=2*pi,dx=0.2*pi):
if xb is None:
xb = a
if xe is None:
xe = b
x = a+(b-a)*arange(N+1,dtype=float)/float(N) # nodes
x1 = a + (b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes
v,v1 = f(x),f(x1)
put(" u = %s N = %d" % (repr(round(dx,3)),N))
put(" k : [x(u), %s(x(u))] Error of splprep Error of splrep " % (f(0,None)))
for k in range(1,6):
tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
tck = splrep(x,v,s=s,per=per,k=k)
uv = splev(dx,tckp)
err1 = abs(uv[1]-f(uv[0]))
err2 = abs(splev(uv[0],tck)-f(uv[0]))
assert_(err1 < 1e-2)
assert_(err2 < 1e-2)
put(" %d : %s %.1e %.1e" %
(k,repr([round(z,3) for z in uv]),
err1,
err2))
put("Derivatives of parametric cubic spline at u (first function):")
k = 3
tckp,u = splprep([x,v],s=s,per=per,k=k,nest=-1)
for d in range(1,k+1):
uv = splev(dx,tckp,d)
put(" %s " % (repr(uv[0])))
def check_5(self,f=f2,kx=3,ky=3,xb=0,xe=2*pi,yb=0,ye=2*pi,Nx=20,Ny=20,s=0):
x = xb+(xe-xb)*arange(Nx+1,dtype=float)/float(Nx)
y = yb+(ye-yb)*arange(Ny+1,dtype=float)/float(Ny)
xy = makepairs(x,y)
tck = bisplrep(xy[0],xy[1],f(xy[0],xy[1]),s=s,kx=kx,ky=ky)
tt = [tck[0][kx:-kx],tck[1][ky:-ky]]
t2 = makepairs(tt[0],tt[1])
v1 = bisplev(tt[0],tt[1],tck)
v2 = f2(t2[0],t2[1])
v2.shape = len(tt[0]),len(tt[1])
err = norm2(ravel(v1-v2))
assert_(err < 1e-2, err)
put(err)
def test_smoke_splrep_splev(self):
put("***************** splrep/splev")
self.check_1(s=1e-6)
self.check_1()
self.check_1(at=1)
self.check_1(per=1)
self.check_1(per=1,at=1)
self.check_1(b=1.5*pi)
self.check_1(b=1.5*pi,xe=2*pi,per=1,s=1e-1)
def test_smoke_splint_spalde(self):
put("***************** splint/spalde")
self.check_2()
self.check_2(per=1)
self.check_2(ia=0.2*pi,ib=pi)
self.check_2(ia=0.2*pi,ib=pi,N=50)
def test_smoke_sproot(self):
put("***************** sproot")
self.check_3(a=0.1,b=15)
def test_smoke_splprep_splrep_splev(self):
put("***************** splprep/splrep/splev")
self.check_4()
self.check_4(N=50)
def test_smoke_bisplrep_bisplev(self):
put("***************** bisplev")
self.check_5()
class TestSplev(object):
def test_1d_shape(self):
x = [1,2,3,4,5]
y = [4,5,6,7,8]
tck = splrep(x, y)
z = splev([1], tck)
assert_equal(z.shape, (1,))
z = splev(1, tck)
assert_equal(z.shape, ())
def test_2d_shape(self):
x = [1, 2, 3, 4, 5]
y = [4, 5, 6, 7, 8]
tck = splrep(x, y)
t = np.array([[1.0, 1.5, 2.0, 2.5],
[3.0, 3.5, 4.0, 4.5]])
z = splev(t, tck)
z0 = splev(t[0], tck)
z1 = splev(t[1], tck)
assert_equal(z, np.row_stack((z0, z1)))
def test_extrapolation_modes(self):
# test extrapolation modes
# * if ext=0, return the extrapolated value.
# * if ext=1, return 0
# * if ext=2, raise a ValueError
# * if ext=3, return the boundary value.
x = [1,2,3]
y = [0,2,4]
tck = splrep(x, y, k=1)
rstl = [[-2, 6], [0, 0], None, [0, 4]]
for ext in (0, 1, 3):
assert_array_almost_equal(splev([0, 4], tck, ext=ext), rstl[ext])
assert_raises(ValueError, splev, [0, 4], tck, ext=2)
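# Illustrative sketch (not part of the original test suite): the four ``ext``
# modes enumerated in ``test_extrapolation_modes`` above, using the same
# linear spline y = 2*(x - 1) on [1, 3].
def _demo_splev_ext_modes():
    import numpy as np
    from scipy.interpolate import splrep, splev
    tck = splrep([1, 2, 3], [0, 2, 4], k=1)
    xq = [0, 4]                                          # outside [1, 3]
    assert np.allclose(splev(xq, tck, ext=0), [-2, 6])   # extrapolate
    assert np.allclose(splev(xq, tck, ext=1), [0, 0])    # return 0
    assert np.allclose(splev(xq, tck, ext=3), [0, 4])    # boundary values
    try:
        splev(xq, tck, ext=2)                            # raises ValueError
    except ValueError:
        pass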
class TestSplder(object):
def setup_method(self):
# non-uniform grid, just to make it sure
x = np.linspace(0, 1, 100)**3
y = np.sin(20 * x)
self.spl = splrep(x, y)
# double check that knots are non-uniform
assert_(np.diff(self.spl[0]).ptp() > 0)
def test_inverse(self):
# Check that antiderivative + derivative is identity.
for n in range(5):
spl2 = splantider(self.spl, n)
spl3 = splder(spl2, n)
assert_allclose(self.spl[0], spl3[0])
assert_allclose(self.spl[1], spl3[1])
assert_equal(self.spl[2], spl3[2])
def test_splder_vs_splev(self):
# Check derivative vs. FITPACK
for n in range(3+1):
# Also extrapolation!
xx = np.linspace(-1, 2, 2000)
if n == 3:
# ... except that FITPACK extrapolates strangely for
# order 0, so let's not check that.
xx = xx[(xx >= 0) & (xx <= 1)]
dy = splev(xx, self.spl, n)
spl2 = splder(self.spl, n)
dy2 = splev(xx, spl2)
if n == 1:
assert_allclose(dy, dy2, rtol=2e-6)
else:
assert_allclose(dy, dy2)
def test_splantider_vs_splint(self):
# Check antiderivative vs. FITPACK
spl2 = splantider(self.spl)
# no extrapolation, splint assumes function is zero outside
# range
xx = np.linspace(0, 1, 20)
for x1 in xx:
for x2 in xx:
y1 = splint(x1, x2, self.spl)
y2 = splev(x2, spl2) - splev(x1, spl2)
assert_allclose(y1, y2)
def test_order0_diff(self):
assert_raises(ValueError, splder, self.spl, 4)
def test_kink(self):
# Should refuse to differentiate splines with kinks
spl2 = insert(0.5, self.spl, m=2)
splder(spl2, 2) # Should work
assert_raises(ValueError, splder, spl2, 3)
spl2 = insert(0.5, self.spl, m=3)
splder(spl2, 1) # Should work
assert_raises(ValueError, splder, spl2, 2)
spl2 = insert(0.5, self.spl, m=4)
assert_raises(ValueError, splder, spl2, 1)
def test_multidim(self):
# c can have trailing dims
for n in range(3):
t, c, k = self.spl
c2 = np.c_[c, c, c]
c2 = np.dstack((c2, c2))
spl2 = splantider((t, c2, k), n)
spl3 = splder(spl2, n)
assert_allclose(t, spl3[0])
assert_allclose(c2, spl3[1])
assert_equal(k, spl3[2])
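# Illustrative sketch (not part of the original test suite): the identities
# exercised by ``test_inverse`` and ``test_splder_vs_splev`` above --
# ``splder(splantider(tck))`` reproduces the spline, and ``splder`` agrees
# with ``splev(..., der=1)``.
def _demo_splder_splantider():
    import numpy as np
    from scipy.interpolate import splrep, splev, splder, splantider
    x = np.linspace(0, 1, 30)
    tck = splrep(x, np.sin(5 * x))
    tck2 = splder(splantider(tck))
    assert np.allclose(tck[0], tck2[0]) and np.allclose(tck[1], tck2[1])
    xq = np.linspace(0, 1, 7)
    assert np.allclose(splev(xq, splder(tck)), splev(xq, tck, der=1))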
class TestBisplrep(object):
def test_overflow(self):
a = np.linspace(0, 1, 620)
b = np.linspace(0, 1, 620)
x, y = np.meshgrid(a, b)
z = np.random.rand(*x.shape)
assert_raises(OverflowError, bisplrep, x.ravel(), y.ravel(), z.ravel(), s=0)
def test_regression_1310(self):
# Regression test for gh-1310
data = np.load(data_file('bug-1310.npz'))['data']
# Shouldn't crash -- the input data triggers work array sizes
        # that previously caused some data not to be aligned on
        # sizeof(double) boundaries in memory, which made the Fortran
        # code crash when compiled with -O3
bisplrep(data[:,0], data[:,1], data[:,2], kx=3, ky=3, s=0,
full_output=True)
def test_dblint():
# Basic test to see it runs and gives the correct result on a trivial
# problem. Note that `dblint` is not exposed in the interpolate namespace.
x = np.linspace(0, 1)
y = np.linspace(0, 1)
xx, yy = np.meshgrid(x, y)
rect = interpolate.RectBivariateSpline(x, y, 4 * xx * yy)
tck = list(rect.tck)
tck.extend(rect.degrees)
assert_almost_equal(dblint(0, 1, 0, 1, tck), 1)
assert_almost_equal(dblint(0, 0.5, 0, 1, tck), 0.25)
assert_almost_equal(dblint(0.5, 1, 0, 1, tck), 0.75)
assert_almost_equal(dblint(-100, 100, -100, 100, tck), 1)
def test_splev_der_k():
# regression test for gh-2188: splev(x, tck, der=k) gives garbage or crashes
# for x outside of knot range
# test case from gh-2188
tck = (np.array([0., 0., 2.5, 2.5]),
np.array([-1.56679978, 2.43995873, 0., 0.]),
1)
t, c, k = tck
x = np.array([-3, 0, 2.5, 3])
# an explicit form of the linear spline
assert_allclose(splev(x, tck), c[0] + (c[1] - c[0]) * x/t[2])
assert_allclose(splev(x, tck, 1), (c[1]-c[0]) / t[2])
# now check a random spline vs splder
np.random.seed(1234)
x = np.sort(np.random.random(30))
y = np.random.random(30)
t, c, k = splrep(x, y)
x = [t[0] - 1., t[-1] + 1.]
tck2 = splder((t, c, k), k)
assert_allclose(splev(x, (t, c, k), k), splev(x, tck2))
def test_bisplev_integer_overflow():
np.random.seed(1)
x = np.linspace(0, 1, 11)
y = x
z = np.random.randn(11, 11).ravel()
kx = 1
ky = 1
nx, tx, ny, ty, c, fp, ier = regrid_smth(
x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0)
tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky)
xp = np.zeros([2621440])
yp = np.zeros([2621440])
assert_raises((RuntimeError, MemoryError), bisplev, xp, yp, tck)
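# Illustrative sketch (not part of the original test suite): the basic
# splrep/splev/splint workflow that the smoke tests above drive through many
# parameter combinations (tolerances here are deliberately loose).
def _demo_splrep_workflow():
    import numpy as np
    from scipy.interpolate import splrep, splev, splint
    x = np.linspace(0, 2 * np.pi, 50)
    tck = splrep(x, np.sin(x))                 # cubic, interpolating (s=0)
    assert np.allclose(splev(x, tck), np.sin(x), atol=1e-4)
    assert np.allclose(splev(np.pi, tck, der=1), -1.0, atol=1e-3)  # cos(pi)
    assert np.allclose(splint(0, np.pi, tck), 2.0, atol=1e-4)      # integral of sin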
| 15,120 | 31.588362 | 129 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_gil.py
|
from __future__ import division, print_function, absolute_import
import itertools
import threading
import time
import numpy as np
from numpy.testing import assert_equal
import pytest
import scipy.interpolate
class TestGIL(object):
"""Check if the GIL is properly released by scipy.interpolate functions."""
def setup_method(self):
self.messages = []
def log(self, message):
self.messages.append(message)
def make_worker_thread(self, target, args):
log = self.log
class WorkerThread(threading.Thread):
def run(self):
log('interpolation started')
target(*args)
log('interpolation complete')
return WorkerThread()
@pytest.mark.slow
@pytest.mark.xfail(reason='race conditions, may depend on system load')
def test_rectbivariatespline(self):
def generate_params(n_points):
x = y = np.linspace(0, 1000, n_points)
x_grid, y_grid = np.meshgrid(x, y)
z = x_grid * y_grid
return x, y, z
def calibrate_delay(requested_time):
for n_points in itertools.count(5000, 1000):
args = generate_params(n_points)
time_started = time.time()
interpolate(*args)
if time.time() - time_started > requested_time:
return args
def interpolate(x, y, z):
scipy.interpolate.RectBivariateSpline(x, y, z)
args = calibrate_delay(requested_time=3)
worker_thread = self.make_worker_thread(interpolate, args)
worker_thread.start()
for i in range(3):
time.sleep(0.5)
self.log('working')
worker_thread.join()
assert_equal(self.messages, [
'interpolation started',
'working',
'working',
'working',
'interpolation complete',
])
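# Illustrative sketch (not part of the original test suite): the logging
# pattern used above in miniature. If the worker's long call releases the
# GIL (``time.sleep`` stands in for such a call here), the main thread can
# log while the worker is still running, so the messages interleave.
def _demo_gil_interleaving():
    import threading
    import time
    events = []
    def worker():
        events.append('started')
        time.sleep(1.0)              # stand-in for a long, GIL-releasing call
        events.append('done')
    t = threading.Thread(target=worker)
    t.start()
    time.sleep(0.5)
    events.append('main alive')
    t.join()
    assert events == ['started', 'main alive', 'done']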
| 1,948 | 27.661765 | 79 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_bsplines.py
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_
from scipy._lib._numpy_compat import suppress_warnings
from pytest import raises as assert_raises
import pytest
from scipy.interpolate import (BSpline, BPoly, PPoly, make_interp_spline,
make_lsq_spline, _bspl, splev, splrep, splprep, splder, splantider,
sproot, splint, insert)
import scipy.linalg as sl
from scipy.interpolate._bsplines import _not_a_knot, _augknt
import scipy.interpolate._fitpack_impl as _impl
from scipy.interpolate._fitpack import _splint
class TestBSpline(object):
def test_ctor(self):
# knots should be an ordered 1D array of finite real numbers
assert_raises((TypeError, ValueError), BSpline,
**dict(t=[1, 1.j], c=[1.], k=0))
with np.errstate(invalid='ignore'):
assert_raises(ValueError, BSpline, **dict(t=[1, np.nan], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, np.inf], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[1, -1], c=[1.], k=0))
assert_raises(ValueError, BSpline, **dict(t=[[1], [1]], c=[1.], k=0))
# for n+k+1 knots and degree k need at least n coefficients
assert_raises(ValueError, BSpline, **dict(t=[0, 1, 2], c=[1], k=0))
assert_raises(ValueError, BSpline,
**dict(t=[0, 1, 2, 3, 4], c=[1., 1.], k=2))
# non-integer orders
assert_raises(ValueError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k="cubic"))
assert_raises(ValueError, BSpline,
**dict(t=[0., 0., 1., 2., 3., 4.], c=[1., 1., 1.], k=2.5))
# basic interval cannot have measure zero (here: [1..1])
assert_raises(ValueError, BSpline,
**dict(t=[0., 0, 1, 1, 2, 3], c=[1., 1, 1], k=2))
# tck vs self.tck
n, k = 11, 3
t = np.arange(n+k+1)
c = np.random.random(n)
b = BSpline(t, c, k)
assert_allclose(t, b.t)
assert_allclose(c, b.c)
assert_equal(k, b.k)
def test_tck(self):
b = _make_random_spline()
tck = b.tck
assert_allclose(b.t, tck[0], atol=1e-15, rtol=1e-15)
assert_allclose(b.c, tck[1], atol=1e-15, rtol=1e-15)
assert_equal(b.k, tck[2])
# b.tck is read-only
try:
b.tck = 'foo'
except AttributeError:
pass
except:
raise AssertionError("AttributeError not raised.")
def test_degree_0(self):
xx = np.linspace(0, 1, 10)
b = BSpline(t=[0, 1], c=[3.], k=0)
assert_allclose(b(xx), 3)
b = BSpline(t=[0, 0.35, 1], c=[3, 4], k=0)
assert_allclose(b(xx), np.where(xx < 0.35, 3, 4))
def test_degree_1(self):
t = [0, 1, 2, 3, 4]
c = [1, 2, 3]
k = 1
b = BSpline(t, c, k)
x = np.linspace(1, 3, 50)
assert_allclose(c[0]*B_012(x) + c[1]*B_012(x-1) + c[2]*B_012(x-2),
b(x), atol=1e-14)
assert_allclose(splev(x, (t, c, k)), b(x), atol=1e-14)
def test_bernstein(self):
# a special knot vector: Bernstein polynomials
k = 3
t = np.asarray([0]*(k+1) + [1]*(k+1))
c = np.asarray([1., 2., 3., 4.])
bp = BPoly(c.reshape(-1, 1), [0, 1])
bspl = BSpline(t, c, k)
xx = np.linspace(-1., 2., 10)
assert_allclose(bp(xx, extrapolate=True),
bspl(xx, extrapolate=True), atol=1e-14)
assert_allclose(splev(xx, (t, c, k)),
bspl(xx), atol=1e-14)
def test_rndm_naive_eval(self):
# test random coefficient spline *on the base interval*,
# t[k] <= x < t[-k-1]
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
y_b = b(xx)
y_n = [_naive_eval(x, t, c, k) for x in xx]
assert_allclose(y_b, y_n, atol=1e-14)
y_n2 = [_naive_eval_2(x, t, c, k) for x in xx]
assert_allclose(y_b, y_n2, atol=1e-14)
def test_rndm_splev(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 50)
assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
def test_rndm_splrep(self):
np.random.seed(1234)
x = np.sort(np.random.random(20))
y = np.random.random(20)
tck = splrep(x, y)
b = BSpline(*tck)
t, k = b.t, b.k
xx = np.linspace(t[k], t[-k-1], 80)
assert_allclose(b(xx), splev(xx, tck), atol=1e-14)
def test_rndm_unity(self):
b = _make_random_spline()
b.c = np.ones_like(b.c)
xx = np.linspace(b.t[b.k], b.t[-b.k-1], 100)
assert_allclose(b(xx), 1.)
def test_vectorization(self):
n, k = 22, 3
t = np.sort(np.random.random(n))
c = np.random.random(size=(n, 6, 7))
b = BSpline(t, c, k)
tm, tp = t[k], t[-k-1]
xx = tm + (tp - tm) * np.random.random((3, 4, 5))
assert_equal(b(xx).shape, (3, 4, 5, 6, 7))
def test_len_c(self):
# for n+k+1 knots, only first n coefs are used.
# and BTW this is consistent with FITPACK
n, k = 33, 3
t = np.sort(np.random.random(n+k+1))
c = np.random.random(n)
# pad coefficients with random garbage
c_pad = np.r_[c, np.random.random(k+1)]
b, b_pad = BSpline(t, c, k), BSpline(t, c_pad, k)
dt = t[-1] - t[0]
xx = np.linspace(t[0] - dt, t[-1] + dt, 50)
assert_allclose(b(xx), b_pad(xx), atol=1e-14)
assert_allclose(b(xx), splev(xx, (t, c, k)), atol=1e-14)
assert_allclose(b(xx), splev(xx, (t, c_pad, k)), atol=1e-14)
def test_endpoints(self):
# base interval is closed
b = _make_random_spline()
t, _, k = b.tck
tm, tp = t[k], t[-k-1]
for extrap in (True, False):
assert_allclose(b([tm, tp], extrap),
b([tm + 1e-10, tp - 1e-10], extrap), atol=1e-9)
def test_continuity(self):
# assert continuity at internal knots
b = _make_random_spline()
t, _, k = b.tck
assert_allclose(b(t[k+1:-k-1] - 1e-10), b(t[k+1:-k-1] + 1e-10),
atol=1e-9)
def test_extrap(self):
b = _make_random_spline()
t, c, k = b.tck
dt = t[-1] - t[0]
xx = np.linspace(t[k] - dt, t[-k-1] + dt, 50)
mask = (t[k] < xx) & (xx < t[-k-1])
# extrap has no effect within the base interval
assert_allclose(b(xx[mask], extrapolate=True),
b(xx[mask], extrapolate=False))
# extrapolated values agree with FITPACK
assert_allclose(b(xx, extrapolate=True),
splev(xx, (t, c, k), ext=0))
def test_default_extrap(self):
# BSpline defaults to extrapolate=True
b = _make_random_spline()
t, _, k = b.tck
xx = [t[0] - 1, t[-1] + 1]
yy = b(xx)
assert_(not np.all(np.isnan(yy)))
def test_periodic_extrap(self):
np.random.seed(1234)
t = np.sort(np.random.random(8))
c = np.random.random(4)
k = 3
b = BSpline(t, c, k, extrapolate='periodic')
n = t.size - (k + 1)
dt = t[-1] - t[0]
xx = np.linspace(t[k] - dt, t[n] + dt, 50)
xy = t[k] + (xx - t[k]) % (t[n] - t[k])
assert_allclose(b(xx), splev(xy, (t, c, k)))
# Direct check
xx = [-1, 0, 0.5, 1]
xy = t[k] + (xx - t[k]) % (t[n] - t[k])
assert_equal(b(xx, extrapolate='periodic'), b(xy, extrapolate=True))
def test_ppoly(self):
b = _make_random_spline()
t, c, k = b.tck
pp = PPoly.from_spline((t, c, k))
xx = np.linspace(t[k], t[-k], 100)
assert_allclose(b(xx), pp(xx), atol=1e-14, rtol=1e-14)
def test_derivative_rndm(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[0], t[-1], 50)
xx = np.r_[xx, t]
for der in range(1, k+1):
yd = splev(xx, (t, c, k), der=der)
assert_allclose(yd, b(xx, nu=der), atol=1e-14)
# higher derivatives all vanish
assert_allclose(b(xx, nu=k+1), 0, atol=1e-14)
def test_derivative_jumps(self):
# example from de Boor, Chap IX, example (24)
# NB: knots augmented & corresp coefs are zeroed out
# in agreement with the convention (29)
k = 2
t = [-1, -1, 0, 1, 1, 3, 4, 6, 6, 6, 7, 7]
np.random.seed(1234)
c = np.r_[0, 0, np.random.random(5), 0, 0]
b = BSpline(t, c, k)
# b is continuous at x != 6 (triple knot)
x = np.asarray([1, 3, 4, 6])
assert_allclose(b(x[x != 6] - 1e-10),
b(x[x != 6] + 1e-10))
assert_(not np.allclose(b(6.-1e-10), b(6+1e-10)))
# 1st derivative jumps at double knots, 1 & 6:
x0 = np.asarray([3, 4])
assert_allclose(b(x0 - 1e-10, nu=1),
b(x0 + 1e-10, nu=1))
x1 = np.asarray([1, 6])
assert_(not np.all(np.allclose(b(x1 - 1e-10, nu=1),
b(x1 + 1e-10, nu=1))))
# 2nd derivative is not guaranteed to be continuous either
assert_(not np.all(np.allclose(b(x - 1e-10, nu=2),
b(x + 1e-10, nu=2))))
def test_basis_element_quadratic(self):
xx = np.linspace(-1, 4, 20)
b = BSpline.basis_element(t=[0, 1, 2, 3])
assert_allclose(b(xx),
splev(xx, (b.t, b.c, b.k)), atol=1e-14)
assert_allclose(b(xx),
B_0123(xx), atol=1e-14)
b = BSpline.basis_element(t=[0, 1, 1, 2])
xx = np.linspace(0, 2, 10)
assert_allclose(b(xx),
np.where(xx < 1, xx*xx, (2.-xx)**2), atol=1e-14)
def test_basis_element_rndm(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b(xx), _sum_basis_elements(xx, t, c, k), atol=1e-14)
def test_cmplx(self):
b = _make_random_spline()
t, c, k = b.tck
cc = c * (1. + 3.j)
b = BSpline(t, cc, k)
b_re = BSpline(t, b.c.real, k)
b_im = BSpline(t, b.c.imag, k)
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b(xx).real, b_re(xx), atol=1e-14)
assert_allclose(b(xx).imag, b_im(xx), atol=1e-14)
def test_nan(self):
# nan in, nan out.
b = BSpline.basis_element([0, 1, 1, 2])
assert_(np.isnan(b(np.nan)))
def test_derivative_method(self):
b = _make_random_spline(k=5)
t, c, k = b.tck
b0 = BSpline(t, c, k)
xx = np.linspace(t[k], t[-k-1], 20)
for j in range(1, k):
b = b.derivative()
assert_allclose(b0(xx, j), b(xx), atol=1e-12, rtol=1e-12)
def test_antiderivative_method(self):
b = _make_random_spline()
t, c, k = b.tck
xx = np.linspace(t[k], t[-k-1], 20)
assert_allclose(b.antiderivative().derivative()(xx),
b(xx), atol=1e-14, rtol=1e-14)
# repeat with n-D array for c
c = np.c_[c, c, c]
c = np.dstack((c, c))
b = BSpline(t, c, k)
assert_allclose(b.antiderivative().derivative()(xx),
b(xx), atol=1e-14, rtol=1e-14)
def test_integral(self):
b = BSpline.basis_element([0, 1, 2]) # x for x < 1 else 2 - x
assert_allclose(b.integrate(0, 1), 0.5)
assert_allclose(b.integrate(1, 0), -1 * 0.5)
assert_allclose(b.integrate(1, 0), -0.5)
# extrapolate or zeros outside of [0, 2]; default is yes
assert_allclose(b.integrate(-1, 1), 0)
assert_allclose(b.integrate(-1, 1, extrapolate=True), 0)
assert_allclose(b.integrate(-1, 1, extrapolate=False), 0.5)
assert_allclose(b.integrate(1, -1, extrapolate=False), -1 * 0.5)
# Test ``_fitpack._splint()``
t, c, k = b.tck
assert_allclose(b.integrate(1, -1, extrapolate=False),
_splint(t, c, k, 1, -1)[0])
# Test ``extrapolate='periodic'``.
b.extrapolate = 'periodic'
i = b.antiderivative()
period_int = i(2) - i(0)
assert_allclose(b.integrate(0, 2), period_int)
assert_allclose(b.integrate(2, 0), -1 * period_int)
assert_allclose(b.integrate(-9, -7), period_int)
assert_allclose(b.integrate(-8, -4), 2 * period_int)
assert_allclose(b.integrate(0.5, 1.5), i(1.5) - i(0.5))
assert_allclose(b.integrate(1.5, 3), i(1) - i(0) + i(2) - i(1.5))
assert_allclose(b.integrate(1.5 + 12, 3 + 12),
i(1) - i(0) + i(2) - i(1.5))
assert_allclose(b.integrate(1.5, 3 + 12),
i(1) - i(0) + i(2) - i(1.5) + 6 * period_int)
assert_allclose(b.integrate(0, -1), i(0) - i(1))
assert_allclose(b.integrate(-9, -10), i(0) - i(1))
assert_allclose(b.integrate(0, -9), i(1) - i(2) - 4 * period_int)
def test_integrate_ppoly(self):
# test .integrate method to be consistent with PPoly.integrate
x = [0, 1, 2, 3, 4]
b = make_interp_spline(x, x)
b.extrapolate = 'periodic'
p = PPoly.from_spline(b)
for x0, x1 in [(-5, 0.5), (0.5, 5), (-4, 13)]:
assert_allclose(b.integrate(x0, x1),
p.integrate(x0, x1))
def test_subclassing(self):
# classmethods should not decay to the base class
class B(BSpline):
pass
b = B.basis_element([0, 1, 2, 2])
assert_equal(b.__class__, B)
assert_equal(b.derivative().__class__, B)
assert_equal(b.antiderivative().__class__, B)
def test_axis(self):
n, k = 22, 3
t = np.linspace(0, 1, n + k + 1)
sh0 = [6, 7, 8]
for axis in range(4):
sh = sh0[:]
sh.insert(axis, n) # [22, 6, 7, 8] etc
c = np.random.random(size=sh)
b = BSpline(t, c, k, axis=axis)
assert_equal(b.c.shape,
[sh[axis],] + sh[:axis] + sh[axis+1:])
xp = np.random.random((3, 4, 5))
assert_equal(b(xp).shape,
sh[:axis] + list(xp.shape) + sh[axis+1:])
#0 <= axis < c.ndim
for ax in [-1, len(sh)+1]:
assert_raises(ValueError, BSpline, **dict(t=t, c=c, k=k, axis=ax))
# derivative, antiderivative keeps the axis
for b1 in [BSpline(t, c, k, axis=axis).derivative(),
BSpline(t, c, k, axis=axis).derivative(2),
BSpline(t, c, k, axis=axis).antiderivative(),
BSpline(t, c, k, axis=axis).antiderivative(2)]:
assert_equal(b1.axis, b.axis)
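# Illustrative sketch (not part of the original test suite): with
# ``extrapolate='periodic'`` a query point x is first folded into the base
# interval [t[k], t[n]] via x -> t[k] + (x - t[k]) % (t[n] - t[k]); this is
# the mapping that ``test_periodic_extrap`` above checks against FITPACK.
def _demo_periodic_fold():
    import numpy as np
    from scipy.interpolate import BSpline
    k = 3
    t = np.arange(8, dtype=float)        # n + k + 1 = 8 knots => n = 4 coefs
    c = np.linspace(1, 2, 4)
    b = BSpline(t, c, k, extrapolate='periodic')
    n = t.size - (k + 1)
    xx = np.linspace(-2, 10, 25)
    folded = t[k] + (xx - t[k]) % (t[n] - t[k])
    assert np.allclose(b(xx), BSpline(t, c, k)(folded))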
def test_knots_multiplicity():
# Take a spline w/ random coefficients, throw in knots of varying
# multiplicity.
def check_splev(b, j, der=0, atol=1e-14, rtol=1e-14):
# check evaluations against FITPACK, incl extrapolations
t, c, k = b.tck
x = np.unique(t)
        x = np.r_[t[0]-0.1, 0.5*(x[1:] + x[:-1]), t[-1]+0.1]
assert_allclose(splev(x, (t, c, k), der), b(x, der),
atol=atol, rtol=rtol, err_msg='der = %s k = %s' % (der, b.k))
# test loop itself
# [the index `j` is for interpreting the traceback in case of a failure]
for k in [1, 2, 3, 4, 5]:
b = _make_random_spline(k=k)
for j, b1 in enumerate(_make_multiples(b)):
check_splev(b1, j)
for der in range(1, k+1):
check_splev(b1, j, der, 1e-12, 1e-12)
### stolen from @pv, verbatim
def _naive_B(x, k, i, t):
"""
Naive way to compute B-spline basis functions. Useful only for testing!
computes B(x; t[i],..., t[i+k+1])
"""
if k == 0:
return 1.0 if t[i] <= x < t[i+1] else 0.0
if t[i+k] == t[i]:
c1 = 0.0
else:
c1 = (x - t[i])/(t[i+k] - t[i]) * _naive_B(x, k-1, i, t)
if t[i+k+1] == t[i+1]:
c2 = 0.0
else:
c2 = (t[i+k+1] - x)/(t[i+k+1] - t[i+1]) * _naive_B(x, k-1, i+1, t)
return (c1 + c2)
### stolen from @pv, verbatim
def _naive_eval(x, t, c, k):
"""
Naive B-spline evaluation. Useful only for testing!
"""
if x == t[k]:
i = k
else:
i = np.searchsorted(t, x) - 1
assert t[i] <= x <= t[i+1]
assert i >= k and i < len(t) - k
return sum(c[i-j] * _naive_B(x, k, i-j, t) for j in range(0, k+1))
def _naive_eval_2(x, t, c, k):
"""Naive B-spline evaluation, another way."""
n = len(t) - (k+1)
assert n >= k+1
assert len(c) >= n
assert t[k] <= x <= t[n]
return sum(c[i] * _naive_B(x, k, i, t) for i in range(n))
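# Illustrative sketch (not part of the original test suite): on the base
# interval the basis functions computed by ``_naive_B`` form a partition of
# unity, which is why ``test_rndm_unity`` above evaluates to a constant 1.
def _demo_partition_of_unity():
    import numpy as np
    k = 3
    t = np.arange(10, dtype=float)       # 10 knots, degree 3 => n = 6
    n = len(t) - (k + 1)
    for x in np.linspace(t[k], t[n], 7, endpoint=False):
        s = sum(_naive_B(x, k, i, t) for i in range(n))
        assert abs(s - 1.0) < 1e-12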
def _sum_basis_elements(x, t, c, k):
n = len(t) - (k+1)
assert n >= k+1
assert len(c) >= n
s = 0.
for i in range(n):
b = BSpline.basis_element(t[i:i+k+2], extrapolate=False)(x)
s += c[i] * np.nan_to_num(b) # zero out out-of-bounds elements
return s
def B_012(x):
""" A linear B-spline function B(x | 0, 1, 2)."""
x = np.atleast_1d(x)
return np.piecewise(x, [(x < 0) | (x > 2),
(x >= 0) & (x < 1),
(x >= 1) & (x <= 2)],
[lambda x: 0., lambda x: x, lambda x: 2.-x])
def B_0123(x, der=0):
"""A quadratic B-spline function B(x | 0, 1, 2, 3)."""
x = np.atleast_1d(x)
conds = [x < 1, (x > 1) & (x < 2), x > 2]
if der == 0:
funcs = [lambda x: x*x/2.,
lambda x: 3./4 - (x-3./2)**2,
lambda x: (3.-x)**2 / 2]
elif der == 2:
funcs = [lambda x: 1.,
lambda x: -2.,
lambda x: 1.]
else:
raise ValueError('never be here: der=%s' % der)
pieces = np.piecewise(x, conds, funcs)
return pieces
def _make_random_spline(n=35, k=3):
np.random.seed(123)
t = np.sort(np.random.random(n+k+1))
c = np.random.random(n)
return BSpline.construct_fast(t, c, k)
def _make_multiples(b):
"""Increase knot multiplicity."""
c, k = b.c, b.k
t1 = b.t.copy()
t1[17:19] = t1[17]
t1[22] = t1[21]
yield BSpline(t1, c, k)
t1 = b.t.copy()
t1[:k+1] = t1[0]
yield BSpline(t1, c, k)
t1 = b.t.copy()
t1[-k-1:] = t1[-1]
yield BSpline(t1, c, k)
class TestInterop(object):
#
# Test that FITPACK-based spl* functions can deal with BSpline objects
#
def setup_method(self):
xx = np.linspace(0, 4.*np.pi, 41)
yy = np.cos(xx)
b = make_interp_spline(xx, yy)
self.tck = (b.t, b.c, b.k)
self.xx, self.yy, self.b = xx, yy, b
self.xnew = np.linspace(0, 4.*np.pi, 21)
c2 = np.c_[b.c, b.c, b.c]
self.c2 = np.dstack((c2, c2))
self.b2 = BSpline(b.t, self.c2, b.k)
def test_splev(self):
xnew, b, b2 = self.xnew, self.b, self.b2
# check that splev works with 1D array of coefficients
# for array and scalar `x`
assert_allclose(splev(xnew, b),
b(xnew), atol=1e-15, rtol=1e-15)
assert_allclose(splev(xnew, b.tck),
b(xnew), atol=1e-15, rtol=1e-15)
assert_allclose([splev(x, b) for x in xnew],
b(xnew), atol=1e-15, rtol=1e-15)
        # With n-D coefficients, there's a quirk:
# splev(x, BSpline) is equivalent to BSpline(x)
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling splev.. with BSpline objects with c.ndim > 1 is not recommended.")
assert_allclose(splev(xnew, b2), b2(xnew), atol=1e-15, rtol=1e-15)
# However, splev(x, BSpline.tck) needs some transposes. This is because
# BSpline interpolates along the first axis, while the legacy FITPACK
# wrapper does list(map(...)) which effectively interpolates along the
# last axis. Like so:
sh = tuple(range(1, b2.c.ndim)) + (0,) # sh = (1, 2, 0)
cc = b2.c.transpose(sh)
tck = (b2.t, cc, b2.k)
assert_allclose(splev(xnew, tck),
b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)
def test_splrep(self):
x, y = self.xx, self.yy
# test that "new" splrep is equivalent to _impl.splrep
tck = splrep(x, y)
t, c, k = _impl.splrep(x, y)
assert_allclose(tck[0], t, atol=1e-15)
assert_allclose(tck[1], c, atol=1e-15)
assert_equal(tck[2], k)
# also cover the `full_output=True` branch
tck_f, _, _, _ = splrep(x, y, full_output=True)
assert_allclose(tck_f[0], t, atol=1e-15)
assert_allclose(tck_f[1], c, atol=1e-15)
assert_equal(tck_f[2], k)
# test that the result of splrep roundtrips with splev:
# evaluate the spline on the original `x` points
yy = splev(x, tck)
assert_allclose(y, yy, atol=1e-15)
# ... and also it roundtrips if wrapped in a BSpline
b = BSpline(*tck)
assert_allclose(y, b(x), atol=1e-15)
def test_splrep_errors(self):
# test that both "old" and "new" splrep raise for an n-D ``y`` array
# with n > 1
x, y = self.xx, self.yy
y2 = np.c_[y, y]
msg = "failed in converting 3rd argument `y' of dfitpack.curfit to C/Fortran array"
with assert_raises(Exception, message=msg):
splrep(x, y2)
with assert_raises(Exception, message=msg):
_impl.splrep(x, y2)
# input below minimum size
with assert_raises(TypeError, message="m > k must hold"):
splrep(x[:3], y[:3])
with assert_raises(TypeError, message="m > k must hold"):
_impl.splrep(x[:3], y[:3])
def test_splprep(self):
x = np.arange(15).reshape((3, 5))
b, u = splprep(x)
tck, u1 = _impl.splprep(x)
# test the roundtrip with splev for both "old" and "new" output
assert_allclose(u, u1, atol=1e-15)
assert_allclose(splev(u, b), x, atol=1e-15)
assert_allclose(splev(u, tck), x, atol=1e-15)
# cover the ``full_output=True`` branch
(b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True)
assert_allclose(u, u_f, atol=1e-15)
assert_allclose(splev(u_f, b_f), x, atol=1e-15)
def test_splprep_errors(self):
# test that both "old" and "new" code paths raise for x.ndim > 2
x = np.arange(3*4*5).reshape((3, 4, 5))
with assert_raises(ValueError, message="too many values to unpack"):
splprep(x)
with assert_raises(ValueError, message="too many values to unpack"):
_impl.splprep(x)
# input below minimum size
x = np.linspace(0, 40, num=3)
with assert_raises(TypeError, message="m > k must hold"):
splprep([x])
with assert_raises(TypeError, message="m > k must hold"):
_impl.splprep([x])
# automatically calculated parameters are non-increasing
# see gh-7589
x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266]
with assert_raises(ValueError, message="Invalid inputs"):
splprep([x])
with assert_raises(ValueError, message="Invalid inputs"):
_impl.splprep([x])
# given non-increasing parameter values u
x = [1, 3, 2, 4]
u = [0, 0.3, 0.2, 1]
with assert_raises(ValueError, message="Invalid inputs"):
splprep(*[[x], None, u])
def test_sproot(self):
b, b2 = self.b, self.b2
roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi
# sproot accepts a BSpline obj w/ 1D coef array
assert_allclose(sproot(b), roots, atol=1e-7, rtol=1e-7)
assert_allclose(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7)
# ... and deals with trailing dimensions if coef array is n-D
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling sproot.. with BSpline objects with c.ndim > 1 is not recommended.")
r = sproot(b2, mest=50)
r = np.asarray(r)
assert_equal(r.shape, (3, 2, 4))
assert_allclose(r - roots, 0, atol=1e-12)
# and legacy behavior is preserved for a tck tuple w/ n-D coef
c2r = b2.c.transpose(1, 2, 0)
rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50))
assert_equal(rr.shape, (3, 2, 4))
assert_allclose(rr - roots, 0, atol=1e-12)
def test_splint(self):
# test that splint accepts BSpline objects
b, b2 = self.b, self.b2
assert_allclose(splint(0, 1, b),
splint(0, 1, b.tck), atol=1e-14)
assert_allclose(splint(0, 1, b),
b.integrate(0, 1), atol=1e-14)
# ... and deals with n-D arrays of coefficients
with suppress_warnings() as sup:
sup.filter(DeprecationWarning,
"Calling splint.. with BSpline objects with c.ndim > 1 is not recommended.")
assert_allclose(splint(0, 1, b2), b2.integrate(0, 1), atol=1e-14)
# and the legacy behavior is preserved for a tck tuple w/ n-D coef
c2r = b2.c.transpose(1, 2, 0)
integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k)))
assert_equal(integr.shape, (3, 2))
assert_allclose(integr,
splint(0, 1, b), atol=1e-14)
def test_splder(self):
for b in [self.b, self.b2]:
# pad the c array (FITPACK convention)
ct = len(b.t) - len(b.c)
if ct > 0:
b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
for n in [1, 2, 3]:
bd = splder(b)
tck_d = _impl.splder((b.t, b.c, b.k))
assert_allclose(bd.t, tck_d[0], atol=1e-15)
assert_allclose(bd.c, tck_d[1], atol=1e-15)
assert_equal(bd.k, tck_d[2])
assert_(isinstance(bd, BSpline))
assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out
def test_splantider(self):
for b in [self.b, self.b2]:
# pad the c array (FITPACK convention)
ct = len(b.t) - len(b.c)
if ct > 0:
b.c = np.r_[b.c, np.zeros((ct,) + b.c.shape[1:])]
for n in [1, 2, 3]:
bd = splantider(b)
tck_d = _impl.splantider((b.t, b.c, b.k))
assert_allclose(bd.t, tck_d[0], atol=1e-15)
assert_allclose(bd.c, tck_d[1], atol=1e-15)
assert_equal(bd.k, tck_d[2])
assert_(isinstance(bd, BSpline))
assert_(isinstance(tck_d, tuple)) # back-compat: tck in and out
def test_insert(self):
b, b2, xx = self.b, self.b2, self.xx
j = b.t.size // 2
tn = 0.5*(b.t[j] + b.t[j+1])
bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))
assert_allclose(splev(xx, bn),
splev(xx, tck_n), atol=1e-15)
assert_(isinstance(bn, BSpline))
assert_(isinstance(tck_n, tuple)) # back-compat: tck in, tck out
# for n-D array of coefficients, BSpline.c needs to be transposed
# after that, the results are equivalent.
sh = tuple(range(b2.c.ndim))
c_ = b2.c.transpose(sh[1:] + (0,))
tck_n2 = insert(tn, (b2.t, c_, b2.k))
bn2 = insert(tn, b2)
# need a transpose for comparing the results, cf test_splev
assert_allclose(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),
bn2(xx), atol=1e-15)
assert_(isinstance(bn2, BSpline))
assert_(isinstance(tck_n2, tuple)) # back-compat: tck in, tck out
class TestInterp(object):
#
# Test basic ways of constructing interpolating splines.
#
xx = np.linspace(0., 2.*np.pi)
yy = np.sin(xx)
def test_order_0(self):
b = make_interp_spline(self.xx, self.yy, k=0)
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_linear(self):
b = make_interp_spline(self.xx, self.yy, k=1)
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_not_a_knot(self):
for k in [3, 5]:
b = make_interp_spline(self.xx, self.yy, k)
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_quadratic_deriv(self):
der = [(1, 8.)] # order, value: f'(x) = 8.
# derivative at right-hand edge
b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(None, der))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
assert_allclose(b(self.xx[-1], 1), der[0][1], atol=1e-14, rtol=1e-14)
# derivative at left-hand edge
b = make_interp_spline(self.xx, self.yy, k=2, bc_type=(der, None))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
assert_allclose(b(self.xx[0], 1), der[0][1], atol=1e-14, rtol=1e-14)
def test_cubic_deriv(self):
k = 3
# first derivatives at left & right edges:
der_l, der_r = [(1, 3.)], [(1, 4.)]
b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
assert_allclose([b(self.xx[0], 1), b(self.xx[-1], 1)],
[der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
# 'natural' cubic spline, zero out 2nd derivatives at the boundaries
der_l, der_r = [(2, 0)], [(2, 0)]
b = make_interp_spline(self.xx, self.yy, k, bc_type=(der_l, der_r))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_quintic_derivs(self):
k, n = 5, 7
x = np.arange(n).astype(np.float_)
y = np.sin(x)
der_l = [(1, -12.), (2, 1)]
der_r = [(1, 8.), (2, 3.)]
b = make_interp_spline(x, y, k=k, bc_type=(der_l, der_r))
assert_allclose(b(x), y, atol=1e-14, rtol=1e-14)
assert_allclose([b(x[0], 1), b(x[0], 2)],
[val for (nu, val) in der_l])
assert_allclose([b(x[-1], 1), b(x[-1], 2)],
[val for (nu, val) in der_r])
@pytest.mark.xfail(reason='unstable')
def test_cubic_deriv_unstable(self):
# 1st and 2nd derivative at x[0], no derivative information at x[-1]
# The problem is not that it fails [who would use this anyway],
# the problem is that it fails *silently*, and I've no idea
# how to detect this sort of instability.
# In this particular case: it's OK for len(t) < 20, goes haywire
# at larger `len(t)`.
k = 3
t = _augknt(self.xx, k)
der_l = [(1, 3.), (2, 4.)]
b = make_interp_spline(self.xx, self.yy, k, t, bc_type=(der_l, None))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
def test_knots_not_data_sites(self):
# Knots need not coincide with the data sites.
# use a quadratic spline, knots are at data averages,
# two additional constraints are zero 2nd derivs at edges
k = 2
t = np.r_[(self.xx[0],)*(k+1),
(self.xx[1:] + self.xx[:-1]) / 2.,
(self.xx[-1],)*(k+1)]
b = make_interp_spline(self.xx, self.yy, k, t,
bc_type=([(2, 0)], [(2, 0)]))
assert_allclose(b(self.xx), self.yy, atol=1e-14, rtol=1e-14)
assert_allclose([b(self.xx[0], 2), b(self.xx[-1], 2)], [0., 0.],
atol=1e-14)
def test_minimum_points_and_deriv(self):
# interpolation of f(x) = x**3 between 0 and 1. f'(x) = 3 * xx**2 and
# f'(0) = 0, f'(1) = 3.
k = 3
x = [0., 1.]
y = [0., 1.]
b = make_interp_spline(x, y, k, bc_type=([(1, 0.)], [(1, 3.)]))
xx = np.linspace(0., 1.)
yy = xx**3
assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
# If one of the derivatives is omitted, the spline definition is
# incomplete:
assert_raises(ValueError, make_interp_spline, x, y, k,
**dict(bc_type=([(1, 0.)], None)))
def test_complex(self):
k = 3
xx = self.xx
yy = self.yy + 1.j*self.yy
# first derivatives at left & right edges:
der_l, der_r = [(1, 3.j)], [(1, 4.+2.j)]
b = make_interp_spline(xx, yy, k, bc_type=(der_l, der_r))
assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
assert_allclose([b(xx[0], 1), b(xx[-1], 1)],
[der_l[0][1], der_r[0][1]], atol=1e-14, rtol=1e-14)
# also test zero and first order
for k in (0, 1):
b = make_interp_spline(xx, yy, k=k)
assert_allclose(b(xx), yy, atol=1e-14, rtol=1e-14)
def test_int_xy(self):
x = np.arange(10).astype(np.int_)
y = np.arange(10).astype(np.int_)
# cython chokes on "buffer type mismatch" (construction) or
# "no matching signature found" (evaluation)
for k in (0, 1, 2, 3):
b = make_interp_spline(x, y, k=k)
b(x)
def test_sliced_input(self):
# cython code chokes on non C contiguous arrays
xx = np.linspace(-1, 1, 100)
x = xx[::5]
y = xx[::5]
for k in (0, 1, 2, 3):
make_interp_spline(x, y, k=k)
def test_check_finite(self):
# check_finite defaults to True; nans and such trigger a ValueError
x = np.arange(10).astype(float)
y = x**2
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, make_interp_spline, x, y)
@pytest.mark.parametrize('k', [1, 2, 3, 5])
def test_list_input(self, k):
# regression test for gh-8714: TypeError for x, y being lists and k=2
x = list(range(10))
y = [a**2 for a in x]
make_interp_spline(x, y, k=k)
def test_multiple_rhs(self):
yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
der_l = [(1, [1., 2.])]
der_r = [(1, [3., 4.])]
b = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
assert_allclose(b(self.xx), yy, atol=1e-14, rtol=1e-14)
assert_allclose(b(self.xx[0], 1), der_l[0][1], atol=1e-14, rtol=1e-14)
assert_allclose(b(self.xx[-1], 1), der_r[0][1], atol=1e-14, rtol=1e-14)
def test_shapes(self):
np.random.seed(1234)
k, n = 3, 22
x = np.sort(np.random.random(size=n))
y = np.random.random(size=(n, 5, 6, 7))
b = make_interp_spline(x, y, k)
assert_equal(b.c.shape, (n, 5, 6, 7))
# now throw in some derivatives
d_l = [(1, np.random.random((5, 6, 7)))]
d_r = [(1, np.random.random((5, 6, 7)))]
b = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
assert_equal(b.c.shape, (n + k - 1, 5, 6, 7))
def test_string_aliases(self):
yy = np.sin(self.xx)
# a single string is duplicated
b1 = make_interp_spline(self.xx, yy, k=3, bc_type='natural')
b2 = make_interp_spline(self.xx, yy, k=3, bc_type=([(2, 0)], [(2, 0)]))
assert_allclose(b1.c, b2.c, atol=1e-15)
# two strings are handled
b1 = make_interp_spline(self.xx, yy, k=3,
bc_type=('natural', 'clamped'))
b2 = make_interp_spline(self.xx, yy, k=3,
bc_type=([(2, 0)], [(1, 0)]))
assert_allclose(b1.c, b2.c, atol=1e-15)
# one-sided BCs are OK
b1 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, 'clamped'))
b2 = make_interp_spline(self.xx, yy, k=2, bc_type=(None, [(1, 0.0)]))
assert_allclose(b1.c, b2.c, atol=1e-15)
# 'not-a-knot' is equivalent to None
b1 = make_interp_spline(self.xx, yy, k=3, bc_type='not-a-knot')
b2 = make_interp_spline(self.xx, yy, k=3, bc_type=None)
assert_allclose(b1.c, b2.c, atol=1e-15)
# unknown strings do not pass
with assert_raises(ValueError):
make_interp_spline(self.xx, yy, k=3, bc_type='typo')
# string aliases are handled for 2D values
yy = np.c_[np.sin(self.xx), np.cos(self.xx)]
der_l = [(1, [0., 0.])]
der_r = [(2, [0., 0.])]
b2 = make_interp_spline(self.xx, yy, k=3, bc_type=(der_l, der_r))
b1 = make_interp_spline(self.xx, yy, k=3,
bc_type=('clamped', 'natural'))
assert_allclose(b1.c, b2.c, atol=1e-15)
# ... and for n-D values:
np.random.seed(1234)
k, n = 3, 22
x = np.sort(np.random.random(size=n))
y = np.random.random(size=(n, 5, 6, 7))
# now throw in some derivatives
d_l = [(1, np.zeros((5, 6, 7)))]
d_r = [(1, np.zeros((5, 6, 7)))]
b1 = make_interp_spline(x, y, k, bc_type=(d_l, d_r))
b2 = make_interp_spline(x, y, k, bc_type='clamped')
assert_allclose(b1.c, b2.c, atol=1e-15)
def test_full_matrix(self):
np.random.seed(1234)
k, n = 3, 7
x = np.sort(np.random.random(size=n))
y = np.random.random(size=n)
t = _not_a_knot(x, k)
b = make_interp_spline(x, y, k, t)
cf = make_interp_full_matr(x, y, t, k)
assert_allclose(b.c, cf, atol=1e-14, rtol=1e-14)
def make_interp_full_matr(x, y, t, k):
"""Assemble an spline order k with knots t to interpolate
y(x) using full matrices.
Not-a-knot BC only.
This routine is here for testing only (even though it's functional).
"""
assert x.size == y.size
assert t.size == x.size + k + 1
n = x.size
A = np.zeros((n, n), dtype=np.float_)
for j in range(n):
xval = x[j]
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _bspl.evaluate_all_bspl(t, k, xval, left)
A[j, left-k:left+1] = bb
c = sl.solve(A, y)
return c
### XXX: 'periodic' interp spline using full matrices
def make_interp_per_full_matr(x, y, t, k):
x, y, t = map(np.asarray, (x, y, t))
n = x.size
nt = t.size - k - 1
# have `n` conditions for `nt` coefficients; need nt-n derivatives
assert nt - n == k - 1
# LHS: the collocation matrix + derivatives at edges
A = np.zeros((nt, nt), dtype=np.float_)
# derivatives at x[0]:
offset = 0
if x[0] == t[k]:
left = k
else:
left = np.searchsorted(t, x[0]) - 1
if x[-1] == t[k]:
left2 = k
else:
left2 = np.searchsorted(t, x[-1]) - 1
for i in range(k-1):
bb = _bspl.evaluate_all_bspl(t, k, x[0], left, nu=i+1)
A[i, left-k:left+1] = bb
bb = _bspl.evaluate_all_bspl(t, k, x[-1], left2, nu=i+1)
A[i, left2-k:left2+1] = -bb
offset += 1
# RHS
y = np.r_[[0]*(k-1), y]
# collocation matrix
for j in range(n):
xval = x[j]
# find interval
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _bspl.evaluate_all_bspl(t, k, xval, left)
A[j + offset, left-k:left+1] = bb
c = sl.solve(A, y)
return c
def make_lsq_full_matrix(x, y, t, k=3):
"""Make the least-square spline, full matrices."""
x, y, t = map(np.asarray, (x, y, t))
m = x.size
n = t.size - k - 1
A = np.zeros((m, n), dtype=np.float_)
for j in range(m):
xval = x[j]
# find interval
if xval == t[k]:
left = k
else:
left = np.searchsorted(t, xval) - 1
# fill a row
bb = _bspl.evaluate_all_bspl(t, k, xval, left)
A[j, left-k:left+1] = bb
# have observation matrix, can solve the LSQ problem
B = np.dot(A.T, A)
Y = np.dot(A.T, y)
c = sl.solve(B, Y)
return c, (A, Y)
class TestLSQ(object):
#
# Test make_lsq_spline
#
np.random.seed(1234)
n, k = 13, 3
x = np.sort(np.random.random(n))
y = np.random.random(n)
t = _augknt(np.linspace(x[0], x[-1], 7), k)
def test_lstsq(self):
# check LSQ construction vs a full matrix version
x, y, t, k = self.x, self.y, self.t, self.k
c0, AY = make_lsq_full_matrix(x, y, t, k)
b = make_lsq_spline(x, y, t, k)
assert_allclose(b.c, c0)
assert_equal(b.c.shape, (t.size - k - 1,))
# also check against numpy.lstsq
aa, yy = AY
c1, _, _, _ = np.linalg.lstsq(aa, y, rcond=-1)
assert_allclose(b.c, c1)
def test_weights(self):
# weights = 1 is same as None
x, y, t, k = self.x, self.y, self.t, self.k
w = np.ones_like(x)
b = make_lsq_spline(x, y, t, k)
b_w = make_lsq_spline(x, y, t, k, w=w)
assert_allclose(b.t, b_w.t, atol=1e-14)
assert_allclose(b.c, b_w.c, atol=1e-14)
assert_equal(b.k, b_w.k)
def test_multiple_rhs(self):
x, t, k, n = self.x, self.t, self.k, self.n
y = np.random.random(size=(n, 5, 6, 7))
b = make_lsq_spline(x, y, t, k)
assert_equal(b.c.shape, (t.size-k-1, 5, 6, 7))
def test_complex(self):
# cmplx-valued `y`
x, t, k = self.x, self.t, self.k
yc = self.y * (1. + 2.j)
b = make_lsq_spline(x, yc, t, k)
b_re = make_lsq_spline(x, yc.real, t, k)
b_im = make_lsq_spline(x, yc.imag, t, k)
assert_allclose(b(x), b_re(x) + 1.j*b_im(x), atol=1e-15, rtol=1e-15)
def test_int_xy(self):
x = np.arange(10).astype(np.int_)
y = np.arange(10).astype(np.int_)
t = _augknt(x, k=1)
# cython chokes on "buffer type mismatch"
make_lsq_spline(x, y, t, k=1)
def test_sliced_input(self):
# cython code chokes on non C contiguous arrays
xx = np.linspace(-1, 1, 100)
x = xx[::3]
y = xx[::3]
t = _augknt(x, 1)
make_lsq_spline(x, y, t, k=1)
def test_checkfinite(self):
# check_finite defaults to True; nans and such trigger a ValueError
x = np.arange(12).astype(float)
y = x**2
t = _augknt(x, 3)
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, make_lsq_spline, x, y, t)
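# Illustrative sketch (not part of the original test suite): a typical
# ``make_lsq_spline`` call -- fit a least-squares spline with a handful of
# knots to noisy data, the construction the TestLSQ cases above exercise.
def _demo_lsq_spline():
    import numpy as np
    from scipy.interpolate import make_lsq_spline
    from scipy.interpolate._bsplines import _augknt   # private helper, as used above
    rng = np.random.RandomState(0)
    x = np.linspace(0, 2 * np.pi, 60)
    y = np.sin(x) + 0.1 * rng.standard_normal(x.size)
    k = 3
    t = _augknt(np.linspace(x[0], x[-1], 7), k)
    b = make_lsq_spline(x, y, t, k)
    # the fit smooths out most of the noise; the bound here is deliberately loose
    assert np.abs(b(x) - np.sin(x)).max() < 0.2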
| 42,531 | 33.748366 | 99 |
py
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
cba-pipeline-public
|
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_ndgriddata.py
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_equal, assert_array_equal, assert_allclose
from pytest import raises as assert_raises
from scipy.interpolate import griddata, NearestNDInterpolator
class TestGriddata(object):
def test_fill_value(self):
x = [(0,0), (0,1), (1,0)]
y = [1, 2, 3]
yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
assert_array_equal(yi, [-1., -1, 1])
yi = griddata(x, y, [(1,1), (1,2), (0,0)])
assert_array_equal(yi, [np.nan, np.nan, 1])
def test_alternative_call(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+ np.array([0,1])[None,:])
for method in ('nearest', 'linear', 'cubic'):
for rescale in (True, False):
msg = repr((method, rescale))
yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method,
rescale=rescale)
assert_allclose(y, yi, atol=1e-14, err_msg=msg)
def test_multivalue_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = (np.arange(x.shape[0], dtype=np.double)[:,None]
+ np.array([0,1])[None,:])
for method in ('nearest', 'linear', 'cubic'):
for rescale in (True, False):
msg = repr((method, rescale))
yi = griddata(x, y, x, method=method, rescale=rescale)
assert_allclose(y, yi, atol=1e-14, err_msg=msg)
def test_multipoint_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
for method in ('nearest', 'linear', 'cubic'):
for rescale in (True, False):
msg = repr((method, rescale))
yi = griddata(x, y, xi, method=method, rescale=rescale)
assert_equal(yi.shape, (5, 3), err_msg=msg)
assert_allclose(yi, np.tile(y[:,None], (1, 3)),
atol=1e-14, err_msg=msg)
def test_complex_2d(self):
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 2j*y[::-1]
xi = x[:,None,:] + np.array([0,0,0])[None,:,None]
for method in ('nearest', 'linear', 'cubic'):
for rescale in (True, False):
msg = repr((method, rescale))
yi = griddata(x, y, xi, method=method, rescale=rescale)
assert_equal(yi.shape, (5, 3), err_msg=msg)
assert_allclose(yi, np.tile(y[:,None], (1, 3)),
atol=1e-14, err_msg=msg)
def test_1d(self):
x = np.array([1, 2.5, 3, 4.5, 5, 6])
y = np.array([1, 2, 0, 3.9, 2, 1])
for method in ('nearest', 'linear', 'cubic'):
assert_allclose(griddata(x, y, x, method=method), y,
err_msg=method, atol=1e-14)
assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
err_msg=method, atol=1e-14)
assert_allclose(griddata((x,), y, (x,), method=method), y,
err_msg=method, atol=1e-14)
def test_1d_borders(self):
# Test for nearest neighbor case with xi outside
# the range of the values.
x = np.array([1, 2.5, 3, 4.5, 5, 6])
y = np.array([1, 2, 0, 3.9, 2, 1])
xi = np.array([0.9, 6.5])
yi_should = np.array([1.0, 1.0])
method = 'nearest'
assert_allclose(griddata(x, y, xi,
method=method), yi_should,
err_msg=method,
atol=1e-14)
assert_allclose(griddata(x.reshape(6, 1), y, xi,
method=method), yi_should,
err_msg=method,
atol=1e-14)
assert_allclose(griddata((x, ), y, (xi, ),
method=method), yi_should,
err_msg=method,
atol=1e-14)
def test_1d_unsorted(self):
x = np.array([2.5, 1, 4.5, 5, 6, 3])
y = np.array([1, 2, 0, 3.9, 2, 1])
for method in ('nearest', 'linear', 'cubic'):
assert_allclose(griddata(x, y, x, method=method), y,
err_msg=method, atol=1e-10)
assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y,
err_msg=method, atol=1e-10)
assert_allclose(griddata((x,), y, (x,), method=method), y,
err_msg=method, atol=1e-10)
def test_square_rescale_manual(self):
points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.double)
points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)], dtype=np.double)
values = np.array([1., 2., -3., 5., 9.], dtype=np.double)
xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None],
np.linspace(0, 100, 14)[None,:])
xx = xx.ravel()
yy = yy.ravel()
xi = np.array([xx, yy]).T.copy()
for method in ('nearest', 'linear', 'cubic'):
msg = method
zi = griddata(points_rescaled, values, xi/np.array([10, 100.]),
method=method)
zi_rescaled = griddata(points, values, xi, method=method,
rescale=True)
assert_allclose(zi, zi_rescaled, err_msg=msg,
atol=1e-12)
def test_xi_1d(self):
# Check that 1-D xi is interpreted as a coordinate
x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)],
dtype=np.double)
y = np.arange(x.shape[0], dtype=np.double)
y = y - 2j*y[::-1]
xi = np.array([0.5, 0.5])
for method in ('nearest', 'linear', 'cubic'):
p1 = griddata(x, y, xi, method=method)
p2 = griddata(x, y, xi[None,:], method=method)
assert_allclose(p1, p2, err_msg=method)
xi1 = np.array([0.5])
xi3 = np.array([0.5, 0.5, 0.5])
assert_raises(ValueError, griddata, x, y, xi1,
method=method)
assert_raises(ValueError, griddata, x, y, xi3,
method=method)
def test_nearest_options():
    # smoke test that NearestNDInterpolator accepts cKDTree options
npts, nd = 4, 3
x = np.arange(npts*nd).reshape((npts, nd))
y = np.arange(npts)
nndi = NearestNDInterpolator(x, y)
opts = {'balanced_tree': False, 'compact_nodes': False}
nndi_o = NearestNDInterpolator(x, y, tree_options=opts)
assert_allclose(nndi(x), nndi_o(x), atol=1e-14)
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_fitpack2.py
# Created by Pearu Peterson, June 2003
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose)
from scipy._lib._numpy_compat import suppress_warnings
from pytest import raises as assert_raises
from numpy import array, diff, linspace, meshgrid, ones, pi, shape
from scipy.interpolate.fitpack import bisplrep, bisplev
from scipy.interpolate.fitpack2 import (UnivariateSpline,
LSQUnivariateSpline, InterpolatedUnivariateSpline,
LSQBivariateSpline, SmoothBivariateSpline, RectBivariateSpline,
LSQSphereBivariateSpline, SmoothSphereBivariateSpline,
RectSphereBivariateSpline)
class TestUnivariateSpline(object):
def test_linear_constant(self):
x = [1,2,3]
y = [3,3,3]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[3,3,3])
def test_preserve_shape(self):
x = [1, 2, 3]
y = [0, 2, 4]
lut = UnivariateSpline(x, y, k=1)
arg = 2
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
arg = [1.5, 2, 2.5]
assert_equal(shape(arg), shape(lut(arg)))
assert_equal(shape(arg), shape(lut(arg, nu=1)))
def test_linear_1d(self):
x = [1,2,3]
y = [0,2,4]
lut = UnivariateSpline(x,y,k=1)
assert_array_almost_equal(lut.get_knots(),[1,3])
assert_array_almost_equal(lut.get_coeffs(),[0,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2]),[0,1,2])
def test_subclassing(self):
# See #731
class ZeroSpline(UnivariateSpline):
def __call__(self, x):
return 0*array(x)
sp = ZeroSpline([1,2,3,4,5], [3,2,3,2,3], k=2)
assert_array_equal(sp([1.5, 2.5]), [0., 0.])
def test_empty_input(self):
# Test whether empty input returns an empty output. Ticket 1014
x = [1,3,5,7,9]
y = [0,4,9,12,21]
spl = UnivariateSpline(x, y, k=3)
assert_array_equal(spl([]), array([]))
def test_resize_regression(self):
"""Regression test for #1375."""
x = [-1., -0.65016502, -0.58856235, -0.26903553, -0.17370892,
-0.10011001, 0., 0.10011001, 0.17370892, 0.26903553, 0.58856235,
0.65016502, 1.]
y = [1.,0.62928599, 0.5797223, 0.39965815, 0.36322694, 0.3508061,
0.35214793, 0.3508061, 0.36322694, 0.39965815, 0.5797223,
0.62928599, 1.]
w = [1.00000000e+12, 6.88875973e+02, 4.89314737e+02, 4.26864807e+02,
6.07746770e+02, 4.51341444e+02, 3.17480210e+02, 4.51341444e+02,
6.07746770e+02, 4.26864807e+02, 4.89314737e+02, 6.88875973e+02,
1.00000000e+12]
spl = UnivariateSpline(x=x, y=y, w=w, s=None)
desired = array([0.35100374, 0.51715855, 0.87789547, 0.98719344])
assert_allclose(spl([0.1, 0.5, 0.9, 0.99]), desired, atol=5e-4)
def test_out_of_range_regression(self):
# Test different extrapolation modes. See ticket 3557
x = np.arange(5, dtype=float)
y = x**3
xp = linspace(-8, 13, 100)
xp_zeros = xp.copy()
xp_zeros[np.logical_or(xp_zeros < 0., xp_zeros > 4.)] = 0
xp_clip = xp.copy()
xp_clip[xp_clip < x[0]] = x[0]
xp_clip[xp_clip > x[-1]] = x[-1]
for cls in [UnivariateSpline, InterpolatedUnivariateSpline]:
spl = cls(x=x, y=y)
for ext in [0, 'extrapolate']:
assert_allclose(spl(xp, ext=ext), xp**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp**3, atol=1e-16)
for ext in [1, 'zeros']:
assert_allclose(spl(xp, ext=ext), xp_zeros**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_zeros**3, atol=1e-16)
for ext in [2, 'raise']:
assert_raises(ValueError, spl, xp, **dict(ext=ext))
for ext in [3, 'const']:
assert_allclose(spl(xp, ext=ext), xp_clip**3, atol=1e-16)
assert_allclose(cls(x, y, ext=ext)(xp), xp_clip**3, atol=1e-16)
# also test LSQUnivariateSpline [which needs explicit knots]
t = spl.get_knots()[3:4] # interior knots w/ default k=3
spl = LSQUnivariateSpline(x, y, t)
assert_allclose(spl(xp, ext=0), xp**3, atol=1e-16)
assert_allclose(spl(xp, ext=1), xp_zeros**3, atol=1e-16)
assert_raises(ValueError, spl, xp, **dict(ext=2))
assert_allclose(spl(xp, ext=3), xp_clip**3, atol=1e-16)
# also make sure that unknown values for `ext` are caught early
for ext in [-1, 'unknown']:
spl = UnivariateSpline(x, y)
assert_raises(ValueError, spl, xp, **dict(ext=ext))
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, ext=ext))
def test_lsq_fpchec(self):
xs = np.arange(100) * 1.
ys = np.arange(100) * 1.
knots = np.linspace(0, 99, 10)
bbox = (-1, 101)
assert_raises(ValueError, LSQUnivariateSpline, xs, ys, knots,
bbox=bbox)
def test_derivative_and_antiderivative(self):
# Thin wrappers to splder/splantider, so light smoke test only.
x = np.linspace(0, 1, 70)**3
y = np.cos(x)
spl = UnivariateSpline(x, y, s=0)
spl2 = spl.antiderivative(2).derivative(2)
assert_allclose(spl(0.3), spl2(0.3))
spl2 = spl.antiderivative(1)
assert_allclose(spl2(0.6) - spl2(0.2),
spl.integral(0.2, 0.6))
def test_nan(self):
# bail out early if the input data contains nans
x = np.arange(10, dtype=float)
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(x, y, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
y_end = y[-1]
for z in [np.nan, np.inf, -np.inf]:
y[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, check_finite=True))
y[-1] = y_end # check valid y but invalid w
w[-1] = z
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, w=w, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, w=w, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, w=w, check_finite=True))
def test_increasing_x(self):
xx = np.arange(10, dtype=float)
yy = xx**3
x = np.arange(10, dtype=float)
x[1] = x[0]
y = x**3
w = np.ones_like(x)
# also test LSQUnivariateSpline [which needs explicit knots]
spl = UnivariateSpline(xx, yy, check_finite=True)
t = spl.get_knots()[3:4] # interior knots w/ default k=3
assert_raises(ValueError, UnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, InterpolatedUnivariateSpline,
**dict(x=x, y=y, check_finite=True))
assert_raises(ValueError, LSQUnivariateSpline,
**dict(x=x, y=y, t=t, w=w, check_finite=True))
class TestLSQBivariateSpline(object):
# NOTE: The systems in this test class are rank-deficient
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
assert_equal(len(r), 1)
assert_almost_equal(lut(2,2), 3.)
def test_bilinearity(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,7,8,3,4,7,1,3,4]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
# This seems to fail (ier=1, see ticket 1642).
sup.filter(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1)
tx, ty = lut.get_knots()
for xa, xb in zip(tx[:-1], tx[1:]):
for ya, yb in zip(ty[:-1], ty[1:]):
for t in [0.1, 0.5, 0.9]:
for s in [0.3, 0.4, 0.7]:
xp = xa*(1-t) + xb*t
yp = ya*(1-s) + yb*s
zp = (+ lut(xa, ya)*(1-t)*(1-s)
+ lut(xb, ya)*t*(1-s)
+ lut(xa, yb)*(1-t)*s
+ lut(xb, yb)*t*s)
assert_almost_equal(lut(xp,yp), zp)
def test_integral(self):
x = [1,1,1,2,2,2,8,8,8]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
assert_equal(len(r), 1)
tx, ty = lut.get_knots()
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]),
trpz)
def test_empty_input(self):
# Test whether empty inputs returns an empty output. Ticket 1014
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
s = 0.1
tx = [1+s,3-s]
ty = [1+s,3-s]
with suppress_warnings() as sup:
r = sup.record(UserWarning, "\nThe coefficients of the spline")
lut = LSQBivariateSpline(x, y, z, tx, ty, kx=1, ky=1)
assert_equal(len(r), 1)
assert_array_equal(lut([], []), np.zeros((0,0)))
assert_array_equal(lut([], [], grid=False), np.zeros((0,)))
class TestSmoothBivariateSpline(object):
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[3,3,3,3])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[3,3],[3,3],[3,3]])
def test_linear_1d(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,0,0,2,2,2,4,4,4]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
assert_array_almost_equal(lut.get_knots(),([1,1,3,3],[1,1,3,3]))
assert_array_almost_equal(lut.get_coeffs(),[0,0,4,4])
assert_almost_equal(lut.get_residual(),0.0)
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
def test_integral(self):
x = [1,1,1,2,2,2,4,4,4]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
with suppress_warnings() as sup:
# This seems to fail (ier=1, see ticket 1642).
sup.filter(UserWarning, "\nThe required storage space")
lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
tx = [1,2,4]
ty = [1,2,3]
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz)
lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz,
decimal=0) # the quadratures give 23.75 and 23.85
tz = lut(tx[:-1], ty[:-1])
trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz)
def test_rerun_lwrk2_too_small(self):
# in this setting, lwrk2 is too small in the default run. Here we
# check for equality with the bisplrep/bisplev output because there,
# an automatic re-run of the spline representation is done if ier>10.
x = np.linspace(-2, 2, 80)
y = np.linspace(-2, 2, 80)
z = x + y
xi = np.linspace(-1, 1, 100)
yi = np.linspace(-2, 2, 100)
tck = bisplrep(x, y, z)
res1 = bisplev(xi, yi, tck)
interp_ = SmoothBivariateSpline(x, y, z)
res2 = interp_(xi, yi)
assert_almost_equal(res1, res2)
class TestLSQSphereBivariateSpline(object):
def setup_method(self):
# define the input data and coordinates
ntheta, nphi = 70, 90
theta = linspace(0.5/(ntheta - 1), 1 - 0.5/(ntheta - 1), ntheta) * pi
phi = linspace(0.5/(nphi - 1), 1 - 0.5/(nphi - 1), nphi) * 2. * pi
data = ones((theta.shape[0], phi.shape[0]))
# define knots and extract data values at the knots
knotst = theta[::5]
knotsp = phi[::5]
knotdata = data[::5, ::5]
# calculate spline coefficients
lats, lons = meshgrid(theta, phi)
lut_lsq = LSQSphereBivariateSpline(lats.ravel(), lons.ravel(),
data.T.ravel(), knotst, knotsp)
self.lut_lsq = lut_lsq
self.data = knotdata
self.new_lons, self.new_lats = knotsp, knotst
def test_linear_constant(self):
assert_almost_equal(self.lut_lsq.get_residual(), 0.0)
assert_array_almost_equal(self.lut_lsq(self.new_lats, self.new_lons),
self.data)
def test_empty_input(self):
assert_array_almost_equal(self.lut_lsq([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut_lsq([], [], grid=False), np.zeros((0,)))
class TestSmoothSphereBivariateSpline(object):
def setup_method(self):
theta = array([.25*pi, .25*pi, .25*pi, .5*pi, .5*pi, .5*pi, .75*pi,
.75*pi, .75*pi])
phi = array([.5 * pi, pi, 1.5 * pi, .5 * pi, pi, 1.5 * pi, .5 * pi, pi,
1.5 * pi])
r = array([3, 3, 3, 3, 3, 3, 3, 3, 3])
self.lut = SmoothSphereBivariateSpline(theta, phi, r, s=1E10)
def test_linear_constant(self):
assert_almost_equal(self.lut.get_residual(), 0.)
assert_array_almost_equal(self.lut([1, 1.5, 2],[1, 1.5]),
[[3, 3], [3, 3], [3, 3]])
def test_empty_input(self):
assert_array_almost_equal(self.lut([], []), np.zeros((0,0)))
assert_array_almost_equal(self.lut([], [], grid=False), np.zeros((0,)))
class TestRectBivariateSpline(object):
def test_defaults(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
xi = [1, 2.3, 5.3, 0.5, 3.3, 1.2, 3]
yi = [1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([[0,0,-20,0,0],[0,0,13,0,0],[0,0,4,0,0],
[0,0,-11,0,0],[0,0,4,0,0]])/6.
dy = array([[4,-1,0,1,-4],[4,-1,0,1,-4],[0,1.5,0,-1.5,0],
[2,.25,0,-.25,-2],[4,-1,0,1,-4]])
dxdy = array([[40,-25,0,25,-40],[-26,16.25,0,-16.25,26],
[-8,5,0,-5,8],[22,-13.75,0,13.75,-22],[-8,5,0,-5,8]])/6.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1),dx)
assert_array_almost_equal(lut(x,y,dy=1),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1),dxdy)
def test_derivatives(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
dx = array([0,0,2./3,0,0])
dy = array([4,-1,0,-.25,-4])
dxdy = array([160,65,0,55,32])/24.
lut = RectBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y,dx=1,grid=False),dx)
assert_array_almost_equal(lut(x,y,dy=1,grid=False),dy)
assert_array_almost_equal(lut(x,y,dx=1,dy=1,grid=False),dxdy)
def test_broadcast(self):
x = array([1,2,3,4,5])
y = array([1,2,3,4,5])
z = array([[1,2,1,2,1],[1,2,1,2,1],[1,2,3,2,1],[1,2,2,2,1],[1,2,1,2,1]])
lut = RectBivariateSpline(x,y,z)
assert_allclose(lut(x, y), lut(x[:,None], y[None,:], grid=False))
class TestRectSphereBivariateSpline(object):
def test_defaults(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
assert_array_almost_equal(lut(x,y),z)
def test_evaluate(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
yi = [0.2, 1, 2.3, 2.35, 3.0, 3.99, 5.25]
xi = [1.5, 0.4, 1.1, 0.45, 0.2345, 1., 0.0001]
zi = lut.ev(xi, yi)
zi2 = array([lut(xp, yp)[0,0] for xp, yp in zip(xi, yi)])
assert_almost_equal(zi, zi2)
def test_derivatives_grid(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_allclose(lut(x, y, dtheta=1), _numdiff_2d(lut, x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1), _numdiff_2d(lut, x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1), _numdiff_2d(lut, x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def test_derivatives(self):
y = linspace(0.01, 2*pi-0.01, 7)
x = linspace(0.01, pi-0.01, 7)
z = array([[1,2,1,2,1,2,1],[1,2,1,2,1,2,1],[1,2,3,2,1,2,1],
[1,2,2,2,1,2,1],[1,2,1,2,1,2,1],[1,2,2,2,1,2,1],
[1,2,1,2,1,2,1]])
lut = RectSphereBivariateSpline(x,y,z)
y = linspace(0.02, 2*pi-0.02, 7)
x = linspace(0.02, pi-0.02, 7)
assert_equal(lut(x, y, dtheta=1, grid=False).shape, x.shape)
assert_allclose(lut(x, y, dtheta=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dy=1),
rtol=1e-4, atol=1e-4)
assert_allclose(lut(x, y, dtheta=1, dphi=1, grid=False),
_numdiff_2d(lambda x,y: lut(x,y,grid=False), x, y, dx=1, dy=1, eps=1e-6),
rtol=1e-3, atol=1e-3)
def _numdiff_2d(func, x, y, dx=0, dy=0, eps=1e-8):
if dx == 0 and dy == 0:
return func(x, y)
elif dx == 1 and dy == 0:
return (func(x + eps, y) - func(x - eps, y)) / (2*eps)
elif dx == 0 and dy == 1:
return (func(x, y + eps) - func(x, y - eps)) / (2*eps)
elif dx == 1 and dy == 1:
return (func(x + eps, y + eps) - func(x - eps, y + eps)
- func(x + eps, y - eps) + func(x - eps, y - eps)) / (2*eps)**2
else:
raise ValueError("invalid derivative order")
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/_plotutils.py
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
import matplotlib.pyplot as plt
if ax is None:
fig = plt.figure()
ax = fig.gca()
return func(obj, ax=ax, **kw)
# As of matplotlib 2.0, the "hold" mechanism is deprecated.
# When matplotlib 1.x is no longer supported, this check can be removed.
was_held = ax.ishold()
if was_held:
return func(obj, ax=ax, **kw)
try:
ax.hold(True)
return func(obj, ax=ax, **kw)
finally:
ax.hold(was_held)
def _adjust_bounds(ax, points):
margin = 0.1 * points.ptp(axis=0)
xy_min = points.min(axis=0) - margin
xy_max = points.max(axis=0) + margin
ax.set_xlim(xy_min[0], xy_max[0])
ax.set_ylim(xy_min[1], xy_max[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
"""
Plot the given Delaunay triangulation in 2-D
Parameters
----------
tri : scipy.spatial.Delaunay instance
Triangulation to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Delaunay
matplotlib.pyplot.triplot
Notes
-----
Requires Matplotlib.
"""
if tri.points.shape[1] != 2:
raise ValueError("Delaunay triangulation is not 2-D")
x, y = tri.points.T
ax.plot(x, y, 'o')
ax.triplot(x, y, tri.simplices.copy())
_adjust_bounds(ax, tri.points)
return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
"""
Plot the given convex hull diagram in 2-D
Parameters
----------
hull : scipy.spatial.ConvexHull instance
Convex hull to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
ConvexHull
Notes
-----
Requires Matplotlib.
"""
from matplotlib.collections import LineCollection
if hull.points.shape[1] != 2:
raise ValueError("Convex hull is not 2-D")
ax.plot(hull.points[:,0], hull.points[:,1], 'o')
line_segments = [hull.points[simplex] for simplex in hull.simplices]
ax.add_collection(LineCollection(line_segments,
colors='k',
linestyle='solid'))
_adjust_bounds(ax, hull.points)
return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
"""
Plot the given Voronoi diagram in 2-D
Parameters
----------
vor : scipy.spatial.Voronoi instance
Diagram to plot
ax : matplotlib.axes.Axes instance, optional
Axes to plot on
show_points: bool, optional
Add the Voronoi points to the plot.
show_vertices : bool, optional
Add the Voronoi vertices to the plot.
line_colors : string, optional
Specifies the line color for polygon boundaries
line_width : float, optional
Specifies the line width for polygon boundaries
line_alpha: float, optional
Specifies the line alpha for polygon boundaries
point_size: float, optional
Specifies the size of points
Returns
-------
fig : matplotlib.figure.Figure instance
Figure for the plot
See Also
--------
Voronoi
Notes
-----
Requires Matplotlib.
Examples
--------
    Set of points:
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> points = np.random.rand(10, 2)  # random
Voronoi diagram of the points:
>>> from scipy.spatial import Voronoi, voronoi_plot_2d
>>> vor = Voronoi(points)
    Using `voronoi_plot_2d` for visualisation:
    >>> fig = voronoi_plot_2d(vor)
    Using `voronoi_plot_2d` for visualisation with enhancements:
>>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
... line_width=2, line_alpha=0.6, point_size=2)
>>> plt.show()
"""
from matplotlib.collections import LineCollection
if vor.points.shape[1] != 2:
raise ValueError("Voronoi diagram is not 2-D")
if kw.get('show_points', True):
point_size = kw.get('point_size', None)
ax.plot(vor.points[:,0], vor.points[:,1], '.', markersize=point_size)
if kw.get('show_vertices', True):
ax.plot(vor.vertices[:,0], vor.vertices[:,1], 'o')
line_colors = kw.get('line_colors', 'k')
line_width = kw.get('line_width', 1.0)
line_alpha = kw.get('line_alpha', 1.0)
center = vor.points.mean(axis=0)
ptp_bound = vor.points.ptp(axis=0)
finite_segments = []
infinite_segments = []
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
finite_segments.append(vor.vertices[simplex])
else:
i = simplex[simplex >= 0][0] # finite end Voronoi vertex
t = vor.points[pointidx[1]] - vor.points[pointidx[0]] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[pointidx].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[i] + direction * ptp_bound.max()
infinite_segments.append([vor.vertices[i], far_point])
ax.add_collection(LineCollection(finite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='solid'))
ax.add_collection(LineCollection(infinite_segments,
colors=line_colors,
lw=line_width,
alpha=line_alpha,
linestyle='dashed'))
_adjust_bounds(ax, vor.points)
return ax.figure
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/_spherical_voronoi.py
"""
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as Scipy.
#
import numpy as np
import numpy.matlib
import scipy
import itertools
from . import _voronoi
from scipy.spatial.distance import pdist
__all__ = ['SphericalVoronoi']
def sphere_check(points, radius, center):
""" Determines distance of generators from theoretical sphere
surface.
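    Examples
    --------
    A minimal illustrative sketch (added here, not part of the original
    docstring): a generator at distance 2 from the center of a unit
    sphere is off the surface by 1.
    >>> import numpy as np
    >>> float(sphere_check(np.array([[2., 0., 0.]]), 1., np.zeros(3)))
    1.0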
"""
actual_squared_radii = (((points[...,0] - center[0]) ** 2) +
((points[...,1] - center[1]) ** 2) +
((points[...,2] - center[2]) ** 2))
max_discrepancy = (np.sqrt(actual_squared_radii) - radius).max()
return abs(max_discrepancy)
def calc_circumcenters(tetrahedrons):
""" Calculates the cirumcenters of the circumspheres of tetrahedrons.
An implementation based on
http://mathworld.wolfram.com/Circumsphere.html
Parameters
----------
tetrahedrons : an array of shape (N, 4, 3)
consisting of N tetrahedrons defined by 4 points in 3D
Returns
    -------
circumcenters : an array of shape (N, 3)
consisting of the N circumcenters of the tetrahedrons in 3D
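    Examples
    --------
    A small illustrative sketch (added): the regular tetrahedron below is
    centered at the origin, so its circumcenter is the origin.
    >>> import numpy as np
    >>> tets = np.array([[[1., 1., 1.], [1., -1., -1.],
    ...                   [-1., 1., -1.], [-1., -1., 1.]]])
    >>> np.allclose(calc_circumcenters(tets), 0.)
    True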
"""
num = tetrahedrons.shape[0]
a = np.concatenate((tetrahedrons, np.ones((num, 4, 1))), axis=2)
sums = np.sum(tetrahedrons ** 2, axis=2)
d = np.concatenate((sums[:, :, np.newaxis], a), axis=2)
dx = np.delete(d, 1, axis=2)
dy = np.delete(d, 2, axis=2)
dz = np.delete(d, 3, axis=2)
dx = np.linalg.det(dx)
dy = -np.linalg.det(dy)
dz = np.linalg.det(dz)
a = np.linalg.det(a)
nominator = np.vstack((dx, dy, dz))
denominator = 2*a
return (nominator / denominator).T
def project_to_sphere(points, center, radius):
"""
Projects the elements of points onto the sphere defined
by center and radius.
Parameters
----------
points : array of floats of shape (npoints, ndim)
consisting of the points in a space of dimension ndim
center : array of floats of shape (ndim,)
the center of the sphere to project on
radius : float
the radius of the sphere to project on
    Returns
    -------
    projected_points : array of floats of shape (npoints, ndim)
        the points projected onto the sphere
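    Examples
    --------
    A minimal sketch (added): projecting ``[2, 0, 0]`` onto the unit
    sphere centered at the origin yields ``[1, 0, 0]``.
    >>> import numpy as np
    >>> np.allclose(project_to_sphere(np.array([[2., 0., 0.]]),
    ...                               np.zeros(3), 1.),
    ...             [[1., 0., 0.]])
    True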
"""
lengths = scipy.spatial.distance.cdist(points, np.array([center]))
return (points - center) / lengths * radius + center
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, 3)
Coordinates of points to construct a spherical
Voronoi diagram from
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (3,)
Center of sphere (Default: origin)
threshold : float
Threshold for detecting duplicate points and
mismatches between points and sphere parameters.
(Default: 1e-06)
Attributes
----------
points : double array of shape (npoints, 3)
the points in 3D to generate the Voronoi diagram from
radius : double
radius of the sphere
Default: None (forces estimation, which is less precise)
center : double array of shape (3,)
center of the sphere
Default: None (assumes sphere is centered at origin)
vertices : double array of shape (nvertices, 3)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Raises
------
ValueError
If there are duplicates in `points`.
If the provided `radius` is not consistent with `points`.
Notes
    -----
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
A 3D Delaunay tetrahedralization is obtained by including the origin of
the coordinate system as the fourth vertex of each simplex of the Convex
Hull. The circumcenters of all tetrahedra in the system are calculated and
projected to the surface of the sphere, producing the Voronoi vertices.
The Delaunay tetrahedralization neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
The surface area of spherical polygons is calculated by decomposing them
into triangles and using L'Huilier's Theorem to calculate the spherical
excess of each triangle [Weisstein]_. The sum of the spherical excesses is
multiplied by the square of the sphere radius to obtain the surface area
of the spherical polygon. For nearly-degenerate spherical polygons an area
of approximately 0 is returned by default, rather than attempting the
unstable calculation.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement). The reconstitution of the surface area of the
sphere, measured as the sum of the surface areas of all Voronoi regions,
is closest to 100 % for larger (>> 10) numbers of generators.
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [Weisstein] "L'Huilier's Theorem." From MathWorld -- A Wolfram Web
Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
>>> from matplotlib import colors
>>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi
>>> from mpl_toolkits.mplot3d import proj3d
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
>>> center = np.array([0, 0, 0])
>>> radius = 1
>>> # calculate spherical Voronoi diagram
>>> sv = SphericalVoronoi(points, radius, center)
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> # generate plot
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... random_color = colors.rgb2hex(np.random.rand(3))
... polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)
... polygon.set_color(random_color)
... ax.add_collection3d(polygon)
>>> plt.show()
"""
def __init__(self, points, radius=None, center=None, threshold=1e-06):
"""
Initializes the object and starts the computation of the Voronoi
diagram.
points : The generator points of the Voronoi diagram assumed to be
all on the sphere with radius supplied by the radius parameter and
center supplied by the center parameter.
radius : The radius of the sphere. Will default to 1 if not supplied.
center : The center of the sphere. Will default to the origin if not
supplied.
"""
self.points = points
if np.any(center):
self.center = center
else:
self.center = np.zeros(3)
if radius:
self.radius = radius
else:
self.radius = 1
if pdist(self.points).min() <= threshold * self.radius:
raise ValueError("Duplicate generators present.")
max_discrepancy = sphere_check(self.points,
self.radius,
self.center)
if max_discrepancy >= threshold * self.radius:
raise ValueError("Radius inconsistent with generators.")
self.vertices = None
self.regions = None
self._tri = None
self._calc_vertices_regions()
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
# perform 3D Delaunay triangulation on data set
# (here ConvexHull can also be used, and is faster)
self._tri = scipy.spatial.ConvexHull(self.points)
# add the center to each of the simplices in tri to get the same
# tetrahedrons we'd have gotten from Delaunay tetrahedralization
# tetrahedrons will have shape: (2N-4, 4, 3)
tetrahedrons = self._tri.points[self._tri.simplices]
tetrahedrons = np.insert(
tetrahedrons,
3,
np.array([self.center]),
axis=1
)
# produce circumcenters of tetrahedrons from 3D Delaunay
# circumcenters will have shape: (2N-4, 3)
circumcenters = calc_circumcenters(tetrahedrons)
# project tetrahedron circumcenters to the surface of the sphere
# self.vertices will have shape: (2N-4, 3)
self.vertices = project_to_sphere(
circumcenters,
self.center,
self.radius
)
# calculate regions from triangulation
# simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(self._tri.simplices.shape[0])
# tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices, simplex_indices,
simplex_indices]).ravel()
# point_indices will have shape: (6N-12,)
point_indices = self._tri.simplices.ravel()
# array_associations will have shape: (6N-12, 2)
array_associations = np.dstack((point_indices, tri_indices))[0]
array_associations = array_associations[np.lexsort((
array_associations[...,1],
array_associations[...,0]))]
array_associations = array_associations.astype(np.intp)
# group by generator indices to produce
# unsorted regions in nested list
groups = []
for k, g in itertools.groupby(array_associations,
lambda t: t[0]):
groups.append(list(list(zip(*list(g)))[1]))
self.regions = groups
def sort_vertices_of_regions(self):
"""
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the projected circumcenter of the
tetrahedron obtained by the k-th triangle in _tri.simplices (and the
origin). For each region n, we choose the first triangle (=Voronoi
vertex) in _tri.simplices and a vertex of that triangle not equal to
the center n. These determine a unique neighbor of that triangle,
which is then chosen as the second triangle. The second triangle
will have a unique vertex not equal to the current vertex or the
center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
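        Examples
        --------
        A short sketch (added; reuses the octahedron from the class
        docstring, illustrative only):
        >>> import numpy as np
        >>> sv = SphericalVoronoi(np.array([[0., 0., 1.], [0., 0., -1.],
        ...                                 [1., 0., 0.], [0., 1., 0.],
        ...                                 [0., -1., 0.], [-1., 0., 0.]]))
        >>> sv.sort_vertices_of_regions()
        >>> len(sv.regions)
        6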
"""
_voronoi.sort_vertices_of_regions(self._tri.simplices,
self.regions)
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/setup.py
from __future__ import division, print_function, absolute_import
from os.path import join, dirname
import glob
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
from numpy.distutils.misc_util import get_info as get_misc_info
from numpy.distutils.system_info import get_info as get_sys_info
from distutils.sysconfig import get_python_inc
config = Configuration('spatial', parent_package, top_path)
config.add_data_dir('tests')
# qhull
qhull_src = list(glob.glob(join(dirname(__file__), 'qhull',
'src', '*.c')))
inc_dirs = [get_python_inc()]
if inc_dirs[0] != get_python_inc(plat_specific=1):
inc_dirs.append(get_python_inc(plat_specific=1))
inc_dirs.append(get_numpy_include_dirs())
inc_dirs.append(join(dirname(dirname(__file__)), '_lib'))
cfg = dict(get_sys_info('lapack_opt'))
cfg.setdefault('include_dirs', []).extend(inc_dirs)
config.add_extension('qhull',
sources=['qhull.c'] + qhull_src,
**cfg)
# cKDTree
ckdtree_src = ['query.cxx',
'build.cxx',
'globals.cxx',
'cpp_exc.cxx',
'query_pairs.cxx',
'count_neighbors.cxx',
'query_ball_point.cxx',
'query_ball_tree.cxx',
'sparse_distances.cxx']
ckdtree_src = [join('ckdtree', 'src', x) for x in ckdtree_src]
ckdtree_headers = ['ckdtree_decl.h',
'ckdtree_methods.h',
'coo_entries.h',
'cpp_exc.h',
'cpp_utils.h',
'distance_base.h',
'distance.h',
'ordered_pair.h',
'partial_sort.h',
'rectangle.h']
ckdtree_headers = [join('ckdtree', 'src', x) for x in ckdtree_headers]
ckdtree_dep = ['ckdtree.cxx'] + ckdtree_headers + ckdtree_src
config.add_extension('ckdtree',
sources=['ckdtree.cxx'] + ckdtree_src,
depends=ckdtree_dep,
include_dirs=inc_dirs + [join('ckdtree', 'src')])
# _distance_wrap
config.add_extension('_distance_wrap',
sources=[join('src', 'distance_wrap.c')],
depends=[join('src', 'distance_impl.h')],
include_dirs=[get_numpy_include_dirs()],
extra_info=get_misc_info("npymath"))
config.add_extension('_voronoi',
sources=['_voronoi.c'])
config.add_extension('_hausdorff',
sources=['_hausdorff.c'])
# Add license files
config.add_data_files('qhull/COPYING.txt')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
# File: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/kdtree.py
# Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from heapq import heappush, heappop
import scipy.sparse
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""
Compute the p-th power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance_p
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> from scipy.spatial import minkowski_distance
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(float)
self.mins = np.minimum(maxes,mins).astype(float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
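        Examples
        --------
        A minimal sketch (added): splitting the unit square along axis 0
        at 0.5.
        >>> rect = Rectangle([1., 1.], [0., 0.])
        >>> less, greater = rect.split(0, 0.5)
        >>> less.maxes.tolist(), greater.mins.tolist()
        ([0.5, 1.0], [0.5, 0.0])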
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
Parameters
----------
        x : array_like
            Input point.
        p : float, optional
            Which Minkowski p-norm to use.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
        p : float, optional
            Which Minkowski p-norm to use.
"""
return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
        other : hyperrectangle
            Input rectangle.
        p : float, optional
            Which Minkowski p-norm to use.
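        Examples
        --------
        An illustrative sketch (added): two unit squares one apart along x.
        >>> a = Rectangle([1., 1.], [0., 0.])
        >>> b = Rectangle([3., 1.], [2., 0.])
        >>> float(a.min_distance_rectangle(b))
        1.0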
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
        other : hyperrectangle
            Input rectangle.
        p : float, optional
            Which Minkowski p-norm to use.
"""
return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
class KDTree(object):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points which
can be used to rapidly look up the nearest neighbors of any point.
Parameters
----------
data : (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive.
Raises
------
RuntimeError
The maximum recursion limit can be exceeded for large data
sets. If this happens, either increase the value for the `leafsize`
parameter or increase the recursion limit by::
>>> import sys
>>> sys.setrecursionlimit(10000)
See Also
--------
cKDTree : Implementation of `KDTree` in Cython
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
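    Examples
    --------
    A minimal usage sketch (added here for illustration):
    >>> import numpy as np
    >>> tree = KDTree(np.array([[0., 0.], [1., 0.], [0., 1.]]))
    >>> d, i = tree.query([0.9, 0.1])
    >>> int(i)
    1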
"""
def __init__(self, data, leafsize=10):
self.data = np.asarray(data)
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data,axis=0)
self.mins = np.amin(self.data,axis=0)
self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
class node(object):
if sys.version_info[0] >= 3:
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
def __init__(self, idx):
self.idx = idx
self.children = len(idx)
class innernode(node):
def __init__(self, split_dim, split, less, greater):
self.split_dim = split_dim
self.split = split
self.less = less
self.greater = greater
self.children = less.children+greater.children
def __build(self, idx, maxes, mins):
if len(idx) <= self.leafsize:
return KDTree.leafnode(idx)
else:
data = self.data[idx]
# maxes = np.amax(data,axis=0)
# mins = np.amin(data,axis=0)
d = np.argmax(maxes-mins)
maxval = maxes[d]
minval = mins[d]
if maxval == minval:
# all points are identical; warn user?
return KDTree.leafnode(idx)
data = data[:,d]
# sliding midpoint rule; see Maneewongvatana and Mount 1999
# for arguments that this is a good idea.
split = (maxval+minval)/2
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(less_idx) == 0:
split = np.amin(data)
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(greater_idx) == 0:
split = np.amax(data)
less_idx = np.nonzero(data < split)[0]
greater_idx = np.nonzero(data >= split)[0]
if len(less_idx) == 0:
# _still_ zero? all must have the same value
if not np.all(data == data[0]):
raise ValueError("Troublesome data array: %s" % data)
split = data[0]
less_idx = np.arange(len(data)-1)
greater_idx = np.array([len(data)-1])
lessmaxes = np.copy(maxes)
lessmaxes[d] = split
greatermins = np.copy(mins)
greatermins[d] = split
return KDTree.innernode(d, split,
self.__build(idx[less_idx],lessmaxes,mins),
self.__build(idx[greater_idx],maxes,greatermins))
def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
if p != np.inf:
side_distances **= p
min_distance = np.sum(side_distances)
else:
min_distance = np.amax(side_distances)
# priority queue for chasing nodes
# entries are:
# minimum distance between the cell and the target
# distances between the nearest side of the cell and the target
# the head node of the cell
q = [(min_distance,
tuple(side_distances),
self.tree)]
# priority queue for the nearest neighbors
# furthest known neighbor first
# entries are (-distance**p, i)
neighbors = []
if eps == 0:
epsfac = 1
elif p == np.inf:
epsfac = 1/(1+eps)
else:
epsfac = 1/(1+eps)**p
if p != np.inf and distance_upper_bound != np.inf:
distance_upper_bound = distance_upper_bound**p
while q:
min_distance, side_distances, node = heappop(q)
if isinstance(node, KDTree.leafnode):
# brute-force
data = self.data[node.idx]
ds = minkowski_distance_p(data,x[np.newaxis,:],p)
for i in range(len(ds)):
if ds[i] < distance_upper_bound:
if len(neighbors) == k:
heappop(neighbors)
heappush(neighbors, (-ds[i], node.idx[i]))
if len(neighbors) == k:
distance_upper_bound = -neighbors[0][0]
else:
# we don't push cells that are too far onto the queue at all,
# but since the distance_upper_bound decreases, we might get
# here even if the cell's too far
if min_distance > distance_upper_bound*epsfac:
# since this is the nearest cell, we're done, bail out
break
# compute minimum distances to the children and push them on
if x[node.split_dim] < node.split:
near, far = node.less, node.greater
else:
near, far = node.greater, node.less
# near child is at the same distance as the current node
heappush(q,(min_distance, side_distances, near))
# far child is further by an amount depending only
# on the split value
sd = list(side_distances)
if p == np.inf:
min_distance = max(min_distance, abs(node.split-x[node.split_dim]))
elif p == 1:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
else:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
# far child might be too far, if so, don't bother pushing it
if min_distance <= distance_upper_bound*epsfac:
heappush(q,(min_distance, tuple(sd), far))
if p == np.inf:
return sorted([(-d,i) for (d,i) in neighbors])
else:
return sorted([((-d)**(1./p),i) for (d,i) in neighbors])
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the kd-tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int, optional
The number of nearest neighbors to return.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel())))
>>> tree.data
array([[0, 2],
[0, 3],
[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 2],
[3, 3],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 7]])
>>> pts = np.array([[0, 0], [2.1, 2.9]])
>>> tree.query(pts)
(array([ 2. , 0.14142136]), array([ 0, 13]))
>>> tree.query(pts[0])
(2.0, 0)
"""
x = np.asarray(x)
if np.shape(x)[-1] != self.m:
raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
if p < 1:
raise ValueError("Only p-norms with 1<=p<=infinity permitted")
retshape = np.shape(x)[:-1]
if retshape != ():
if k is None:
dd = np.empty(retshape,dtype=object)
ii = np.empty(retshape,dtype=object)
elif k > 1:
dd = np.empty(retshape+(k,),dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape+(k,),dtype=int)
ii.fill(self.n)
elif k == 1:
dd = np.empty(retshape,dtype=float)
dd.fill(np.inf)
ii = np.empty(retshape,dtype=int)
ii.fill(self.n)
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
for c in np.ndindex(retshape):
hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
dd[c] = [d for (d,i) in hits]
ii[c] = [i for (d,i) in hits]
elif k > 1:
for j in range(len(hits)):
dd[c+(j,)], ii[c+(j,)] = hits[j]
elif k == 1:
if len(hits) > 0:
dd[c], ii[c] = hits[0]
else:
dd[c] = np.inf
ii[c] = self.n
return dd, ii
else:
hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
return [d for (d,i) in hits], [i for (d,i) in hits]
elif k == 1:
if len(hits) > 0:
return hits[0]
else:
return np.inf, self.n
elif k > 1:
dd = np.empty(k,dtype=float)
dd.fill(np.inf)
ii = np.empty(k,dtype=int)
ii.fill(self.n)
for j in range(len(hits)):
dd[j], ii[j] = hits[j]
return dd, ii
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
def __query_ball_point(self, x, r, p=2., eps=0):
R = Rectangle(self.maxes, self.mins)
def traverse_checking(node, rect):
if rect.min_distance_point(x, p) > r / (1. + eps):
return []
elif rect.max_distance_point(x, p) < r * (1. + eps):
return traverse_no_checking(node)
elif isinstance(node, KDTree.leafnode):
d = self.data[node.idx]
return node.idx[minkowski_distance(d, x, p) <= r].tolist()
else:
less, greater = rect.split(node.split_dim, node.split)
return traverse_checking(node.less, less) + \
traverse_checking(node.greater, greater)
def traverse_no_checking(node):
if isinstance(node, KDTree.leafnode):
return node.idx.tolist()
else:
return traverse_no_checking(node.less) + \
traverse_no_checking(node.greater)
return traverse_checking(self.tree, R)
def query_ball_point(self, x, r, p=2., eps=0):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : positive float
The radius of points to return.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 0:5]
>>> points = np.c_[x.ravel(), y.ravel()]
>>> tree = spatial.KDTree(points)
>>> tree.query_ball_point([2, 0], 1)
[5, 10, 11, 15]
Query multiple points and plot the results:
>>> import matplotlib.pyplot as plt
>>> points = np.asarray(points)
>>> plt.plot(points[:,0], points[:,1], '.')
>>> for results in tree.query_ball_point(([2, 0], [3, 3]), 1):
... nearby_points = points[results]
... plt.plot(nearby_points[:,0], nearby_points[:,1], 'o')
>>> plt.margins(0.1, 0.1)
>>> plt.show()
"""
x = np.asarray(x)
if x.shape[-1] != self.m:
raise ValueError("Searching for a %d-dimensional point in a "
"%d-dimensional KDTree" % (x.shape[-1], self.m))
if len(x.shape) == 1:
return self.__query_ball_point(x, r, p, eps)
else:
retshape = x.shape[:-1]
result = np.empty(retshape, dtype=object)
for c in np.ndindex(retshape):
result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)
return result
def query_ball_tree(self, other, r, p=2., eps=0):
"""Find all pairs of points whose distance is at most r
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
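        Examples
        --------
        A small sketch (added, illustrative only): each entry lists the
        neighbors in ``other`` of the corresponding point in this tree.
        >>> from scipy import spatial
        >>> t1 = spatial.KDTree([[0., 0.], [1., 0.]])
        >>> t2 = spatial.KDTree([[0.1, 0.], [5., 5.]])
        >>> t1.query_ball_tree(t2, r=0.5)
        [[0], []]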
"""
results = [[] for i in range(self.n)]
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
d = other.data[node2.idx]
for i in node1.idx:
results[i] += node2.idx[minkowski_distance(d,self.data[i],p) <= r].tolist()
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
results[i] += node2.idx.tolist()
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return results
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points within a distance.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
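        Examples
        --------
        A small illustrative sketch; of the three points below, only the
        first two lie within ``r`` of each other:
        >>> from scipy import spatial
        >>> tree = spatial.KDTree([(0.0, 0.0), (0.0, 0.4), (2.0, 2.0)])
        >>> sorted(tree.query_pairs(0.5))
        [(0, 1)]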
"""
results = set()
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
else:
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) != id(node2):
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
else:
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) == id(node2):
traverse_no_checking(node1.less, node2.less)
traverse_no_checking(node1.less, node2.greater)
traverse_no_checking(node1.greater, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
self.tree, Rectangle(self.maxes, self.mins))
return results
def count_neighbors(self, other, r, p=2.):
"""
Count how many nearby pairs can be formed.
        Count the number of pairs ``(x1, x2)`` that can be formed, with ``x1``
        drawn from self and ``x2`` drawn from `other`, and where
        ``distance(x1, x2, p) <= r``.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
Parameters
----------
other : KDTree instance
The other tree to draw points from.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use
Returns
-------
result : int or 1-D array of ints
The number of pairs. Note that this is internally stored in a numpy
int, and so may overflow if very large (2e9).
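        Examples
        --------
        A minimal sketch with hand-picked points; both points of this tree
        lie within ``r = 1.0`` of the single point in the other tree:
        >>> from scipy import spatial
        >>> tree1 = spatial.KDTree([(0.0, 0.0), (0.0, 1.0)])
        >>> tree2 = spatial.KDTree([(0.0, 0.0)])
        >>> tree1.count_neighbors(tree2, 1.0)
        2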
"""
def traverse(node1, rect1, node2, rect2, idx):
min_r = rect1.min_distance_rectangle(rect2,p)
max_r = rect1.max_distance_rectangle(rect2,p)
c_greater = r[idx] > max_r
result[idx[c_greater]] += node1.children*node2.children
idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]
if len(idx) == 0:
return
if isinstance(node1,KDTree.leafnode):
if isinstance(node2,KDTree.leafnode):
ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
other.data[node2.idx][np.newaxis,:,:],
p).ravel()
ds.sort()
result[idx] += np.searchsorted(ds,r[idx],side='right')
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1, rect1, node2.less, less, idx)
traverse(node1, rect1, node2.greater, greater, idx)
else:
if isinstance(node2,KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less, less, node2, rect2, idx)
traverse(node1.greater, greater, node2, rect2, idx)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2,idx)
traverse(node1.less,less1,node2.greater,greater2,idx)
traverse(node1.greater,greater1,node2.less,less2,idx)
traverse(node1.greater,greater1,node2.greater,greater2,idx)
R1 = Rectangle(self.maxes, self.mins)
R2 = Rectangle(other.maxes, other.mins)
if np.shape(r) == ():
r = np.array([r])
result = np.zeros(1,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(1))
return result[0]
elif len(np.shape(r)) == 1:
r = np.asarray(r)
n, = r.shape
result = np.zeros(n,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(n))
return result
else:
raise ValueError("r must be either a single value or a one-dimensional array of values")
def sparse_distance_matrix(self, other, max_distance, p=2.):
"""
Compute a sparse distance matrix
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
Parameters
----------
other : KDTree
max_distance : positive float
p : float, optional
Returns
-------
result : dok_matrix
Sparse matrix representing the results in "dictionary of keys" format.
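        Examples
        --------
        A minimal sketch with illustrative coordinates; only the first point
        of this tree is within ``max_distance`` of the other tree's point:
        >>> from scipy import spatial
        >>> tree1 = spatial.KDTree([(0.0, 0.0), (3.0, 3.0)])
        >>> tree2 = spatial.KDTree([(0.0, 1.0)])
        >>> dok = tree1.sparse_distance_matrix(tree2, 2.0)
        >>> dok[0, 0]
        1.0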
"""
result = scipy.sparse.dok_matrix((self.n,other.n))
def traverse(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > max_distance:
return
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
for j in node2.idx:
d = minkowski_distance(self.data[i],other.data[j],p)
if d <= max_distance:
result[i,j] = d
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1,rect1,node2.less,less)
traverse(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less,less,node2,rect2)
traverse(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2)
traverse(node1.less,less1,node2.greater,greater2)
traverse(node1.greater,greater1,node2.less,less2)
traverse(node1.greater,greater1,node2.greater,greater2)
traverse(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return result
def distance_matrix(x, y, p=2, threshold=1000000):
"""
Compute the distance matrix.
Returns the matrix of all pair-wise distances.
Parameters
----------
x : (M, K) array_like
Matrix of M vectors in K dimensions.
y : (N, K) array_like
Matrix of N vectors in K dimensions.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
threshold : positive int
        If ``M * N * K`` > `threshold`, the algorithm uses a Python loop instead
of large temporary arrays.
Returns
-------
result : (M, N) ndarray
Matrix containing the distance from every vector in `x` to every vector
in `y`.
Examples
--------
>>> from scipy.spatial import distance_matrix
>>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
array([[ 1. , 1.41421356],
[ 1.41421356, 1. ]])
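    The looping path taken when ``M * N * K`` exceeds `threshold` computes
    the same values as the vectorized path; a quick illustrative check:
    >>> import numpy as np
    >>> a, b = [[0, 0], [0, 1]], [[1, 0], [1, 1]]
    >>> np.allclose(distance_matrix(a, b), distance_matrix(a, b, threshold=1))
    True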
"""
x = np.asarray(x)
m, k = x.shape
y = np.asarray(y)
n, kk = y.shape
if k != kk:
raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
if m*n*k <= threshold:
return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
else:
result = np.empty((m,n),dtype=float) # FIXME: figure out the best dtype
if m < n:
for i in range(m):
result[i,:] = minkowski_distance(x[i],y,p)
else:
for j in range(n):
result[:,j] = minkowski_distance(x,y[j],p)
return result
| 38,088 | 37.551619 | 137 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/_procrustes.py |
"""
This module provides functions to perform full Procrustes analysis.
This module provides functions to perform full Procrustes analysis.
This code was originally written by Justin Kuczynski and ported over from
scikit-bio by Yoshiki Vazquez-Baeza.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.linalg import orthogonal_procrustes
__all__ = ['procrustes']
def procrustes(data1, data2):
r"""Procrustes analysis, a similarity test for two data sets.
Each input matrix is a set of points or vectors (the rows of the matrix).
The dimension of the space is the number of columns of each matrix. Given
two identically sized matrices, procrustes standardizes both such that:
- :math:`tr(AA^{T}) = 1`.
- Both sets of points are centered around the origin.
Procrustes ([1]_, [2]_) then applies the optimal transform to the second
matrix (including scaling/dilation, rotations, and reflections) to minimize
:math:`M^{2}=\sum(data1-data2)^{2}`, or the sum of the squares of the
pointwise differences between the two input datasets.
This function was not designed to handle datasets with different numbers of
datapoints (rows). If two data sets have different dimensionality
(different number of columns), simply add columns of zeros to the smaller
of the two.
Parameters
----------
data1 : array_like
        Matrix, n rows represent points in k (columns) space. `data1` is the
        reference data; after it is standardized, the data from `data2` will be
        transformed to fit the pattern in `data1` (must have >1 unique points).
data2 : array_like
n rows of data in k space to be fit to `data1`. Must be the same
shape ``(numrows, numcols)`` as data1 (must have >1 unique points).
Returns
-------
mtx1 : array_like
A standardized version of `data1`.
mtx2 : array_like
The orientation of `data2` that best fits `data1`. Centered, but not
necessarily :math:`tr(AA^{T}) = 1`.
disparity : float
:math:`M^{2}` as defined above.
Raises
------
ValueError
If the input arrays are not two-dimensional.
If the shape of the input arrays is different.
If the input arrays have zero columns or zero rows.
See Also
--------
scipy.linalg.orthogonal_procrustes
scipy.spatial.distance.directed_hausdorff : Another similarity test
for two data sets
Notes
-----
- The disparity should not depend on the order of the input matrices, but
the output matrices will, as only the first output matrix is guaranteed
to be scaled such that :math:`tr(AA^{T}) = 1`.
    - Duplicate data points are generally ok; duplicating a data point will
      increase its effect on the procrustes fit.
- The disparity scales as the number of points per input matrix.
References
----------
.. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
.. [2] Gower, J. C. (1975). "Generalized procrustes analysis".
Examples
--------
>>> from scipy.spatial import procrustes
The matrix ``b`` is a rotated, shifted, scaled and mirrored version of
``a`` here:
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
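    As noted above, the disparity does not depend on the order of the input
    matrices; a quick illustrative check reusing ``a`` and ``b``:
    >>> mtx1, mtx2, disparity_rev = procrustes(b, a)
    >>> np.allclose(disparity, disparity_rev)
    True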
"""
mtx1 = np.array(data1, dtype=np.double, copy=True)
mtx2 = np.array(data2, dtype=np.double, copy=True)
if mtx1.ndim != 2 or mtx2.ndim != 2:
raise ValueError("Input matrices must be two-dimensional")
if mtx1.shape != mtx2.shape:
raise ValueError("Input matrices must be of same shape")
if mtx1.size == 0:
raise ValueError("Input matrices must be >0 rows and >0 cols")
# translate all the data to the origin
mtx1 -= np.mean(mtx1, 0)
mtx2 -= np.mean(mtx2, 0)
norm1 = np.linalg.norm(mtx1)
norm2 = np.linalg.norm(mtx2)
if norm1 == 0 or norm2 == 0:
raise ValueError("Input matrices must contain >1 unique points")
# change scaling of data (in rows) such that trace(mtx*mtx') = 1
mtx1 /= norm1
mtx2 /= norm2
# transform mtx2 to minimize disparity
R, s = orthogonal_procrustes(mtx1, mtx2)
mtx2 = np.dot(mtx2, R.T) * s
# measure the dissimilarity between the two datasets
disparity = np.sum(np.square(mtx1 - mtx2))
return mtx1, mtx2, disparity
| 4,466 | 32.335821 | 79 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/__init__.py |
"""
=============================================================
Spatial algorithms and data structures (:mod:`scipy.spatial`)
=============================================================
.. currentmodule:: scipy.spatial
Nearest-neighbor Queries
========================
.. autosummary::
:toctree: generated/
KDTree -- class for efficient nearest-neighbor queries
cKDTree -- class for efficient nearest-neighbor queries (faster impl.)
Rectangle
Distance metrics are contained in the :mod:`scipy.spatial.distance` submodule.
Delaunay Triangulation, Convex Hulls and Voronoi Diagrams
=========================================================
.. autosummary::
:toctree: generated/
Delaunay -- compute Delaunay triangulation of input points
ConvexHull -- compute a convex hull for input points
   Voronoi -- compute a Voronoi diagram from input points
SphericalVoronoi -- compute a Voronoi diagram from input points on the surface of a sphere
HalfspaceIntersection -- compute the intersection points of input halfspaces
Plotting Helpers
================
.. autosummary::
:toctree: generated/
delaunay_plot_2d -- plot 2-D triangulation
convex_hull_plot_2d -- plot 2-D convex hull
voronoi_plot_2d -- plot 2-D voronoi diagram
.. seealso:: :ref:`Tutorial <qhulltutorial>`
Simplex representation
======================
The simplices (triangles, tetrahedra, ...) appearing in the Delaunay
tessellation (N-dim simplices), convex hull facets, and Voronoi ridges
(N-1 dim simplices) are represented in the following scheme::
tess = Delaunay(points)
hull = ConvexHull(points)
voro = Voronoi(points)
# coordinates of the j-th vertex of the i-th simplex
tess.points[tess.simplices[i, j], :] # tessellation element
hull.points[hull.simplices[i, j], :] # convex hull facet
voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells
For Delaunay triangulations and convex hulls, the neighborhood
structure of the simplices satisfies the condition:
``tess.neighbors[i,j]`` is the neighboring simplex of the i-th
simplex, opposite to the j-vertex. It is -1 in case of no
neighbor.
Convex hull facets also define a hyperplane equation::
(hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0
Similar hyperplane equations for the Delaunay triangulation correspond
to the convex hull facets on the corresponding N+1 dimensional
paraboloid.
The Delaunay triangulation objects offer a method for locating the
simplex containing a given point, and barycentric coordinate
computations.
Functions
---------
.. autosummary::
:toctree: generated/
tsearch
distance_matrix
minkowski_distance
minkowski_distance_p
procrustes
"""
from __future__ import division, print_function, absolute_import
from .kdtree import *
from .ckdtree import *
from .qhull import *
from ._spherical_voronoi import SphericalVoronoi
from ._plotutils import *
from ._procrustes import procrustes
__all__ = [s for s in dir() if not s.startswith('_')]
__all__ += ['distance']
from . import distance
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 3,237 | 28.706422 | 93 | py |
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/distance.py |
"""
=====================================================
Distance computations (:mod:`scipy.spatial.distance`)
=====================================================
.. sectionauthor:: Damian Eads
Function Reference
------------------
Distance matrix computation from a collection of raw observation vectors
stored in a rectangular array.
.. autosummary::
:toctree: generated/
pdist -- pairwise distances between observation vectors.
cdist -- distances between two collections of observation vectors
squareform -- convert distance matrix to a condensed one and vice versa
directed_hausdorff -- directed Hausdorff distance between arrays
Predicates for checking the validity of distance matrices, both
condensed and redundant. Also contained in this module are functions
for computing the number of observations in a distance matrix.
.. autosummary::
:toctree: generated/
is_valid_dm -- checks for a valid distance matrix
is_valid_y -- checks for a valid condensed distance matrix
num_obs_dm -- # of observations in a distance matrix
num_obs_y -- # of observations in a condensed distance matrix
Distance functions between two numeric vectors ``u`` and ``v``. Computing
distances over a large collection of vectors is inefficient for these
functions. Use ``pdist`` for this purpose.
.. autosummary::
:toctree: generated/
braycurtis -- the Bray-Curtis distance.
canberra -- the Canberra distance.
chebyshev -- the Chebyshev distance.
cityblock -- the Manhattan distance.
correlation -- the Correlation distance.
cosine -- the Cosine distance.
euclidean -- the Euclidean distance.
mahalanobis -- the Mahalanobis distance.
minkowski -- the Minkowski distance.
seuclidean -- the normalized Euclidean distance.
sqeuclidean -- the squared Euclidean distance.
wminkowski -- (deprecated) alias of `minkowski`.
Distance functions between two boolean vectors (representing sets) ``u`` and
``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
computing the distances between all pairs.
.. autosummary::
:toctree: generated/
dice -- the Dice dissimilarity.
hamming -- the Hamming distance.
jaccard -- the Jaccard distance.
kulsinski -- the Kulsinski distance.
rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
russellrao -- the Russell-Rao dissimilarity.
sokalmichener -- the Sokal-Michener dissimilarity.
sokalsneath -- the Sokal-Sneath dissimilarity.
yule -- the Yule dissimilarity.
:func:`hamming` also operates over discrete numerical vectors.
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
from __future__ import division, print_function, absolute_import
__all__ = [
'braycurtis',
'canberra',
'cdist',
'chebyshev',
'cityblock',
'correlation',
'cosine',
'dice',
'directed_hausdorff',
'euclidean',
'hamming',
'is_valid_dm',
'is_valid_y',
'jaccard',
'kulsinski',
'mahalanobis',
'matching',
'minkowski',
'num_obs_dm',
'num_obs_y',
'pdist',
'rogerstanimoto',
'russellrao',
'seuclidean',
'sokalmichener',
'sokalsneath',
'sqeuclidean',
'squareform',
'wminkowski',
'yule'
]
import warnings
import numpy as np
from functools import partial
from collections import namedtuple
from scipy._lib.six import callable, string_types
from scipy._lib.six import xrange
from scipy._lib._util import _asarray_validated
from . import _distance_wrap
from . import _hausdorff
from ..linalg import norm
def _args_to_kwargs_xdist(args, kwargs, metric, func_name):
"""
Convert legacy positional arguments to keyword arguments for pdist/cdist.
"""
if not args:
return kwargs
if (callable(metric) and metric not in [
braycurtis, canberra, chebyshev, cityblock, correlation, cosine,
dice, euclidean, hamming, jaccard, kulsinski, mahalanobis,
matching, minkowski, rogerstanimoto, russellrao, seuclidean,
sokalmichener, sokalsneath, sqeuclidean, yule, wminkowski]):
        raise TypeError('When using a custom metric, arguments must be passed '
                        'as keywords (i.e., ARGNAME=ARGVALUE)')
if func_name == 'pdist':
old_arg_names = ['p', 'w', 'V', 'VI']
else:
old_arg_names = ['p', 'V', 'VI', 'w']
num_args = len(args)
    warnings.warn('%d metric parameters have been passed as positional. '
                  'This will raise an error in a future version. '
                  'Please pass arguments as keywords (i.e., ARGNAME=ARGVALUE)'
                  % num_args, DeprecationWarning)
if num_args > 4:
        raise ValueError('Deprecated %s signature accepts only 4 '
                         'positional arguments (%s), %d given.'
                         % (func_name, ', '.join(old_arg_names), num_args))
for old_arg, arg in zip(old_arg_names, args):
if old_arg in kwargs:
raise TypeError('%s() got multiple values for argument %s'
% (func_name, old_arg))
kwargs[old_arg] = arg
return kwargs
def _copy_array_if_base_present(a):
"""Copy the array if its base points to a parent array."""
if a.base is not None:
return a.copy()
return a
def _correlation_cdist_wrap(XA, XB, dm, **kwargs):
XA = XA - XA.mean(axis=1, keepdims=True)
XB = XB - XB.mean(axis=1, keepdims=True)
_distance_wrap.cdist_cosine_double_wrap(XA, XB, dm, **kwargs)
def _correlation_pdist_wrap(X, dm, **kwargs):
X2 = X - X.mean(axis=1, keepdims=True)
_distance_wrap.pdist_cosine_double_wrap(X2, dm, **kwargs)
def _convert_to_type(X, out_type):
return np.ascontiguousarray(X, dtype=out_type)
def _filter_deprecated_kwargs(kwargs, args_blacklist):
# Filtering out old default keywords
for k in args_blacklist:
if k in kwargs:
del kwargs[k]
warnings.warn('Got unexpected kwarg %s. This will raise an error'
' in a future version.' % k, DeprecationWarning)
def _nbool_correspond_all(u, v, w=None):
if u.dtype == v.dtype == bool and w is None:
not_u = ~u
not_v = ~v
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
dtype = np.find_common_type([int], [u.dtype, v.dtype])
u = u.astype(dtype)
v = v.astype(dtype)
not_u = 1.0 - u
not_v = 1.0 - v
if w is not None:
not_u = w * not_u
u = w * u
nff = (not_u * not_v).sum()
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
ntt = (u * v).sum()
return (nff, nft, ntf, ntt)
def _nbool_correspond_ft_tf(u, v, w=None):
if u.dtype == v.dtype == bool and w is None:
not_u = ~u
not_v = ~v
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
else:
dtype = np.find_common_type([int], [u.dtype, v.dtype])
u = u.astype(dtype)
v = v.astype(dtype)
not_u = 1.0 - u
not_v = 1.0 - v
if w is not None:
not_u = w * not_u
u = w * u
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
return (nft, ntf)
def _validate_cdist_input(XA, XB, mA, mB, n, metric_name, **kwargs):
if metric_name is not None:
# get supported types
types = _METRICS[metric_name].types
# choose best type
typ = types[types.index(XA.dtype)] if XA.dtype in types else types[0]
# validate data
XA = _convert_to_type(XA, out_type=typ)
XB = _convert_to_type(XB, out_type=typ)
# validate kwargs
_validate_kwargs = _METRICS[metric_name].validator
if _validate_kwargs:
kwargs = _validate_kwargs(np.vstack([XA, XB]), mA + mB, n, **kwargs)
else:
typ = None
return XA, XB, typ, kwargs
def _validate_mahalanobis_kwargs(X, m, n, **kwargs):
VI = kwargs.pop('VI', None)
if VI is None:
if m <= n:
# There are fewer observations than the dimension of
# the observations.
raise ValueError("The number of observations (%d) is too "
"small; the covariance matrix is "
"singular. For observations with %d "
"dimensions, at least %d observations "
"are required." % (m, n, n + 1))
CV = np.atleast_2d(np.cov(X.astype(np.double).T))
VI = np.linalg.inv(CV).T.copy()
kwargs["VI"] = _convert_to_double(VI)
return kwargs
def _validate_minkowski_kwargs(X, m, n, **kwargs):
if 'p' not in kwargs:
kwargs['p'] = 2.
return kwargs
def _validate_pdist_input(X, m, n, metric_name, **kwargs):
if metric_name is not None:
# get supported types
types = _METRICS[metric_name].types
# choose best type
typ = types[types.index(X.dtype)] if X.dtype in types else types[0]
# validate data
X = _convert_to_type(X, out_type=typ)
# validate kwargs
_validate_kwargs = _METRICS[metric_name].validator
if _validate_kwargs:
kwargs = _validate_kwargs(X, m, n, **kwargs)
else:
typ = None
return X, typ, kwargs
def _validate_seuclidean_kwargs(X, m, n, **kwargs):
V = kwargs.pop('V', None)
if V is None:
V = np.var(X.astype(np.double), axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must '
'be one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the distances '
'are computed.')
kwargs['V'] = _convert_to_double(V)
return kwargs
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = np.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def _validate_weights(w, dtype=np.double):
w = _validate_vector(w, dtype=dtype)
if np.any(w < 0):
raise ValueError("Input weights should be all non-negative")
return w
def _validate_wminkowski_kwargs(X, m, n, **kwargs):
w = kwargs.pop('w', None)
if w is None:
raise ValueError('weighted minkowski requires a weight '
'vector `w` to be given.')
kwargs['w'] = _convert_to_double(w)
if 'p' not in kwargs:
kwargs['p'] = 2.
return kwargs
def directed_hausdorff(u, v, seed=0):
"""
Compute the directed Hausdorff distance between two N-D arrays.
Distances between pairs are calculated using a Euclidean metric.
Parameters
----------
u : (M,N) ndarray
Input array.
v : (O,N) ndarray
Input array.
seed : int or None
Local `np.random.RandomState` seed. Default is 0, a random shuffling of
u and v that guarantees reproducibility.
Returns
-------
d : double
The directed Hausdorff distance between arrays `u` and `v`,
index_1 : int
index of point contributing to Hausdorff pair in `u`
index_2 : int
index of point contributing to Hausdorff pair in `v`
Notes
-----
Uses the early break technique and the random sampling approach
described by [1]_. Although worst-case performance is ``O(m * o)``
(as with the brute force algorithm), this is unlikely in practice
    as the input data would have to force the algorithm to explore
    every single point interaction, even after the algorithm shuffles
    the input points. The best case performance is O(m), which
is satisfied by selecting an inner loop distance that is less than
cmax and leads to an early break as often as possible. The authors
have formally shown that the average runtime is closer to O(m).
.. versionadded:: 0.19.0
References
----------
.. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
calculating the exact Hausdorff distance." IEEE Transactions On
Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
2015.
See Also
--------
scipy.spatial.procrustes : Another similarity test for two data sets
Examples
--------
Find the directed Hausdorff distance between two 2-D arrays of
coordinates:
>>> from scipy.spatial.distance import directed_hausdorff
>>> u = np.array([(1.0, 0.0),
... (0.0, 1.0),
... (-1.0, 0.0),
... (0.0, -1.0)])
>>> v = np.array([(2.0, 0.0),
... (0.0, 2.0),
... (-2.0, 0.0),
... (0.0, -4.0)])
>>> directed_hausdorff(u, v)[0]
2.23606797749979
>>> directed_hausdorff(v, u)[0]
3.0
Find the general (symmetric) Hausdorff distance between two 2-D
arrays of coordinates:
>>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
3.0
Find the indices of the points that generate the Hausdorff distance
(the Hausdorff pair):
>>> directed_hausdorff(v, u)[1:]
(3, 3)
"""
u = np.asarray(u, dtype=np.float64, order='c')
v = np.asarray(v, dtype=np.float64, order='c')
result = _hausdorff.directed_hausdorff(u, v, seed)
return result
def minkowski(u, v, p=2, w=None):
"""
Compute the Minkowski distance between two 1-D arrays.
    The Minkowski distance between 1-D arrays `u` and `v`
is defined as
.. math::
{||u-v||}_p = (\\sum{|u_i - v_i|^p})^{1/p}.
\\left(\\sum{w_i(|(u_i - v_i)|^p)}\\right)^{1/p}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
p : int
The order of the norm of the difference :math:`{||u-v||}_p`.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
minkowski : double
The Minkowski distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
2.0
>>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
1.4142135623730951
>>> distance.minkowski([1, 0, 0], [0, 1, 0], 3)
1.2599210498948732
>>> distance.minkowski([1, 1, 0], [0, 1, 0], 1)
1.0
>>> distance.minkowski([1, 1, 0], [0, 1, 0], 2)
1.0
>>> distance.minkowski([1, 1, 0], [0, 1, 0], 3)
1.0
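    With unit weights the weighted form reduces to the unweighted distance;
    a quick illustrative check:
    >>> import numpy as np
    >>> np.allclose(distance.minkowski([1, 0, 0], [0, 1, 0], 2, w=[1, 1, 1]),
    ...             distance.minkowski([1, 0, 0], [0, 1, 0], 2))
    True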
"""
u = _validate_vector(u)
v = _validate_vector(v)
if p < 1:
raise ValueError("p must be at least 1")
u_v = u - v
if w is not None:
w = _validate_weights(w)
if p == 1:
root_w = w
        elif p == 2:
# better precision and speed
root_w = np.sqrt(w)
else:
root_w = np.power(w, 1/p)
u_v = root_w * u_v
dist = norm(u_v, ord=p)
return dist
# `minkowski` gained weights in scipy 1.0. Once we're at say version 1.3,
# deprecated `wminkowski`. Not done at once because it would be annoying for
# downstream libraries that used `wminkowski` and support multiple scipy
# versions.
def wminkowski(u, v, p, w):
"""
Compute the weighted Minkowski distance between two 1-D arrays.
The weighted Minkowski distance between `u` and `v`, defined as
.. math::
\\left(\\sum{(|w_i (u_i - v_i)|^p)}\\right)^{1/p}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
p : int
The order of the norm of the difference :math:`{||u-v||}_p`.
w : (N,) array_like
The weight vector.
Returns
-------
wminkowski : double
The weighted Minkowski distance between vectors `u` and `v`.
Notes
-----
`wminkowski` is DEPRECATED. It implements a definition where weights
are powered. It is recommended to use the weighted version of `minkowski`
instead. This function will be removed in a future version of scipy.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.wminkowski([1, 0, 0], [0, 1, 0], 1, np.ones(3))
2.0
>>> distance.wminkowski([1, 0, 0], [0, 1, 0], 2, np.ones(3))
1.4142135623730951
>>> distance.wminkowski([1, 0, 0], [0, 1, 0], 3, np.ones(3))
1.2599210498948732
>>> distance.wminkowski([1, 1, 0], [0, 1, 0], 1, np.ones(3))
1.0
>>> distance.wminkowski([1, 1, 0], [0, 1, 0], 2, np.ones(3))
1.0
>>> distance.wminkowski([1, 1, 0], [0, 1, 0], 3, np.ones(3))
1.0
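    As described in the Notes, `wminkowski` powers the weights, so it agrees
    with `minkowski` called with ``w**p`` (an illustrative check):
    >>> w = np.array([2.0, 1.0, 1.0])
    >>> np.allclose(distance.wminkowski([1, 0, 0], [0, 1, 0], 2, w),
    ...             distance.minkowski([1, 0, 0], [0, 1, 0], p=2, w=w**2))
    True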
"""
w = _validate_weights(w)
return minkowski(u, v, p=p, w=w**p)
def euclidean(u, v, w=None):
"""
Computes the Euclidean distance between two 1-D arrays.
    The Euclidean distance between 1-D arrays `u` and `v` is defined as
.. math::
{||u-v||}_2
\\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2}
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
euclidean : double
The Euclidean distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.euclidean([1, 0, 0], [0, 1, 0])
1.4142135623730951
>>> distance.euclidean([1, 1, 0], [0, 1, 0])
1.0
"""
return minkowski(u, v, p=2, w=w)
def sqeuclidean(u, v, w=None):
"""
Compute the squared Euclidean distance between two 1-D arrays.
The squared Euclidean distance between `u` and `v` is defined as
.. math::
{||u-v||}_2^2
\\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
sqeuclidean : double
The squared Euclidean distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
2.0
>>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])
1.0
"""
# Preserve float dtypes, but convert everything else to np.float64
# for stability.
utype, vtype = None, None
if not (hasattr(u, "dtype") and np.issubdtype(u.dtype, np.inexact)):
utype = np.float64
if not (hasattr(v, "dtype") and np.issubdtype(v.dtype, np.inexact)):
vtype = np.float64
u = _validate_vector(u, dtype=utype)
v = _validate_vector(v, dtype=vtype)
u_v = u - v
u_v_w = u_v # only want weights applied once
if w is not None:
w = _validate_weights(w)
u_v_w = w * u_v
return np.dot(u_v, u_v_w)
def correlation(u, v, w=None, centered=True):
"""
Compute the correlation distance between two 1-D arrays.
    The correlation distance between `u` and `v` is
defined as
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{u}` is the mean of the elements of `u`
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0
    centered : bool, optional
        If True (default), `u` and `v` are centered on their (weighted) means
        before the distance is computed; if False, the uncentered form is used.
Returns
-------
correlation : double
The correlation distance between 1-D array `u` and `v`.
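    Examples
    --------
    A small sanity check; the value is approximate, hence the ``allclose``:
    >>> import numpy as np
    >>> from scipy.spatial import distance
    >>> np.allclose(distance.correlation([1, 0, 0], [0, 1, 0]), 1.5)
    True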
"""
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
if centered:
umu = np.average(u, weights=w)
vmu = np.average(v, weights=w)
u = u - umu
v = v - vmu
uv = np.average(u * v, weights=w)
uu = np.average(np.square(u), weights=w)
vv = np.average(np.square(v), weights=w)
dist = 1.0 - uv / np.sqrt(uu * vv)
return dist
def cosine(u, v, w=None):
"""
Compute the Cosine distance between 1-D arrays.
    The Cosine distance between `u` and `v` is defined as
.. math::
1 - \\frac{u \\cdot v}
{||u||_2 ||v||_2}.
where :math:`u \\cdot v` is the dot product of :math:`u` and
:math:`v`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
cosine : double
The Cosine distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.cosine([1, 0, 0], [0, 1, 0])
1.0
>>> distance.cosine([100, 0, 0], [0, 1, 0])
1.0
>>> distance.cosine([1, 1, 0], [0, 1, 0])
0.29289321881345254
"""
# cosine distance is also referred to as 'uncentered correlation',
# or 'reflective correlation'
return correlation(u, v, w=w, centered=False)
def hamming(u, v, w=None):
"""
Compute the Hamming distance between two 1-D arrays.
    The Hamming distance between 1-D arrays `u` and `v` is simply the
proportion of disagreeing components in `u` and `v`. If `u` and `v` are
boolean vectors, the Hamming distance is
.. math::
\\frac{c_{01} + c_{10}}{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
hamming : double
The Hamming distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.hamming([1, 0, 0], [0, 1, 0])
0.66666666666666663
>>> distance.hamming([1, 0, 0], [1, 1, 0])
0.33333333333333331
>>> distance.hamming([1, 0, 0], [2, 0, 0])
0.33333333333333331
>>> distance.hamming([1, 0, 0], [3, 0, 0])
0.33333333333333331
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.shape != v.shape:
raise ValueError('The 1d arrays must have equal lengths.')
u_ne_v = u != v
if w is not None:
w = _validate_weights(w)
return np.average(u_ne_v, weights=w)
def jaccard(u, v, w=None):
"""
Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays.
    The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`
is defined as
.. math::
\\frac{c_{TF} + c_{FT}}
{c_{TT} + c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
jaccard : double
The Jaccard distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.jaccard([1, 0, 0], [0, 1, 0])
1.0
>>> distance.jaccard([1, 0, 0], [1, 1, 0])
0.5
>>> distance.jaccard([1, 0, 0], [1, 2, 0])
0.5
>>> distance.jaccard([1, 0, 0], [1, 1, 1])
0.66666666666666663
"""
u = _validate_vector(u)
v = _validate_vector(v)
nonzero = np.bitwise_or(u != 0, v != 0)
unequal_nonzero = np.bitwise_and((u != v), nonzero)
if w is not None:
w = _validate_weights(w)
nonzero = w * nonzero
unequal_nonzero = w * unequal_nonzero
dist = np.double(unequal_nonzero.sum()) / np.double(nonzero.sum())
return dist
def kulsinski(u, v, w=None):
"""
Compute the Kulsinski dissimilarity between two boolean 1-D arrays.
    The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`
is defined as
.. math::
\\frac{c_{TF} + c_{FT} - c_{TT} + n}
{c_{FT} + c_{TF} + n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
kulsinski : double
The Kulsinski distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.kulsinski([1, 0, 0], [0, 1, 0])
1.0
>>> distance.kulsinski([1, 0, 0], [1, 1, 0])
0.75
>>> distance.kulsinski([1, 0, 0], [2, 1, 0])
0.33333333333333331
>>> distance.kulsinski([1, 0, 0], [3, 1, 0])
-0.5
"""
u = _validate_vector(u)
v = _validate_vector(v)
if w is None:
n = float(len(u))
else:
w = _validate_weights(w)
n = w.sum()
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
return (ntf + nft - ntt + n) / (ntf + nft + n)
def seuclidean(u, v, V):
"""
Return the standardized Euclidean distance between two 1-D arrays.
The standardized Euclidean distance between `u` and `v`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
V : (N,) array_like
        `V` is a 1-D array of component variances. It is usually computed
        among a larger collection of vectors.
Returns
-------
seuclidean : double
The standardized Euclidean distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])
4.4721359549995796
>>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1])
3.3166247903553998
>>> distance.seuclidean([1, 0, 0], [0, 1, 0], [10, 0.1, 0.1])
3.1780497164141406
"""
u = _validate_vector(u)
v = _validate_vector(v)
V = _validate_vector(V, dtype=np.float64)
if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
raise TypeError('V must be a 1-D array of the same dimension '
'as u and v.')
return euclidean(u, v, w=1/V)
def cityblock(u, v, w=None):
"""
Compute the City Block (Manhattan) distance.
Computes the Manhattan distance between two 1-D arrays `u` and `v`,
which is defined as
.. math::
\\sum_i {\\left| u_i - v_i \\right|}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
cityblock : double
The City Block (Manhattan) distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.cityblock([1, 0, 0], [0, 1, 0])
2
>>> distance.cityblock([1, 0, 0], [0, 2, 0])
3
>>> distance.cityblock([1, 0, 0], [1, 1, 0])
1
"""
u = _validate_vector(u)
v = _validate_vector(v)
l1_diff = abs(u - v)
if w is not None:
w = _validate_weights(w)
l1_diff = w * l1_diff
return l1_diff.sum()
def mahalanobis(u, v, VI):
"""
Compute the Mahalanobis distance between two 1-D arrays.
    The Mahalanobis distance between 1-D arrays `u` and `v` is defined as
.. math::
\\sqrt{ (u-v) V^{-1} (u-v)^T }
where ``V`` is the covariance matrix. Note that the argument `VI`
is the inverse of ``V``.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
VI : ndarray
The inverse of the covariance matrix.
Returns
-------
mahalanobis : double
The Mahalanobis distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
>>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)
1.0
>>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv)
1.0
>>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)
1.7320508075688772
"""
u = _validate_vector(u)
v = _validate_vector(v)
VI = np.atleast_2d(VI)
delta = u - v
m = np.dot(np.dot(delta, VI), delta)
return np.sqrt(m)
def chebyshev(u, v, w=None):
"""
Compute the Chebyshev distance.
Computes the Chebyshev distance between two 1-D arrays `u` and `v`,
which is defined as
.. math::
\\max_i {|u_i-v_i|}.
Parameters
----------
u : (N,) array_like
Input vector.
v : (N,) array_like
Input vector.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
chebyshev : double
The Chebyshev distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.chebyshev([1, 0, 0], [0, 1, 0])
1
>>> distance.chebyshev([1, 1, 0], [0, 1, 0])
1
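    Coordinates with zero weight are excluded before taking the maximum
    (an illustrative sketch):
    >>> distance.chebyshev([1, 0, 0], [0, 1, 0], w=[0, 1, 1])
    1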
"""
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
has_weight = w > 0
if has_weight.sum() < w.size:
u = u[has_weight]
v = v[has_weight]
return max(abs(u - v))
def braycurtis(u, v, w=None):
"""
Compute the Bray-Curtis distance between two 1-D arrays.
Bray-Curtis distance is defined as
.. math::
\\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}
The Bray-Curtis distance is in the range [0, 1] if all coordinates are
positive, and is undefined if the inputs are of length zero.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
braycurtis : double
The Bray-Curtis distance between 1-D arrays `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.braycurtis([1, 0, 0], [0, 1, 0])
1.0
>>> distance.braycurtis([1, 1, 0], [0, 1, 0])
0.33333333333333331
"""
u = _validate_vector(u)
v = _validate_vector(v, dtype=np.float64)
l1_diff = abs(u - v)
l1_sum = abs(u + v)
if w is not None:
w = _validate_weights(w)
l1_diff = w * l1_diff
l1_sum = w * l1_sum
return l1_diff.sum() / l1_sum.sum()
def canberra(u, v, w=None):
"""
Compute the Canberra distance between two 1-D arrays.
The Canberra distance is defined as
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
canberra : double
The Canberra distance between vectors `u` and `v`.
Notes
-----
    When `u[i]` and `v[i]` are 0 for a given i, the fraction 0/0 = 0 is
    used in the calculation.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.canberra([1, 0, 0], [0, 1, 0])
2.0
>>> distance.canberra([1, 1, 0], [0, 1, 0])
1.0
"""
u = _validate_vector(u)
v = _validate_vector(v, dtype=np.float64)
if w is not None:
w = _validate_weights(w)
olderr = np.seterr(invalid='ignore')
try:
abs_uv = abs(u - v)
abs_u = abs(u)
abs_v = abs(v)
d = abs_uv / (abs_u + abs_v)
if w is not None:
d = w * d
d = np.nansum(d)
finally:
np.seterr(**olderr)
return d
def yule(u, v, w=None):
"""
Compute the Yule dissimilarity between two boolean 1-D arrays.
The Yule dissimilarity is defined as
.. math::
\\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
yule : double
The Yule dissimilarity between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.yule([1, 0, 0], [0, 1, 0])
2.0
>>> distance.yule([1, 1, 0], [0, 1, 0])
0.0
"""
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
return float(2.0 * ntf * nft / np.array(ntt * nff + ntf * nft))
@np.deprecate(message="spatial.distance.matching is deprecated in scipy 1.0.0; "
"use spatial.distance.hamming instead.")
def matching(u, v, w=None):
"""
Compute the Hamming distance between two boolean 1-D arrays.
This is a deprecated synonym for :func:`hamming`.
"""
return hamming(u, v, w=w)
def dice(u, v, w=None):
"""
Compute the Dice dissimilarity between two boolean 1-D arrays.
    The Dice dissimilarity between `u` and `v` is
.. math::
\\frac{c_{TF} + c_{FT}}
{2c_{TT} + c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) ndarray, bool
Input 1-D array.
v : (N,) ndarray, bool
Input 1-D array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
dice : double
The Dice dissimilarity between 1-D arrays `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.dice([1, 0, 0], [0, 1, 0])
1.0
>>> distance.dice([1, 0, 0], [1, 1, 0])
0.3333333333333333
>>> distance.dice([1, 0, 0], [2, 0, 0])
-0.3333333333333333
"""
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
if u.dtype == v.dtype == bool and w is None:
ntt = (u & v).sum()
else:
dtype = np.find_common_type([int], [u.dtype, v.dtype])
u = u.astype(dtype)
v = v.astype(dtype)
if w is None:
ntt = (u * v).sum()
else:
ntt = (u * v * w).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))
def rogerstanimoto(u, v, w=None):
"""
Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.
The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
    `u` and `v` is defined as
.. math::
\\frac{R}
{c_{TT} + c_{FF} + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
rogerstanimoto : double
The Rogers-Tanimoto dissimilarity between vectors
`u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0])
0.8
>>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0])
0.5
>>> distance.rogerstanimoto([1, 0, 0], [2, 0, 0])
-1.0
"""
u = _validate_vector(u)
v = _validate_vector(v)
if w is not None:
w = _validate_weights(w)
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
def russellrao(u, v, w=None):
"""
Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.
The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
`v`, is defined as
.. math::
\\frac{n - c_{TT}}
{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
russellrao : double
The Russell-Rao dissimilarity between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.russellrao([1, 0, 0], [0, 1, 0])
1.0
>>> distance.russellrao([1, 0, 0], [1, 1, 0])
0.6666666666666666
>>> distance.russellrao([1, 0, 0], [2, 0, 0])
0.3333333333333333
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == v.dtype == bool and w is None:
ntt = (u & v).sum()
n = float(len(u))
elif w is None:
ntt = (u * v).sum()
n = float(len(u))
else:
w = _validate_weights(w)
ntt = (u * v * w).sum()
n = w.sum()
return float(n - ntt) / n
def sokalmichener(u, v, w=None):
"""
Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.
    The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`
is defined as
.. math::
\\frac{R}
{S + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
:math:`S = c_{FF} + c_{TT}`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
sokalmichener : double
The Sokal-Michener dissimilarity between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.sokalmichener([1, 0, 0], [0, 1, 0])
0.8
>>> distance.sokalmichener([1, 0, 0], [1, 1, 0])
0.5
>>> distance.sokalmichener([1, 0, 0], [2, 0, 0])
-1.0
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == v.dtype == bool and w is None:
ntt = (u & v).sum()
nff = (~u & ~v).sum()
elif w is None:
ntt = (u * v).sum()
nff = ((1.0 - u) * (1.0 - v)).sum()
else:
w = _validate_weights(w)
ntt = (u * v * w).sum()
nff = ((1.0 - u) * (1.0 - v) * w).sum()
    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
def sokalsneath(u, v, w=None):
"""
Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.
    The Sokal-Sneath dissimilarity between `u` and `v` is defined as
.. math::
\\frac{R}
{c_{TT} + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
sokalsneath : double
The Sokal-Sneath dissimilarity between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.sokalsneath([1, 0, 0], [0, 1, 0])
1.0
>>> distance.sokalsneath([1, 0, 0], [1, 1, 0])
0.66666666666666663
>>> distance.sokalsneath([1, 0, 0], [2, 1, 0])
0.0
>>> distance.sokalsneath([1, 0, 0], [3, 1, 0])
-2.0
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == v.dtype == bool and w is None:
ntt = (u & v).sum()
elif w is None:
ntt = (u * v).sum()
else:
w = _validate_weights(w)
ntt = (u * v * w).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
denom = np.array(ntt + 2.0 * (ntf + nft))
if not denom.any():
raise ValueError('Sokal-Sneath dissimilarity is not defined for '
'vectors that are entirely false.')
return float(2.0 * (ntf + nft)) / denom
_convert_to_double = partial(_convert_to_type, out_type=np.double)
_convert_to_bool = partial(_convert_to_type, out_type=bool)
# adding python-only wrappers to _distance_wrap module
_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap
_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap
# Registry of implemented metrics:
# Dictionary with the following structure:
# {
# metric_name : MetricInfo(aka, types=[double], validator=None)
# }
#
# Where:
# `metric_name` must be equal to python metric name
#
# MetricInfo is a named tuple with fields:
# 'aka' : [list of aliases],
#
# 'validator': f(X, m, n, **kwargs) # function that check kwargs and
# # computes default values.
#
# 'types': [list of supported types], # X (pdist) and XA (cdist) are used to
# # choose the type. if there is no match
# # the first type is used. Default double
#}
MetricInfo = namedtuple("MetricInfo", 'aka types validator ')
MetricInfo.__new__.__defaults__ = (['double'], None)
_METRICS = {
'braycurtis': MetricInfo(aka=['braycurtis']),
'canberra': MetricInfo(aka=['canberra']),
'chebyshev': MetricInfo(aka=['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']),
'cityblock': MetricInfo(aka=['cityblock', 'cblock', 'cb', 'c']),
'correlation': MetricInfo(aka=['correlation', 'co']),
'cosine': MetricInfo(aka=['cosine', 'cos']),
'dice': MetricInfo(aka=['dice'], types=['bool']),
'euclidean': MetricInfo(aka=['euclidean', 'euclid', 'eu', 'e']),
'hamming': MetricInfo(aka=['matching', 'hamming', 'hamm', 'ha', 'h'],
types=['double', 'bool']),
'jaccard': MetricInfo(aka=['jaccard', 'jacc', 'ja', 'j'],
types=['double', 'bool']),
'kulsinski': MetricInfo(aka=['kulsinski'], types=['bool']),
'mahalanobis': MetricInfo(aka=['mahalanobis', 'mahal', 'mah'],
validator=_validate_mahalanobis_kwargs),
'minkowski': MetricInfo(aka=['minkowski', 'mi', 'm', 'pnorm'],
validator=_validate_minkowski_kwargs),
'rogerstanimoto': MetricInfo(aka=['rogerstanimoto'], types=['bool']),
'russellrao': MetricInfo(aka=['russellrao'], types=['bool']),
'seuclidean': MetricInfo(aka=['seuclidean', 'se', 's'],
validator=_validate_seuclidean_kwargs),
'sokalmichener': MetricInfo(aka=['sokalmichener'], types=['bool']),
'sokalsneath': MetricInfo(aka=['sokalsneath'], types=['bool']),
'sqeuclidean': MetricInfo(aka=['sqeuclidean', 'sqe', 'sqeuclid']),
'wminkowski': MetricInfo(aka=['wminkowski', 'wmi', 'wm', 'wpnorm'],
validator=_validate_wminkowski_kwargs),
'yule': MetricInfo(aka=['yule'], types=['bool']),
}
_METRIC_ALIAS = dict((alias, name)
for name, info in _METRICS.items()
for alias in info.aka)
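# For example, the alias table built above maps every shorthand spelling onto
# its canonical metric name (illustrative lookups):
#     _METRIC_ALIAS['euclid'] == 'euclidean'
#     _METRIC_ALIAS['cheby'] == 'chebyshev'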
_METRICS_NAMES = list(_METRICS.keys())
_TEST_METRICS = {'test_' + name: globals()[name] for name in _METRICS.keys()}
def pdist(X, metric='euclidean', *args, **kwargs):
"""
Pairwise distances between observations in n-dimensional space.
See Notes for common calling conventions.
Parameters
----------
X : ndarray
An m by n array of m original observations in an
n-dimensional space.
metric : str or function, optional
The distance metric to use. The distance function can
be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis', 'matching',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
*args : tuple. Deprecated.
Additional arguments should be passed as keyword arguments
**kwargs : dict, optional
Extra arguments to `metric`: refer to each metric documentation for a
list of all possible arguments.
Some possible arguments:
p : scalar
The p-norm to apply for Minkowski, weighted and unweighted.
Default: 2.
w : ndarray
The weight vector for metrics that support weights (e.g., Minkowski).
V : ndarray
The variance vector for standardized Euclidean.
Default: var(X, axis=0, ddof=1)
VI : ndarray
The inverse of the covariance matrix for Mahalanobis.
Default: inv(cov(X.T)).T
    out : ndarray
        The output array.
        If not None, the condensed distance matrix Y is stored in this array.
        Note: this argument is metric-independent; it will become a regular
        keyword argument in a future scipy version.
Returns
-------
Y : ndarray
        Returns a condensed distance matrix Y.  For each :math:`i` and
        :math:`j` (where :math:`i<j<m`), where m is the number of original
        observations, the metric ``dist(u=X[i], v=X[j])`` is computed and
        stored in entry ``ij``.
See Also
--------
squareform : converts between condensed distance matrices and
square distance matrices.
Notes
-----
See ``squareform`` for information on how to calculate the index of
this entry or to convert the condensed distance matrix to a
redundant square matrix.
The following are common calling conventions.
1. ``Y = pdist(X, 'euclidean')``
Computes the distance between m points using Euclidean distance
(2-norm) as the distance metric between the points. The points
are arranged as m n-dimensional row vectors in the matrix X.
2. ``Y = pdist(X, 'minkowski', p=2.)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
3. ``Y = pdist(X, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = pdist(X, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = pdist(X, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = pdist(X, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of ``u`` and ``v``.
7. ``Y = pdist(X, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = pdist(X, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = pdist(X, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree.
10. ``Y = pdist(X, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}
11. ``Y = pdist(X, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}
12. ``Y = pdist(X, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
{\\sum_i {|u_i+v_i|}}
13. ``Y = pdist(X, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = pdist(X, 'yule')``
Computes the Yule distance between each pair of boolean
vectors. (see yule function documentation)
15. ``Y = pdist(X, 'matching')``
Synonym for 'hamming'.
16. ``Y = pdist(X, 'dice')``
Computes the Dice distance between each pair of boolean
vectors. (see dice function documentation)
17. ``Y = pdist(X, 'kulsinski')``
Computes the Kulsinski distance between each pair of
boolean vectors. (see kulsinski function documentation)
18. ``Y = pdist(X, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between each pair of
boolean vectors. (see rogerstanimoto function documentation)
19. ``Y = pdist(X, 'russellrao')``
Computes the Russell-Rao distance between each pair of
boolean vectors. (see russellrao function documentation)
20. ``Y = pdist(X, 'sokalmichener')``
Computes the Sokal-Michener distance between each pair of
boolean vectors. (see sokalmichener function documentation)
21. ``Y = pdist(X, 'sokalsneath')``
Computes the Sokal-Sneath distance between each pair of
boolean vectors. (see sokalsneath function documentation)
22. ``Y = pdist(X, 'wminkowski', p=2, w=w)``
Computes the weighted Minkowski distance between each pair of
vectors. (see wminkowski function documentation)
23. ``Y = pdist(X, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = pdist(X, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax.::
dm = pdist(X, 'sokalsneath')
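    Examples
    --------
    A minimal sketch with three toy points (illustrative data only):

    >>> import numpy as np
    >>> from scipy.spatial.distance import pdist
    >>> X = np.array([[0., 0.], [0., 1.], [1., 0.]])
    >>> dm = pdist(X, 'euclidean')   # distances for pairs (0,1), (0,2), (1,2)
    >>> dm.shape
    (3,)
    >>> np.allclose(dm, [1., 1., np.sqrt(2)])
    True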
"""
# You can also call this as:
# Y = pdist(X, 'test_abc')
# where 'abc' is the metric being tested. This computes the distance
# between all pairs of vectors in X using the distance metric 'abc' but
# with a more succinct, verifiable, but less efficient implementation.
X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
check_finite=False)
kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "pdist")
X = np.asarray(X, order='c')
s = X.shape
if len(s) != 2:
raise ValueError('A 2-dimensional array must be passed.')
m, n = s
out = kwargs.pop("out", None)
if out is None:
dm = np.empty((m * (m - 1)) // 2, dtype=np.double)
else:
if out.shape != (m * (m - 1) // 2,):
raise ValueError("output array has incorrect shape.")
if not out.flags.c_contiguous:
raise ValueError("Output array must be C-contiguous.")
if out.dtype != np.double:
raise ValueError("Output array must be double type.")
dm = out
# compute blacklist for deprecated kwargs
if(metric in _METRICS['minkowski'].aka or
metric in _METRICS['wminkowski'].aka or
metric in ['test_minkowski', 'test_wminkowski'] or
metric in [minkowski, wminkowski]):
kwargs_blacklist = ["V", "VI"]
elif(metric in _METRICS['seuclidean'].aka or
metric == 'test_seuclidean' or metric == seuclidean):
kwargs_blacklist = ["p", "w", "VI"]
elif(metric in _METRICS['mahalanobis'].aka or
metric == 'test_mahalanobis' or metric == mahalanobis):
kwargs_blacklist = ["p", "w", "V"]
else:
kwargs_blacklist = ["p", "V", "VI"]
_filter_deprecated_kwargs(kwargs, kwargs_blacklist)
if callable(metric):
mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
X, typ, kwargs = _validate_pdist_input(X, m, n,
metric_name, **kwargs)
k = 0
for i in xrange(0, m - 1):
for j in xrange(i + 1, m):
dm[k] = metric(X[i], X[j], **kwargs)
k = k + 1
elif isinstance(metric, string_types):
mstr = metric.lower()
# NOTE: C-version still does not support weights
if "w" in kwargs and not mstr.startswith("test_"):
if(mstr in _METRICS['seuclidean'].aka or
mstr in _METRICS['mahalanobis'].aka):
raise ValueError("metric %s incompatible with weights" % mstr)
# need to use python version for weighting
kwargs['out'] = out
mstr = "test_%s" % mstr
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
X, typ, kwargs = _validate_pdist_input(X, m, n,
metric_name, **kwargs)
# get pdist wrapper
pdist_fn = getattr(_distance_wrap,
"pdist_%s_%s_wrap" % (metric_name, typ))
pdist_fn(X, dm, **kwargs)
return dm
elif mstr in ['old_cosine', 'old_cos']:
warnings.warn('"old_cosine" is deprecated and will be removed in '
'a future version. Use "cosine" instead.',
DeprecationWarning)
X = _convert_to_double(X)
norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
np.sqrt(norms, out=norms)
nV = norms.reshape(m, 1)
# The numerator u * v
nm = np.dot(X, X.T)
# The denom. ||u||*||v||
de = np.dot(nV, nV.T)
dm = 1.0 - (nm / de)
dm[xrange(0, m), xrange(0, m)] = 0.0
dm = squareform(dm)
elif mstr.startswith("test_"):
if mstr in _TEST_METRICS:
dm = pdist(X, _TEST_METRICS[mstr], **kwargs)
else:
raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:])
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
def squareform(X, force="no", checks=True):
"""
Convert a vector-form distance vector to a square-form distance
matrix, and vice-versa.
Parameters
----------
X : ndarray
Either a condensed or redundant distance matrix.
force : str, optional
As with MATLAB(TM), if force is equal to ``'tovector'`` or
``'tomatrix'``, the input will be treated as a distance matrix or
distance vector respectively.
checks : bool, optional
        If set to False, no checks will be made for matrix
        symmetry or zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored anyway; they do not disrupt the
        squareform transformation.
Returns
-------
Y : ndarray
If a condensed distance matrix is passed, a redundant one is
returned, or if a redundant one is passed, a condensed distance
matrix is returned.
Notes
-----
1. v = squareform(X)
Given a square d-by-d symmetric distance matrix X,
``v = squareform(X)`` returns a ``d * (d-1) / 2`` (or
:math:`{n \\choose 2}`) sized vector v.
:math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` is the distance
between points i and j. If X is non-square or asymmetric, an error
is returned.
2. X = squareform(v)
Given a ``d*(d-1)/2`` sized v for some integer ``d >= 2`` encoding
distances as described, ``X = squareform(v)`` returns a d by d distance
matrix X. The ``X[i, j]`` and ``X[j, i]`` values are set to
:math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` and all
diagonal elements are zero.
In Scipy 0.19.0, ``squareform`` stopped casting all input types to
float64, and started returning arrays of the same dtype as the input.
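    Examples
    --------
    A small round trip on illustrative values:

    >>> import numpy as np
    >>> from scipy.spatial.distance import squareform
    >>> v = np.array([1., 2., 3.])        # condensed form for d = 3 points
    >>> M = squareform(v)                 # redundant square form
    >>> M.shape
    (3, 3)
    >>> M[0, 1], M[0, 2], M[1, 2]
    (1.0, 2.0, 3.0)
    >>> np.array_equal(squareform(M), v)  # back to the condensed form
    True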
"""
X = np.ascontiguousarray(X)
s = X.shape
if force.lower() == 'tomatrix':
if len(s) != 1:
raise ValueError("Forcing 'tomatrix' but input X is not a "
"distance vector.")
elif force.lower() == 'tovector':
if len(s) != 2:
raise ValueError("Forcing 'tovector' but input X is not a "
"distance matrix.")
# X = squareform(v)
if len(s) == 1:
if s[0] == 0:
return np.zeros((1, 1), dtype=X.dtype)
# Grab the closest value to the square root of the number
# of elements times 2 to see if the number of elements
# is indeed a binomial coefficient.
d = int(np.ceil(np.sqrt(s[0] * 2)))
# Check that v is of valid dimensions.
if d * (d - 1) != s[0] * 2:
raise ValueError('Incompatible vector size. It must be a binomial '
'coefficient n choose 2 for some integer n >= 2.')
# Allocate memory for the distance matrix.
M = np.zeros((d, d), dtype=X.dtype)
        # Since the C code does not support striding using strides,
        # the dimensions are used instead.
X = _copy_array_if_base_present(X)
# Fill in the values of the distance matrix.
_distance_wrap.to_squareform_from_vector_wrap(M, X)
# Return the distance matrix.
return M
elif len(s) == 2:
if s[0] != s[1]:
raise ValueError('The matrix argument must be square.')
if checks:
is_valid_dm(X, throw=True, name='X')
# One-side of the dimensions is set here.
d = s[0]
if d <= 1:
return np.array([], dtype=X.dtype)
# Create a vector.
v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)
        # Since the C code does not support striding using strides,
        # the dimensions are used instead.
X = _copy_array_if_base_present(X)
# Convert the vector to squareform.
_distance_wrap.to_vector_from_squareform_wrap(X, v)
return v
else:
        raise ValueError(('The first argument must be a one- or two-dimensional '
                          'array. A %d-dimensional array is not '
                          'permitted') % len(s))
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
"""
Return True if input array is a valid distance matrix.
Distance matrices must be 2-dimensional numpy arrays.
They must have a zero-diagonal, and they must be symmetric.
Parameters
----------
D : ndarray
The candidate object to test for validity.
tol : float, optional
The distance matrix should be symmetric. `tol` is the maximum
difference between entries ``ij`` and ``ji`` for the distance
metric to be considered symmetric.
throw : bool, optional
An exception is thrown if the distance matrix passed is not valid.
name : str, optional
        The name of the variable to be checked. This is useful if
throw is set to True so the offending variable can be identified
in the exception message when an exception is thrown.
warning : bool, optional
Instead of throwing an exception, a warning message is
raised.
Returns
-------
valid : bool
True if the variable `D` passed is a valid distance matrix.
Notes
-----
Small numerical differences in `D` and `D.T` and non-zeroness of
the diagonal are ignored if they are within the tolerance specified
by `tol`.
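    Examples
    --------
    Illustrative checks on tiny matrices:

    >>> import numpy as np
    >>> from scipy.spatial.distance import is_valid_dm
    >>> is_valid_dm(np.array([[0., 1.], [1., 0.]]))
    True
    >>> is_valid_dm(np.array([[0., 1.], [2., 0.]]))  # not symmetric
    False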
"""
D = np.asarray(D, order='c')
valid = True
try:
s = D.shape
if len(D.shape) != 2:
if name:
raise ValueError(('Distance matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Distance matrix must have shape=2 (i.e. '
'be two-dimensional).')
if tol == 0.0:
if not (D == D.T).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric.') % name)
else:
raise ValueError('Distance matrix must be symmetric.')
if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must '
'be zero.') % name)
else:
raise ValueError('Distance matrix diagonal must be zero.')
else:
if not (D - D.T <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric within tolerance %5.5f.')
% (name, tol))
else:
raise ValueError('Distance matrix must be symmetric within'
' tolerance %5.5f.' % tol)
if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must be'
' close to zero within tolerance %5.5f.')
% (name, tol))
else:
                    raise ValueError(('Distance matrix diagonal must be'
                                      ' close to zero within tolerance'
                                      ' %5.5f.') % tol)
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
def is_valid_y(y, warning=False, throw=False, name=None):
"""
Return True if the input array is a valid condensed distance matrix.
Condensed distance matrices must be 1-dimensional numpy arrays.
Their length must be a binomial coefficient :math:`{n \\choose 2}`
for some positive integer n.
Parameters
----------
y : ndarray
The condensed distance matrix.
warning : bool, optional
Invokes a warning if the variable passed is not a valid
condensed distance matrix. The warning message explains why
the distance matrix is not valid. `name` is used when
referencing the offending variable.
throw : bool, optional
Throws an exception if the variable passed is not a valid
condensed distance matrix.
    name : str, optional
Used when referencing the offending variable in the
warning or exception message.
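    Examples
    --------
    Illustrative checks:

    >>> from scipy.spatial.distance import is_valid_y
    >>> is_valid_y([1., 2., 3.])      # length 3 == (3 choose 2)
    True
    >>> is_valid_y([1., 2., 3., 4.])  # 4 is not a binomial coefficient
    False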
"""
y = np.asarray(y, order='c')
valid = True
try:
if len(y.shape) != 1:
if name:
raise ValueError(('Condensed distance matrix \'%s\' must '
'have shape=1 (i.e. be one-dimensional).')
% name)
else:
raise ValueError('Condensed distance matrix must have shape=1 '
'(i.e. be one-dimensional).')
n = y.shape[0]
d = int(np.ceil(np.sqrt(n * 2)))
if (d * (d - 1) / 2) != n:
if name:
raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, i.e. '
                                  'there must be a k such that '
                                  '(k \\choose 2)=n!') % name)
else:
raise ValueError('Length n of condensed distance matrix must '
'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k \\choose 2)=n!')
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
def num_obs_dm(d):
"""
Return the number of original observations that correspond to a
square, redundant distance matrix.
Parameters
----------
d : ndarray
The target distance matrix.
Returns
-------
num_obs_dm : int
The number of observations in the redundant distance matrix.
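    Examples
    --------
    Illustrative check (a 4 x 4 matrix encodes 4 observations):

    >>> import numpy as np
    >>> from scipy.spatial.distance import num_obs_dm
    >>> num_obs_dm(np.zeros((4, 4)))
    4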
"""
d = np.asarray(d, order='c')
is_valid_dm(d, tol=np.inf, throw=True, name='d')
return d.shape[0]
def num_obs_y(Y):
"""
Return the number of original observations that correspond to a
condensed distance matrix.
Parameters
----------
Y : ndarray
Condensed distance matrix.
Returns
-------
n : int
The number of observations in the condensed distance matrix `Y`.
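    Examples
    --------
    Illustrative check (3 pairwise distances come from 3 observations):

    >>> from scipy.spatial.distance import num_obs_y
    >>> num_obs_y([1., 2., 3.])
    3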
"""
Y = np.asarray(Y, order='c')
is_valid_y(Y, throw=True, name='Y')
k = Y.shape[0]
if k == 0:
raise ValueError("The number of observations cannot be determined on "
"an empty distance matrix.")
d = int(np.ceil(np.sqrt(k * 2)))
if (d * (d - 1) / 2) != k:
raise ValueError("Invalid condensed distance matrix passed. Must be "
"some k where k=(n choose 2) for some n >= 2.")
return d
def cdist(XA, XB, metric='euclidean', *args, **kwargs):
"""
Compute distance between each pair of the two collections of inputs.
See Notes for common calling conventions.
Parameters
----------
XA : ndarray
An :math:`m_A` by :math:`n` array of :math:`m_A`
original observations in an :math:`n`-dimensional space.
Inputs are converted to float type.
XB : ndarray
An :math:`m_B` by :math:`n` array of :math:`m_B`
original observations in an :math:`n`-dimensional space.
Inputs are converted to float type.
metric : str or callable, optional
The distance metric to use. If a string, the distance function can be
'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'wminkowski', 'yule'.
    *args : tuple. Deprecated.
        Additional arguments should be passed as keyword arguments.
**kwargs : dict, optional
Extra arguments to `metric`: refer to each metric documentation for a
list of all possible arguments.
Some possible arguments:
p : scalar
The p-norm to apply for Minkowski, weighted and unweighted.
Default: 2.
w : ndarray
The weight vector for metrics that support weights (e.g., Minkowski).
V : ndarray
The variance vector for standardized Euclidean.
Default: var(vstack([XA, XB]), axis=0, ddof=1)
VI : ndarray
The inverse of the covariance matrix for Mahalanobis.
Default: inv(cov(vstack([XA, XB].T))).T
    out : ndarray
        The output array.
        If not None, the distance matrix Y is stored in this array.
        Note: this argument is metric-independent; it will become a regular
        keyword argument in a future scipy version.
Returns
-------
Y : ndarray
A :math:`m_A` by :math:`m_B` distance matrix is returned.
For each :math:`i` and :math:`j`, the metric
``dist(u=XA[i], v=XB[j])`` is computed and stored in the
:math:`ij` th entry.
Raises
------
ValueError
An exception is thrown if `XA` and `XB` do not have
the same number of columns.
Notes
-----
The following are common calling conventions:
1. ``Y = cdist(XA, XB, 'euclidean')``
Computes the distance between :math:`m` points using
Euclidean distance (2-norm) as the distance metric between the
points. The points are arranged as :math:`m`
:math:`n`-dimensional row vectors in the matrix X.
2. ``Y = cdist(XA, XB, 'minkowski', p=2.)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.
3. ``Y = cdist(XA, XB, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = cdist(XA, XB, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = cdist(XA, XB, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.
7. ``Y = cdist(XA, XB, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = cdist(XA, XB, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = cdist(XA, XB, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree where at least one of them is non-zero.
10. ``Y = cdist(XA, XB, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}.
11. ``Y = cdist(XA, XB, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
12. ``Y = cdist(XA, XB, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\frac{\\sum_i (|u_i-v_i|)}
{\\sum_i (|u_i+v_i|)}
13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = cdist(XA, XB, 'yule')``
Computes the Yule distance between the boolean
vectors. (see `yule` function documentation)
15. ``Y = cdist(XA, XB, 'matching')``
Synonym for 'hamming'.
16. ``Y = cdist(XA, XB, 'dice')``
Computes the Dice distance between the boolean vectors. (see
`dice` function documentation)
17. ``Y = cdist(XA, XB, 'kulsinski')``
Computes the Kulsinski distance between the boolean
vectors. (see `kulsinski` function documentation)
18. ``Y = cdist(XA, XB, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between the boolean
vectors. (see `rogerstanimoto` function documentation)
19. ``Y = cdist(XA, XB, 'russellrao')``
Computes the Russell-Rao distance between the boolean
vectors. (see `russellrao` function documentation)
20. ``Y = cdist(XA, XB, 'sokalmichener')``
Computes the Sokal-Michener distance between the boolean
vectors. (see `sokalmichener` function documentation)
21. ``Y = cdist(XA, XB, 'sokalsneath')``
Computes the Sokal-Sneath distance between the vectors. (see
`sokalsneath` function documentation)
22. ``Y = cdist(XA, XB, 'wminkowski', p=2., w=w)``
Computes the weighted Minkowski distance between the
vectors. (see `wminkowski` function documentation)
23. ``Y = cdist(XA, XB, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = cdist(XA, XB, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function `sokalsneath`. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax::
dm = cdist(XA, XB, 'sokalsneath')
Examples
--------
Find the Euclidean distances between four 2-D coordinates:
>>> from scipy.spatial import distance
>>> coords = [(35.0456, -85.2672),
... (35.1174, -89.9711),
... (35.9728, -83.9422),
... (36.1667, -86.7833)]
>>> distance.cdist(coords, coords, 'euclidean')
array([[ 0. , 4.7044, 1.6172, 1.8856],
[ 4.7044, 0. , 6.0893, 3.3561],
[ 1.6172, 6.0893, 0. , 2.8477],
[ 1.8856, 3.3561, 2.8477, 0. ]])
Find the Manhattan distance from a 3-D point to the corners of the unit
cube:
>>> a = np.array([[0, 0, 0],
... [0, 0, 1],
... [0, 1, 0],
... [0, 1, 1],
... [1, 0, 0],
... [1, 0, 1],
... [1, 1, 0],
... [1, 1, 1]])
>>> b = np.array([[ 0.1, 0.2, 0.4]])
>>> distance.cdist(a, b, 'cityblock')
array([[ 0.7],
[ 0.9],
[ 1.3],
[ 1.5],
[ 1.5],
[ 1.7],
[ 2.1],
[ 2.3]])
"""
# You can also call this as:
# Y = cdist(XA, XB, 'test_abc')
# where 'abc' is the metric being tested. This computes the distance
# between all pairs of vectors in XA and XB using the distance metric 'abc'
# but with a more succinct, verifiable, but less efficient implementation.
kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "cdist")
XA = np.asarray(XA, order='c')
XB = np.asarray(XB, order='c')
s = XA.shape
sB = XB.shape
if len(s) != 2:
raise ValueError('XA must be a 2-dimensional array.')
if len(sB) != 2:
raise ValueError('XB must be a 2-dimensional array.')
if s[1] != sB[1]:
raise ValueError('XA and XB must have the same number of columns '
'(i.e. feature dimension.)')
mA = s[0]
mB = sB[0]
n = s[1]
out = kwargs.pop("out", None)
if out is None:
dm = np.empty((mA, mB), dtype=np.double)
else:
if out.shape != (mA, mB):
raise ValueError("Output array has incorrect shape.")
if not out.flags.c_contiguous:
raise ValueError("Output array must be C-contiguous.")
if out.dtype != np.double:
raise ValueError("Output array must be double type.")
dm = out
# compute blacklist for deprecated kwargs
if(metric in _METRICS['minkowski'].aka or
metric in _METRICS['wminkowski'].aka or
metric in ['test_minkowski', 'test_wminkowski'] or
metric in [minkowski, wminkowski]):
kwargs_blacklist = ["V", "VI"]
elif(metric in _METRICS['seuclidean'].aka or
metric == 'test_seuclidean' or metric == seuclidean):
kwargs_blacklist = ["p", "w", "VI"]
elif(metric in _METRICS['mahalanobis'].aka or
metric == 'test_mahalanobis' or metric == mahalanobis):
kwargs_blacklist = ["p", "w", "V"]
else:
kwargs_blacklist = ["p", "V", "VI"]
_filter_deprecated_kwargs(kwargs, kwargs_blacklist)
if callable(metric):
mstr = getattr(metric, '__name__', 'Unknown')
metric_name = _METRIC_ALIAS.get(mstr, None)
XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n,
metric_name, **kwargs)
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = metric(XA[i], XB[j], **kwargs)
elif isinstance(metric, string_types):
mstr = metric.lower()
# NOTE: C-version still does not support weights
if "w" in kwargs and not mstr.startswith("test_"):
if(mstr in _METRICS['seuclidean'].aka or
mstr in _METRICS['mahalanobis'].aka):
raise ValueError("metric %s incompatible with weights" % mstr)
# need to use python version for weighting
kwargs['out'] = out
mstr = "test_%s" % mstr
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n,
metric_name, **kwargs)
# get cdist wrapper
cdist_fn = getattr(_distance_wrap,
"cdist_%s_%s_wrap" % (metric_name, typ))
cdist_fn(XA, XB, dm, **kwargs)
return dm
elif mstr.startswith("test_"):
if mstr in _TEST_METRICS:
dm = cdist(XA, XB, _TEST_METRICS[mstr], **kwargs)
else:
raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:])
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
| 83,020 | 30.047494 | 83 | py |
cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/tests/test_distance.py |
|
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import os.path
from functools import wraps, partial
from scipy._lib.six import xrange, u
import numpy as np
import warnings
from numpy.linalg import norm
from numpy.testing import (verbose, assert_,
assert_array_equal, assert_equal,
assert_almost_equal, assert_allclose)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy.spatial.distance import (squareform, pdist, cdist, num_obs_y,
num_obs_dm, is_valid_dm, is_valid_y,
_validate_vector, _METRICS_NAMES)
# these were missing: chebyshev cityblock kulsinski
from scipy.spatial.distance import (braycurtis, canberra, chebyshev, cityblock,
correlation, cosine, dice, euclidean,
hamming, jaccard, kulsinski, mahalanobis,
matching, minkowski, rogerstanimoto,
russellrao, seuclidean, sokalmichener,
sokalsneath, sqeuclidean, yule)
from scipy.spatial.distance import wminkowski as old_wminkowski
_filenames = [
"cdist-X1.txt",
"cdist-X2.txt",
"iris.txt",
"pdist-boolean-inp.txt",
"pdist-chebyshev-ml-iris.txt",
"pdist-chebyshev-ml.txt",
"pdist-cityblock-ml-iris.txt",
"pdist-cityblock-ml.txt",
"pdist-correlation-ml-iris.txt",
"pdist-correlation-ml.txt",
"pdist-cosine-ml-iris.txt",
"pdist-cosine-ml.txt",
"pdist-double-inp.txt",
"pdist-euclidean-ml-iris.txt",
"pdist-euclidean-ml.txt",
"pdist-hamming-ml.txt",
"pdist-jaccard-ml.txt",
"pdist-minkowski-3.2-ml-iris.txt",
"pdist-minkowski-3.2-ml.txt",
"pdist-minkowski-5.8-ml-iris.txt",
"pdist-seuclidean-ml-iris.txt",
"pdist-seuclidean-ml.txt",
"pdist-spearman-ml.txt",
"random-bool-data.txt",
"random-double-data.txt",
"random-int-data.txt",
"random-uint-data.txt",
]
_tdist = np.array([[0, 662, 877, 255, 412, 996],
[662, 0, 295, 468, 268, 400],
[877, 295, 0, 754, 564, 138],
[255, 468, 754, 0, 219, 869],
[412, 268, 564, 219, 0, 669],
[996, 400, 138, 869, 669, 0]], dtype='double')
_ytdist = squareform(_tdist)
# A hashmap of expected output arrays for the tests. These arrays
# come from a list of text files, which are read prior to testing.
# Each test loads inputs and outputs from this dictionary.
eo = {}
def load_testing_files():
for fn in _filenames:
name = fn.replace(".txt", "").replace("-ml", "")
fqfn = os.path.join(os.path.dirname(__file__), 'data', fn)
fp = open(fqfn)
eo[name] = np.loadtxt(fp)
fp.close()
eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])
eo['random-bool-data'] = np.bool_(eo['random-bool-data'])
eo['random-float32-data'] = np.float32(eo['random-double-data'])
eo['random-int-data'] = np.int_(eo['random-int-data'])
eo['random-uint-data'] = np.uint(eo['random-uint-data'])
load_testing_files()
def _chk_asarrays(arrays, axis=None):
arrays = [np.asanyarray(a) for a in arrays]
if axis is None:
# np < 1.10 ravel removes subclass from arrays
arrays = [np.ravel(a) if a.ndim != 1 else a
for a in arrays]
axis = 0
arrays = tuple(np.atleast_1d(a) for a in arrays)
if axis < 0:
if not all(a.ndim == arrays[0].ndim for a in arrays):
raise ValueError("array ndim must be the same for neg axis")
axis = range(arrays[0].ndim)[axis]
return arrays + (axis,)
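# Sketch of the helper's contract (illustrative, not part of the tests): with
# axis=None the inputs are raveled to 1-D and axis becomes 0, so e.g.
#     a, axis = _chk_asarrays([np.ones((2, 3))])
# yields a 1-D array of length 6 and axis == 0.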
def _chk_weights(arrays, weights=None, axis=None,
force_weights=False, simplify_weights=True,
pos_only=False, neg_check=False,
nan_screen=False, mask_screen=False,
ddof=None):
chked = _chk_asarrays(arrays, axis=axis)
arrays, axis = chked[:-1], chked[-1]
simplify_weights = simplify_weights and not force_weights
if not force_weights and mask_screen:
force_weights = any(np.ma.getmask(a) is not np.ma.nomask for a in arrays)
if nan_screen:
has_nans = [np.isnan(np.sum(a)) for a in arrays]
if any(has_nans):
mask_screen = True
force_weights = True
arrays = tuple(np.ma.masked_invalid(a) if has_nan else a
for a, has_nan in zip(arrays, has_nans))
if weights is not None:
weights = np.asanyarray(weights)
elif force_weights:
weights = np.ones(arrays[0].shape[axis])
else:
return arrays + (weights, axis)
if ddof:
weights = _freq_weights(weights)
if mask_screen:
weights = _weight_masked(arrays, weights, axis)
if not all(weights.shape == (a.shape[axis],) for a in arrays):
raise ValueError("weights shape must match arrays along axis")
if neg_check and (weights < 0).any():
raise ValueError("weights cannot be negative")
if pos_only:
pos_weights = np.where(weights > 0)[0]
if pos_weights.size < weights.size:
arrays = tuple(np.take(a, pos_weights, axis=axis) for a in arrays)
weights = weights[pos_weights]
if simplify_weights and (weights == 1).all():
weights = None
return arrays + (weights, axis)
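# Sketch of _chk_weights' contract (illustrative): it returns a flat tuple of
# the validated arrays followed by (weights, axis). Without weights and
# without force_weights it is a pass-through,
#     a, b, w, axis = _chk_weights((u, v))                      # w is None
# while force_weights=True substitutes unit weights:
#     a, b, w, axis = _chk_weights((u, v), force_weights=True)  # w == ones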
def _freq_weights(weights):
if weights is None:
return weights
int_weights = weights.astype(int)
if (weights != int_weights).any():
raise ValueError("frequency (integer count-type) weights required %s" % weights)
return int_weights
def _weight_masked(arrays, weights, axis):
if axis is None:
axis = 0
weights = np.asanyarray(weights)
for a in arrays:
axis_mask = np.ma.getmask(a)
if axis_mask is np.ma.nomask:
continue
if a.ndim > 1:
not_axes = tuple(i for i in range(a.ndim) if i != axis)
axis_mask = axis_mask.any(axis=not_axes)
weights *= 1 - axis_mask.astype(int)
return weights
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def _assert_within_tol(a, b, atol=0, rtol=0, verbose_=False):
if verbose_:
print(np.abs(a - b).max())
assert_allclose(a, b, rtol=rtol, atol=atol)
def _rand_split(arrays, weights, axis, split_per, seed=None):
# inverse operation for stats.collapse_weights
weights = np.array(weights, dtype=np.float64) # modified inplace; need a copy
seeded_rand = np.random.RandomState(seed)
def mytake(a, ix, axis):
record = np.asanyarray(np.take(a, ix, axis=axis))
return record.reshape([a.shape[i] if i != axis else 1
for i in range(a.ndim)])
n_obs = arrays[0].shape[axis]
assert all(a.shape[axis] == n_obs for a in arrays), "data must be aligned on sample axis"
for i in range(int(split_per) * n_obs):
split_ix = seeded_rand.randint(n_obs + i)
prev_w = weights[split_ix]
q = seeded_rand.rand()
weights[split_ix] = q * prev_w
weights = np.append(weights, (1. - q) * prev_w)
arrays = [np.append(a, mytake(a, split_ix, axis=axis),
axis=axis) for a in arrays]
return arrays, weights
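# Sketch of what _rand_split does (illustrative): it performs
# int(split_per) * n_obs random splits, each conserving total weight
# (q*w + (1-q)*w == w), so three rows with unit weights become six rows whose
# weights still sum to 3.0; a correctly weighted statistic must therefore be
# unchanged by the split.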
def _rough_check(a, b, compare_assert=partial(assert_allclose, atol=1e-5),
key=lambda x: x, w=None):
check_a = key(a)
check_b = key(b)
try:
if np.array(check_a != check_b).any(): # try strict equality for string types
compare_assert(check_a, check_b)
except AttributeError: # masked array
compare_assert(check_a, check_b)
except (TypeError, ValueError): # nested data structure
for a_i, b_i in zip(check_a, check_b):
_rough_check(a_i, b_i, compare_assert=compare_assert)
# diff from test_stats:
# n_args=2, weight_arg='w', default_axis=None
# ma_safe = False, nan_safe = False
def _weight_checked(fn, n_args=2, default_axis=None, key=lambda x: x, weight_arg='w',
squeeze=True, silent=False,
ones_test=True, const_test=True, dup_test=True,
split_test=True, dud_test=True, ma_safe=False, ma_very_safe=False, nan_safe=False,
split_per=1.0, seed=0, compare_assert=partial(assert_allclose, atol=1e-5)):
"""runs fn on its arguments 2 or 3 ways, checks that the results are the same,
then returns the same thing it would have returned before"""
@wraps(fn)
def wrapped(*args, **kwargs):
result = fn(*args, **kwargs)
arrays = args[:n_args]
rest = args[n_args:]
weights = kwargs.get(weight_arg, None)
axis = kwargs.get('axis', default_axis)
chked = _chk_weights(arrays, weights=weights, axis=axis, force_weights=True, mask_screen=True)
arrays, weights, axis = chked[:-2], chked[-2], chked[-1]
if squeeze:
arrays = [np.atleast_1d(a.squeeze()) for a in arrays]
try:
            # WEIGHTS CHECK 1: EQUAL WEIGHTED OBSERVATIONS
args = tuple(arrays) + rest
if ones_test:
kwargs[weight_arg] = weights
_rough_check(result, fn(*args, **kwargs), key=key)
if const_test:
kwargs[weight_arg] = weights * 101.0
_rough_check(result, fn(*args, **kwargs), key=key)
kwargs[weight_arg] = weights * 0.101
try:
_rough_check(result, fn(*args, **kwargs), key=key)
except Exception as e:
raise type(e)((e, arrays, weights))
# WEIGHTS CHECK 2: ADDL 0-WEIGHTED OBS
if dud_test:
# add randomly resampled rows, weighted at 0
dud_arrays, dud_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)
dud_weights[:weights.size] = weights # not exactly 1 because of masked arrays
dud_weights[weights.size:] = 0
dud_args = tuple(dud_arrays) + rest
kwargs[weight_arg] = dud_weights
_rough_check(result, fn(*dud_args, **kwargs), key=key)
# increase the value of those 0-weighted rows
for a in dud_arrays:
indexer = [slice(None)] * a.ndim
indexer[axis] = slice(weights.size, None)
a[indexer] = a[indexer] * 101
dud_args = tuple(dud_arrays) + rest
_rough_check(result, fn(*dud_args, **kwargs), key=key)
# set those 0-weighted rows to NaNs
for a in dud_arrays:
indexer = [slice(None)] * a.ndim
indexer[axis] = slice(weights.size, None)
a[indexer] = a[indexer] * np.nan
if kwargs.get("nan_policy", None) == "omit" and nan_safe:
dud_args = tuple(dud_arrays) + rest
_rough_check(result, fn(*dud_args, **kwargs), key=key)
# mask out those nan values
if ma_safe:
dud_arrays = [np.ma.masked_invalid(a) for a in dud_arrays]
dud_args = tuple(dud_arrays) + rest
_rough_check(result, fn(*dud_args, **kwargs), key=key)
if ma_very_safe:
kwargs[weight_arg] = None
_rough_check(result, fn(*dud_args, **kwargs), key=key)
del dud_arrays, dud_args, dud_weights
# WEIGHTS CHECK 3: DUPLICATE DATA (DUMB SPLITTING)
if dup_test:
dup_arrays = [np.append(a, a, axis=axis) for a in arrays]
dup_weights = np.append(weights, weights) / 2.0
dup_args = tuple(dup_arrays) + rest
kwargs[weight_arg] = dup_weights
_rough_check(result, fn(*dup_args, **kwargs), key=key)
del dup_args, dup_arrays, dup_weights
            # WEIGHTS CHECK 4: RANDOM SPLITTING
if split_test and split_per > 0:
split_arrays, split_weights = _rand_split(arrays, weights, axis, split_per=split_per, seed=seed)
split_args = tuple(split_arrays) + rest
kwargs[weight_arg] = split_weights
_rough_check(result, fn(*split_args, **kwargs), key=key)
except NotImplementedError as e:
# when some combination of arguments makes weighting impossible,
# this is the desired response
if not silent:
warnings.warn("%s NotImplemented weights: %s" % (fn.__name__, e))
return result
return wrapped
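# The `w*` wrappers below behave exactly like the functions they wrap; as a
# sketch, wcityblock(u, v) returns cityblock(u, v) after additionally
# re-running the metric under equivalent weightings (scaled, zero-padded,
# duplicated and randomly split observations) and asserting the results agree.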
wcdist = _weight_checked(cdist, default_axis=1, squeeze=False)
wcdist_no_const = _weight_checked(cdist, default_axis=1, squeeze=False, const_test=False)
wpdist = _weight_checked(pdist, default_axis=1, squeeze=False, n_args=1)
wpdist_no_const = _weight_checked(pdist, default_axis=1, squeeze=False, const_test=False, n_args=1)
wrogerstanimoto = _weight_checked(rogerstanimoto)
wmatching = whamming = _weight_checked(hamming, dud_test=False)
wyule = _weight_checked(yule)
wdice = _weight_checked(dice)
wcityblock = _weight_checked(cityblock)
wchebyshev = _weight_checked(chebyshev)
wcosine = _weight_checked(cosine)
wcorrelation = _weight_checked(correlation)
wkulsinski = _weight_checked(kulsinski)
wminkowski = _weight_checked(minkowski, const_test=False)
wjaccard = _weight_checked(jaccard)
weuclidean = _weight_checked(euclidean, const_test=False)
wsqeuclidean = _weight_checked(sqeuclidean, const_test=False)
wbraycurtis = _weight_checked(braycurtis)
wcanberra = _weight_checked(canberra, const_test=False)
wsokalsneath = _weight_checked(sokalsneath)
wsokalmichener = _weight_checked(sokalmichener)
wrussellrao = _weight_checked(russellrao)
class TestCdist(object):
def setup_method(self):
self.rnd_eo_names = ['random-float32-data', 'random-int-data',
'random-uint-data', 'random-double-data',
'random-bool-data']
self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],
'uint': [np.int_, np.float32, np.double],
'int': [np.float32, np.double],
'float32': [np.double]}
def test_cdist_extra_args(self):
# Tests that args and kwargs are correctly handled
def _my_metric(x, y, arg, kwarg=1, kwarg2=2):
return arg + kwarg + kwarg2
X1 = [[1., 2., 3.], [1.2, 2.3, 3.4], [2.2, 2.3, 4.4]]
X2 = [[7., 5., 8.], [7.5, 5.8, 8.4], [5.5, 5.8, 4.4]]
kwargs = {'N0tV4l1D_p4raM': 3.14, "w":np.arange(3)}
args = [3.14] * 200
with suppress_warnings() as w:
w.filter(DeprecationWarning)
for metric in _METRICS_NAMES:
assert_raises(TypeError, cdist, X1, X2,
metric=metric, **kwargs)
assert_raises(TypeError, cdist, X1, X2,
metric=eval(metric), **kwargs)
assert_raises(TypeError, cdist, X1, X2,
metric="test_" + metric, **kwargs)
assert_raises(TypeError, cdist, X1, X2,
metric=metric, *args)
assert_raises(TypeError, cdist, X1, X2,
metric=eval(metric), *args)
assert_raises(TypeError, cdist, X1, X2,
metric="test_" + metric, *args)
assert_raises(TypeError, cdist, X1, X2, _my_metric)
assert_raises(TypeError, cdist, X1, X2, _my_metric, *args)
assert_raises(TypeError, cdist, X1, X2, _my_metric, **kwargs)
assert_raises(TypeError, cdist, X1, X2, _my_metric,
kwarg=2.2, kwarg2=3.3)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1, 2, kwarg=2.2)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2, 3.3)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1, 2.2)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1)
assert_raises(TypeError, cdist, X1, X2, _my_metric, 1.1,
kwarg=2.2, kwarg2=3.3)
# this should work
assert_allclose(cdist(X1, X2, metric=_my_metric,
arg=1.1, kwarg2=3.3), 5.4)
def test_cdist_euclidean_random_unicode(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = wcdist_no_const(X1, X2, u('euclidean'))
Y2 = wcdist_no_const(X1, X2, u('test_euclidean'))
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p3d8(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = wcdist_no_const(X1, X2, 'minkowski', p=3.8)
Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=3.8)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p4d6(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = wcdist_no_const(X1, X2, 'minkowski', p=4.6)
Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=4.6)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_minkowski_random_p1d23(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = wcdist_no_const(X1, X2, 'minkowski', p=1.23)
Y2 = wcdist_no_const(X1, X2, 'test_minkowski', p=1.23)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_cosine_random(self):
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
Y1 = wcdist(X1, X2, 'cosine')
# Naive implementation
def norms(X):
return np.linalg.norm(X, axis=1).reshape(-1, 1)
Y2 = 1 - np.dot((X1 / norms(X1)), (X2 / norms(X2)).T)
_assert_within_tol(Y1, Y2, eps, verbose > 2)
def test_cdist_mahalanobis(self):
# 1-dimensional observations
x1 = np.array([[2], [3]])
x2 = np.array([[2], [5]])
dist = cdist(x1, x2, metric='mahalanobis')
assert_allclose(dist, [[0.0, np.sqrt(4.5)], [np.sqrt(0.5), np.sqrt(2)]])
# 2-dimensional observations
x1 = np.array([[0, 0], [-1, 0]])
x2 = np.array([[0, 2], [1, 0], [0, -2]])
dist = cdist(x1, x2, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [[rt2, rt2, rt2], [2, 2 * rt2, 2]])
# Too few observations
assert_raises(ValueError,
cdist, [[0, 1]], [[2, 3]], metric='mahalanobis')
def test_cdist_custom_notdouble(self):
class myclass(object):
pass
def _my_metric(x, y):
if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):
raise ValueError("Type has been changed")
return 1.123
data = np.array([[myclass()]], dtype=object)
cdist_y = cdist(data, data, metric=_my_metric)
right_y = 1.123
assert_equal(cdist_y, right_y, verbose=verbose > 2)
def _check_calling_conventions(self, X1, X2, metric, eps=1e-07, **kwargs):
# helper function for test_cdist_calling_conventions
try:
y1 = cdist(X1, X2, metric=metric, **kwargs)
y2 = cdist(X1, X2, metric=eval(metric), **kwargs)
y3 = cdist(X1, X2, metric="test_" + metric, **kwargs)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
assert_raises(e_cls, cdist, X1, X2, metric=metric, **kwargs)
assert_raises(e_cls, cdist, X1, X2, metric=eval(metric), **kwargs)
assert_raises(e_cls, cdist, X1, X2, metric="test_" + metric, **kwargs)
else:
_assert_within_tol(y1, y2, rtol=eps, verbose_=verbose > 2)
_assert_within_tol(y1, y3, rtol=eps, verbose_=verbose > 2)
def test_cdist_calling_conventions(self):
# Ensures that specifying the metric with a str or scipy function
# gives the same behaviour (i.e. same result or same exception).
# NOTE: The correctness should be checked within each metric tests.
for eo_name in self.rnd_eo_names:
# subsampling input data to speed-up tests
            # NOTE: num samples needs to be > dimensions for mahalanobis
X1 = eo[eo_name][::5, ::-2]
X2 = eo[eo_name][1::5, ::2]
for metric in _METRICS_NAMES:
if verbose > 2:
print("testing: ", metric, " with: ", eo_name)
if metric == 'wminkowski':
continue
if metric in {'dice', 'yule', 'kulsinski', 'matching',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath'} and 'bool' not in eo_name:
# python version permits non-bools e.g. for fuzzy logic
continue
self._check_calling_conventions(X1, X2, metric)
# Testing built-in metrics with extra args
if metric == "seuclidean":
X12 = np.vstack([X1, X2]).astype(np.double)
V = np.var(X12, axis=0, ddof=1)
self._check_calling_conventions(X1, X2, metric, V=V)
elif metric == "mahalanobis":
X12 = np.vstack([X1, X2]).astype(np.double)
V = np.atleast_2d(np.cov(X12.T))
VI = np.array(np.linalg.inv(V).T)
self._check_calling_conventions(X1, X2, metric, VI=VI)
def test_cdist_dtype_equivalence(self):
# Tests that the result is not affected by type up-casting
eps = 1e-07
tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),
(eo['random-uint-data'], self.valid_upcasts['uint']),
(eo['random-int-data'], self.valid_upcasts['int']),
(eo['random-float32-data'], self.valid_upcasts['float32'])]
for metric in _METRICS_NAMES:
for test in tests:
X1 = test[0][::5, ::-2]
X2 = test[0][1::5, ::2]
try:
y1 = cdist(X1, X2, metric=metric)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
for new_type in test[1]:
X1new = new_type(X1)
X2new = new_type(X2)
assert_raises(e_cls, cdist, X1new, X2new, metric=metric)
else:
for new_type in test[1]:
y2 = cdist(new_type(X1), new_type(X2), metric=metric)
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_cdist_out(self):
# Test that out parameter works properly
eps = 1e-07
X1 = eo['cdist-X1']
X2 = eo['cdist-X2']
out_r, out_c = X1.shape[0], X2.shape[0]
for metric in _METRICS_NAMES:
kwargs = dict()
if metric in ['minkowski', 'wminkowski']:
kwargs['p'] = 1.23
if metric == 'wminkowski':
kwargs['w'] = 1.0 / X1.std(axis=0)
out1 = np.empty((out_r, out_c), dtype=np.double)
Y1 = cdist(X1, X2, metric, **kwargs)
Y2 = cdist(X1, X2, metric, out=out1, **kwargs)
# test that output is numerically equivalent
_assert_within_tol(Y1, Y2, eps, verbose > 2)
# test that Y_test1 and out1 are the same object
assert_(Y2 is out1)
# test for incorrect shape
out2 = np.empty((out_r-1, out_c+1), dtype=np.double)
assert_raises(ValueError, cdist, X1, X2, metric, out=out2, **kwargs)
# test for C-contiguous order
out3 = np.empty((2 * out_r, 2 * out_c), dtype=np.double)[::2, ::2]
out4 = np.empty((out_r, out_c), dtype=np.double, order='F')
assert_raises(ValueError, cdist, X1, X2, metric, out=out3, **kwargs)
assert_raises(ValueError, cdist, X1, X2, metric, out=out4, **kwargs)
# test for incorrect dtype
out5 = np.empty((out_r, out_c), dtype=np.int64)
assert_raises(ValueError, cdist, X1, X2, metric, out=out5, **kwargs)
def test_striding(self):
# test that striding is handled correct with calls to
# _copy_array_if_base_present
eps = 1e-07
X1 = eo['cdist-X1'][::2, ::2]
X2 = eo['cdist-X2'][::2, ::2]
X1_copy = X1.copy()
X2_copy = X2.copy()
# confirm equivalence
assert_equal(X1, X1_copy)
assert_equal(X2, X2_copy)
# confirm contiguity
assert_(not X1.flags.c_contiguous)
assert_(not X2.flags.c_contiguous)
assert_(X1_copy.flags.c_contiguous)
assert_(X2_copy.flags.c_contiguous)
for metric in _METRICS_NAMES:
kwargs = dict()
if metric in ['minkowski', 'wminkowski']:
kwargs['p'] = 1.23
if metric == 'wminkowski':
kwargs['w'] = 1.0 / X1.std(axis=0)
Y1 = cdist(X1, X2, metric, **kwargs)
Y2 = cdist(X1_copy, X2_copy, metric, **kwargs)
# test that output is numerically equivalent
_assert_within_tol(Y1, Y2, eps, verbose > 2)
class TestPdist(object):
def setup_method(self):
self.rnd_eo_names = ['random-float32-data', 'random-int-data',
'random-uint-data', 'random-double-data',
'random-bool-data']
self.valid_upcasts = {'bool': [np.uint, np.int_, np.float32, np.double],
'uint': [np.int_, np.float32, np.double],
'int': [np.float32, np.double],
'float32': [np.double]}
def test_pdist_extra_args(self):
# Tests that args and kwargs are correctly handled
def _my_metric(x, y, arg, kwarg=1, kwarg2=2):
return arg + kwarg + kwarg2
X1 = [[1., 2.], [1.2, 2.3], [2.2, 2.3]]
kwargs = {'N0tV4l1D_p4raM': 3.14, "w":np.arange(2)}
args = [3.14] * 200
with suppress_warnings() as w:
w.filter(DeprecationWarning)
for metric in _METRICS_NAMES:
assert_raises(TypeError, pdist, X1, metric=metric, **kwargs)
assert_raises(TypeError, pdist, X1,
metric=eval(metric), **kwargs)
assert_raises(TypeError, pdist, X1,
metric="test_" + metric, **kwargs)
assert_raises(TypeError, pdist, X1, metric=metric, *args)
assert_raises(TypeError, pdist, X1, metric=eval(metric), *args)
assert_raises(TypeError, pdist, X1,
metric="test_" + metric, *args)
assert_raises(TypeError, pdist, X1, _my_metric)
assert_raises(TypeError, pdist, X1, _my_metric, *args)
assert_raises(TypeError, pdist, X1, _my_metric, **kwargs)
assert_raises(TypeError, pdist, X1, _my_metric,
kwarg=2.2, kwarg2=3.3)
assert_raises(TypeError, pdist, X1, _my_metric, 1, 2, kwarg=2.2)
assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2, 3.3)
assert_raises(TypeError, pdist, X1, _my_metric, 1.1, 2.2)
assert_raises(TypeError, pdist, X1, _my_metric, 1.1)
assert_raises(TypeError, pdist, X1, _my_metric, 1.1,
kwarg=2.2, kwarg2=3.3)
# these should work
assert_allclose(pdist(X1, metric=_my_metric,
arg=1.1, kwarg2=3.3), 5.4)
def test_pdist_euclidean_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_u(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, u('euclidean'))
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-euclidean']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-euclidean']
Y_test2 = wpdist_no_const(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_euclidean_iris_double(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_euclidean_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-euclidean-iris']
Y_test1 = wpdist_no_const(X, 'euclidean')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_euclidean_iris_nonC(self):
# Test pdist(X, 'test_euclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-euclidean-iris']
Y_test2 = wpdist_no_const(X, 'test_euclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-seuclidean']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_random_nonC(self):
# Test pdist(X, 'test_sqeuclidean') [the non-C implementation]
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-seuclidean']
Y_test2 = pdist(X, 'test_seuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_seuclidean_iris(self):
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_float32(self):
# Tests pdist(X, 'seuclidean') on the Iris data set (float32).
eps = 1e-05
X = np.float32(eo['iris'])
Y_right = eo['pdist-seuclidean-iris']
Y_test1 = pdist(X, 'seuclidean')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_seuclidean_iris_nonC(self):
# Test pdist(X, 'test_seuclidean') [the non-C implementation] on the
# Iris data set.
eps = 1e-05
X = eo['iris']
Y_right = eo['pdist-seuclidean-iris']
Y_test2 = pdist(X, 'test_seuclidean')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test1 = wpdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cosine']
Y_test1 = wpdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_random_nonC(self):
# Test pdist(X, 'test_cosine') [the non-C implementation]
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-cosine']
Y_test2 = wpdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test1 = wpdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cosine_iris_float32(self):
eps = 1e-07
X = np.float32(eo['iris'])
Y_right = eo['pdist-cosine-iris']
Y_test1 = wpdist(X, 'cosine')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_cosine_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-cosine-iris']
Y_test2 = wpdist(X, 'test_cosine')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cosine_bounds(self):
# Test adapted from @joernhees's example at gh-5208: case where
# cosine distance used to be negative. XXX: very sensitive to the
# specific norm computation.
x = np.abs(np.random.RandomState(1337).rand(91))
X = np.vstack([x, x])
assert_(wpdist(X, 'cosine')[0] >= 0,
msg='cosine distance should be non-negative')
def test_pdist_cityblock_random(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test1 = wpdist_no_const(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_float32(self):
eps = 1e-06
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-cityblock']
Y_test1 = wpdist_no_const(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_random_nonC(self):
eps = 1e-06
X = eo['pdist-double-inp']
Y_right = eo['pdist-cityblock']
Y_test2 = wpdist_no_const(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_cityblock_iris(self):
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test1 = wpdist_no_const(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_cityblock_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-cityblock-iris']
Y_test1 = wpdist_no_const(X, 'cityblock')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_cityblock_iris_nonC(self):
# Test pdist(X, 'test_cityblock') [the non-C implementation] on the
# Iris data set.
eps = 1e-14
X = eo['iris']
Y_right = eo['pdist-cityblock-iris']
Y_test2 = wpdist_no_const(X, 'test_cityblock')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_random(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test1 = wpdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-correlation']
Y_test1 = wpdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_random_nonC(self):
eps = 1e-07
X = eo['pdist-double-inp']
Y_right = eo['pdist-correlation']
Y_test2 = wpdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_correlation_iris(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test1 = wpdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_correlation_iris_float32(self):
eps = 1e-07
X = eo['iris']
Y_right = np.float32(eo['pdist-correlation-iris'])
Y_test1 = wpdist(X, 'correlation')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_correlation_iris_nonC(self):
eps = 1e-08
X = eo['iris']
Y_right = eo['pdist-correlation-iris']
Y_test2 = wpdist(X, 'test_correlation')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_random(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_float32(self):
eps = 1e-05
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-minkowski-3.2']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_random_nonC(self):
eps = 1e-05
X = eo['pdist-double-inp']
Y_right = eo['pdist-minkowski-3.2']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_3_2_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_3_2_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=3.2)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_3_2_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-3.2-iris']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=3.2)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_minkowski_5_8_iris(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_minkowski_5_8_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test1 = wpdist_no_const(X, 'minkowski', p=5.8)
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_minkowski_5_8_iris_nonC(self):
eps = 1e-07
X = eo['iris']
Y_right = eo['pdist-minkowski-5.8-iris']
Y_test2 = wpdist_no_const(X, 'test_minkowski', p=5.8)
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_mahalanobis(self):
# 1-dimensional observations
x = np.array([2.0, 2.0, 3.0, 5.0]).reshape(-1, 1)
dist = pdist(x, metric='mahalanobis')
assert_allclose(dist, [0.0, np.sqrt(0.5), np.sqrt(4.5),
np.sqrt(0.5), np.sqrt(4.5), np.sqrt(2.0)])
# 2-dimensional observations
x = np.array([[0, 0], [-1, 0], [0, 2], [1, 0], [0, -2]])
dist = pdist(x, metric='mahalanobis')
rt2 = np.sqrt(2)
assert_allclose(dist, [rt2, rt2, rt2, rt2, 2, 2 * rt2, 2, 2, 2 * rt2, 2])
# Too few observations
assert_raises(ValueError,
wpdist, [[0, 1], [2, 3]], metric='mahalanobis')
def test_pdist_hamming_random(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_hamming_random_nonC(self):
eps = 1e-07
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-hamming']
Y_test2 = wpdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_dhamming_random(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test1 = wpdist(X, 'hamming')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_dhamming_random_nonC(self):
eps = 1e-07
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-hamming']
Y_test2 = wpdist(X, 'test_hamming')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_jaccard_random(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_jaccard_random_nonC(self):
eps = 1e-08
X = eo['pdist-boolean-inp']
Y_right = eo['pdist-jaccard']
Y_test2 = wpdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_djaccard_random(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_float32(self):
eps = 1e-08
X = np.float32(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test1 = wpdist(X, 'jaccard')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_djaccard_random_nonC(self):
eps = 1e-08
X = np.float64(eo['pdist-boolean-inp'])
Y_right = eo['pdist-jaccard']
Y_test2 = wpdist(X, 'test_jaccard')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebyshev_random(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebyshev']
Y_test1 = pdist(X, 'chebyshev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebyshev_random_float32(self):
eps = 1e-07
X = np.float32(eo['pdist-double-inp'])
Y_right = eo['pdist-chebyshev']
Y_test1 = pdist(X, 'chebyshev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebyshev_random_nonC(self):
eps = 1e-08
X = eo['pdist-double-inp']
Y_right = eo['pdist-chebyshev']
Y_test2 = pdist(X, 'test_chebyshev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_chebyshev_iris(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebyshev-iris']
Y_test1 = pdist(X, 'chebyshev')
_assert_within_tol(Y_test1, Y_right, eps)
def test_pdist_chebyshev_iris_float32(self):
eps = 1e-06
X = np.float32(eo['iris'])
Y_right = eo['pdist-chebyshev-iris']
Y_test1 = pdist(X, 'chebyshev')
_assert_within_tol(Y_test1, Y_right, eps, verbose > 2)
def test_pdist_chebyshev_iris_nonC(self):
eps = 1e-15
X = eo['iris']
Y_right = eo['pdist-chebyshev-iris']
Y_test2 = pdist(X, 'test_chebyshev')
_assert_within_tol(Y_test2, Y_right, eps)
def test_pdist_matching_mtica1(self):
# Test matching(*,*) with mtica example #1 (nums).
m = wmatching(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wmatching(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_matching_mtica2(self):
# Test matching(*,*) with mtica example #2.
m = wmatching(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wmatching(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
def test_pdist_jaccard_mtica1(self):
m = wjaccard(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wjaccard(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
assert_allclose(m, 0.6, rtol=0, atol=1e-10)
assert_allclose(m2, 0.6, rtol=0, atol=1e-10)
def test_pdist_jaccard_mtica2(self):
m = wjaccard(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wjaccard(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
def test_pdist_yule_mtica1(self):
m = wyule(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wyule(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_yule_mtica2(self):
m = wyule(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wyule(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2, rtol=0, atol=1e-10)
assert_allclose(m2, 2, rtol=0, atol=1e-10)
def test_pdist_dice_mtica1(self):
m = wdice(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wdice(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 7, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 7, rtol=0, atol=1e-10)
def test_pdist_dice_mtica2(self):
m = wdice(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wdice(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 0.5, rtol=0, atol=1e-10)
assert_allclose(m2, 0.5, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica1(self):
m = sokalsneath(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)
def test_pdist_sokalsneath_mtica2(self):
m = wsokalsneath(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wsokalsneath(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica1(self):
m = wrogerstanimoto(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wrogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 4, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 4, rtol=0, atol=1e-10)
def test_pdist_rogerstanimoto_mtica2(self):
m = wrogerstanimoto(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wrogerstanimoto(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 4 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 4 / 5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica1(self):
m = wrussellrao(np.array([1, 0, 1, 1, 0]),
np.array([1, 1, 0, 1, 1]))
m2 = wrussellrao(np.array([1, 0, 1, 1, 0], dtype=bool),
np.array([1, 1, 0, 1, 1], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 3 / 5, rtol=0, atol=1e-10)
assert_allclose(m2, 3 / 5, rtol=0, atol=1e-10)
def test_pdist_russellrao_mtica2(self):
m = wrussellrao(np.array([1, 0, 1]),
np.array([1, 1, 0]))
m2 = wrussellrao(np.array([1, 0, 1], dtype=bool),
np.array([1, 1, 0], dtype=bool))
if verbose > 2:
print(m)
assert_allclose(m, 2 / 3, rtol=0, atol=1e-10)
assert_allclose(m2, 2 / 3, rtol=0, atol=1e-10)
def test_pdist_canberra_match(self):
D = eo['iris']
if verbose > 2:
print(D.shape, D.dtype)
eps = 1e-10
y1 = wpdist_no_const(D, "canberra")
y2 = wpdist_no_const(D, "test_canberra")
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_pdist_canberra_ticket_711(self):
# Test pdist(X, 'canberra') to see if Canberra gives the right result
# as reported on gh-1238.
eps = 1e-8
pdist_y = wpdist_no_const(([3.3], [3.4]), "canberra")
right_y = 0.01492537
_assert_within_tol(pdist_y, right_y, eps, verbose > 2)
def test_pdist_custom_notdouble(self):
# tests that when using a custom metric the data type is not altered
class myclass(object):
pass
def _my_metric(x, y):
if not isinstance(x[0], myclass) or not isinstance(y[0], myclass):
raise ValueError("Type has been changed")
return 1.123
data = np.array([[myclass()], [myclass()]], dtype=object)
pdist_y = pdist(data, metric=_my_metric)
right_y = 1.123
assert_equal(pdist_y, right_y, verbose=verbose > 2)
def _check_calling_conventions(self, X, metric, eps=1e-07, **kwargs):
# helper function for test_pdist_calling_conventions
try:
y1 = pdist(X, metric=metric, **kwargs)
y2 = pdist(X, metric=eval(metric), **kwargs)
y3 = pdist(X, metric="test_" + metric, **kwargs)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
assert_raises(e_cls, pdist, X, metric=metric, **kwargs)
assert_raises(e_cls, pdist, X, metric=eval(metric), **kwargs)
assert_raises(e_cls, pdist, X, metric="test_" + metric, **kwargs)
else:
_assert_within_tol(y1, y2, rtol=eps, verbose_=verbose > 2)
_assert_within_tol(y1, y3, rtol=eps, verbose_=verbose > 2)
def test_pdist_calling_conventions(self):
# Ensures that specifying the metric with a str or scipy function
# gives the same behaviour (i.e. same result or same exception).
        # NOTE: correctness should be checked within each metric's own tests.
# NOTE: Extra args should be checked with a dedicated test
eps = 1e-07
for eo_name in self.rnd_eo_names:
            # subsampling input data to speed up tests
            # NOTE: the number of samples must exceed the number of
            # dimensions for mahalanobis
X = eo[eo_name][::5, ::2]
for metric in _METRICS_NAMES:
if metric == 'wminkowski':
continue
if verbose > 2:
print("testing: ", metric, " with: ", eo_name)
if metric in {'dice', 'yule', 'kulsinski', 'matching',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath'} and 'bool' not in eo_name:
# python version permits non-bools e.g. for fuzzy logic
continue
self._check_calling_conventions(X, metric)
# Testing built-in metrics with extra args
if metric == "seuclidean":
V = np.var(X.astype(np.double), axis=0, ddof=1)
self._check_calling_conventions(X, metric, V=V)
elif metric == "mahalanobis":
V = np.atleast_2d(np.cov(X.astype(np.double).T))
VI = np.array(np.linalg.inv(V).T)
self._check_calling_conventions(X, metric, VI=VI)
def test_pdist_dtype_equivalence(self):
# Tests that the result is not affected by type up-casting
eps = 1e-07
tests = [(eo['random-bool-data'], self.valid_upcasts['bool']),
(eo['random-uint-data'], self.valid_upcasts['uint']),
(eo['random-int-data'], self.valid_upcasts['int']),
(eo['random-float32-data'], self.valid_upcasts['float32'])]
for metric in _METRICS_NAMES:
for test in tests:
X1 = test[0][::5, ::2]
try:
y1 = pdist(X1, metric=metric)
except Exception as e:
e_cls = e.__class__
if verbose > 2:
print(e_cls.__name__)
print(e)
for new_type in test[1]:
X2 = new_type(X1)
assert_raises(e_cls, pdist, X2, metric=metric)
else:
for new_type in test[1]:
y2 = pdist(new_type(X1), metric=metric)
_assert_within_tol(y1, y2, eps, verbose > 2)
def test_pdist_out(self):
# Test that out parameter works properly
eps = 1e-07
X = eo['random-float32-data'][::5, ::2]
out_size = int((X.shape[0] * (X.shape[0] - 1)) / 2)
for metric in _METRICS_NAMES:
kwargs = dict()
if metric in ['minkowski', 'wminkowski']:
kwargs['p'] = 1.23
if metric == 'wminkowski':
kwargs['w'] = 1.0 / X.std(axis=0)
out1 = np.empty(out_size, dtype=np.double)
Y_right = pdist(X, metric, **kwargs)
Y_test1 = pdist(X, metric, out=out1, **kwargs)
# test that output is numerically equivalent
_assert_within_tol(Y_test1, Y_right, eps)
# test that Y_test1 and out1 are the same object
assert_(Y_test1 is out1)
# test for incorrect shape
out2 = np.empty(out_size + 3, dtype=np.double)
assert_raises(ValueError, pdist, X, metric, out=out2, **kwargs)
# test for (C-)contiguous output
out3 = np.empty(2 * out_size, dtype=np.double)[::2]
assert_raises(ValueError, pdist, X, metric, out=out3, **kwargs)
# test for incorrect dtype
out5 = np.empty(out_size, dtype=np.int64)
assert_raises(ValueError, pdist, X, metric, out=out5, **kwargs)
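    # Illustrative sketch (not part of the original suite): a minimal,
    # self-contained example of the sizing rule exercised above, assuming
    # only pdist and numpy. For m observations the condensed output has
    # m*(m-1)/2 entries, so a preallocated `out` buffer must be a
    # C-contiguous float64 array of exactly that length.
    def _demo_out_buffer_sizing(self):
        m = 5
        X = np.arange(m * 3, dtype=np.float64).reshape(m, 3)
        out = np.empty(m * (m - 1) // 2, dtype=np.double)
        res = pdist(X, 'euclidean', out=out)
        # the returned object is the buffer itself, filled in place
        assert_(res is out)
        assert_equal(res.shape, (m * (m - 1) // 2,))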
def test_striding(self):
        # test that striding is handled correctly with calls to
# _copy_array_if_base_present
eps = 1e-07
X = eo['random-float32-data'][::5, ::2]
X_copy = X.copy()
# confirm contiguity
assert_(not X.flags.c_contiguous)
assert_(X_copy.flags.c_contiguous)
for metric in _METRICS_NAMES:
kwargs = dict()
if metric in ['minkowski', 'wminkowski']:
kwargs['p'] = 1.23
if metric == 'wminkowski':
kwargs['w'] = 1.0 / X.std(axis=0)
Y1 = pdist(X, metric, **kwargs)
Y2 = pdist(X_copy, metric, **kwargs)
# test that output is numerically equivalent
_assert_within_tol(Y1, Y2, eps, verbose > 2)
class TestSomeDistanceFunctions(object):
def setup_method(self):
# 1D arrays
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
# 3x1 arrays
x31 = x[:, np.newaxis]
y31 = y[:, np.newaxis]
# 1x3 arrays
x13 = x31.T
y13 = y31.T
self.cases = [(x, y), (x31, y31), (x13, y13)]
def test_minkowski(self):
with suppress_warnings() as w:
w.filter(message="`wminkowski` is deprecated")
for x, y in self.cases:
dist1 = wminkowski(x, y, p=1)
assert_almost_equal(dist1, 3.0)
dist1p5 = wminkowski(x, y, p=1.5)
assert_almost_equal(dist1p5, (1.0 + 2.0**1.5)**(2. / 3))
                dist2 = wminkowski(x, y, p=2)
                assert_almost_equal(dist2, np.sqrt(5))
def test_old_wminkowski(self):
with suppress_warnings() as wrn:
wrn.filter(message="`wminkowski` is deprecated")
w = np.array([1.0, 2.0, 0.5])
for x, y in self.cases:
dist1 = old_wminkowski(x, y, p=1, w=w)
assert_almost_equal(dist1, 3.0)
dist1p5 = old_wminkowski(x, y, p=1.5, w=w)
assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
dist2 = old_wminkowski(x, y, p=2, w=w)
assert_almost_equal(dist2, np.sqrt(5))
# test weights Issue #7893
arr = np.arange(4)
w = np.full_like(arr, 4)
assert_almost_equal(old_wminkowski(arr, arr + 1, p=2, w=w), 8.0)
assert_almost_equal(wminkowski(arr, arr + 1, p=2, w=w), 4.0)
def test_euclidean(self):
for x, y in self.cases:
dist = weuclidean(x, y)
assert_almost_equal(dist, np.sqrt(5))
def test_sqeuclidean(self):
for x, y in self.cases:
dist = wsqeuclidean(x, y)
assert_almost_equal(dist, 5.0)
def test_cosine(self):
for x, y in self.cases:
dist = wcosine(x, y)
assert_almost_equal(dist, 1.0 - 18.0 / (np.sqrt(14) * np.sqrt(27)))
def test_correlation(self):
xm = np.array([-1.0, 0, 1.0])
ym = np.array([-4.0 / 3, -4.0 / 3, 5.0 - 7.0 / 3])
for x, y in self.cases:
dist = wcorrelation(x, y)
assert_almost_equal(dist, 1.0 - np.dot(xm, ym) / (norm(xm) * norm(ym)))
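    # Illustrative sketch (not part of the original suite): the expected
    # value above encodes, via the centered vectors xm and ym, the identity
    # that correlation distance equals cosine distance on mean-centered
    # inputs; wcorrelation and wcosine are the wrappers already used in
    # this module.
    def _demo_correlation_is_centered_cosine(self):
        x = np.array([1.0, 2.0, 3.0])
        y = np.array([1.0, 1.0, 5.0])
        d_corr = wcorrelation(x, y)
        d_cos_centered = wcosine(x - x.mean(), y - y.mean())
        assert_almost_equal(d_corr, d_cos_centered)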
def test_mahalanobis(self):
x = np.array([1.0, 2.0, 3.0])
y = np.array([1.0, 1.0, 5.0])
vi = np.array([[2.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
for x, y in self.cases:
dist = mahalanobis(x, y, vi)
assert_almost_equal(dist, np.sqrt(6.0))
class TestSquareForm(object):
checked_dtypes = [np.float64, np.float32, np.int32, np.int8, bool]
def test_squareform_matrix(self):
for dtype in self.checked_dtypes:
self.check_squareform_matrix(dtype)
def test_squareform_vector(self):
for dtype in self.checked_dtypes:
self.check_squareform_vector(dtype)
def check_squareform_matrix(self, dtype):
A = np.zeros((0, 0), dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (0,))
assert_equal(rA.dtype, dtype)
A = np.zeros((1, 1), dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (0,))
assert_equal(rA.dtype, dtype)
A = np.array([[0, 4.2], [4.2, 0]], dtype=dtype)
rA = squareform(A)
assert_equal(rA.shape, (1,))
assert_equal(rA.dtype, dtype)
assert_array_equal(rA, np.array([4.2], dtype=dtype))
def check_squareform_vector(self, dtype):
v = np.zeros((0,), dtype=dtype)
rv = squareform(v)
assert_equal(rv.shape, (1, 1))
assert_equal(rv.dtype, dtype)
assert_array_equal(rv, [[0]])
v = np.array([8.3], dtype=dtype)
rv = squareform(v)
assert_equal(rv.shape, (2, 2))
assert_equal(rv.dtype, dtype)
assert_array_equal(rv, np.array([[0, 8.3], [8.3, 0]], dtype=dtype))
def test_squareform_multi_matrix(self):
for n in xrange(2, 5):
self.check_squareform_multi_matrix(n)
def check_squareform_multi_matrix(self, n):
X = np.random.rand(n, 4)
Y = wpdist_no_const(X)
assert_equal(len(Y.shape), 1)
A = squareform(Y)
Yr = squareform(A)
s = A.shape
k = 0
if verbose >= 3:
print(A.shape, Y.shape, Yr.shape)
assert_equal(len(s), 2)
assert_equal(len(Yr.shape), 1)
assert_equal(s[0], s[1])
        for i in xrange(0, s[0]):
            for j in xrange(i, s[1]):
                if i != j:
                    assert_equal(A[i, j], Y[k])
                    k += 1
                else:
                    assert_equal(A[i, j], 0)
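# Illustrative sketch (not part of the original suite): the index mapping
# that squareform implements. For m observations, the pair (i, j) with
# i < j lands at condensed index m*i - i*(i+1)//2 + (j - i - 1), and m can
# be recovered from the condensed length L as (1 + sqrt(1 + 8*L)) / 2.
def _demo_condensed_indexing():
    m = 5
    X = np.random.rand(m, 3)
    Y = wpdist_no_const(X)
    A = squareform(Y)
    for i in xrange(m):
        for j in xrange(i + 1, m):
            k = m * i - (i * (i + 1)) // 2 + (j - i - 1)
            assert_equal(A[i, j], Y[k])
    # inverse: number of observations from the condensed length
    L = Y.shape[0]
    assert_equal(int(round((1 + np.sqrt(1 + 8 * L)) / 2)), m)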
class TestNumObsY(object):
def test_num_obs_y_multi_matrix(self):
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = wpdist_no_const(X)
assert_equal(num_obs_y(Y), n)
def test_num_obs_y_1(self):
# Tests num_obs_y(y) on a condensed distance matrix over 1
        # observation. Expecting exception.
assert_raises(ValueError, self.check_y, 1)
def test_num_obs_y_2(self):
# Tests num_obs_y(y) on a condensed distance matrix over 2
# observations.
assert_(self.check_y(2))
def test_num_obs_y_3(self):
assert_(self.check_y(3))
def test_num_obs_y_4(self):
assert_(self.check_y(4))
def test_num_obs_y_5_10(self):
for i in xrange(5, 16):
self.minit(i)
def test_num_obs_y_2_100(self):
# Tests num_obs_y(y) on 100 improper condensed distance matrices.
# Expecting exception.
a = set([])
for n in xrange(2, 16):
            a.add(n * (n - 1) // 2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def minit(self, n):
assert_(self.check_y(n))
def bad_y(self, n):
y = np.random.rand(n)
return num_obs_y(y)
def check_y(self, n):
return num_obs_y(self.make_y(n)) == n
def make_y(self, n):
return np.random.rand((n * (n - 1)) // 2)
class TestNumObsDM(object):
def test_num_obs_dm_multi_matrix(self):
for n in xrange(1, 10):
X = np.random.rand(n, 4)
Y = wpdist_no_const(X)
A = squareform(Y)
if verbose >= 3:
print(A.shape, Y.shape)
assert_equal(num_obs_dm(A), n)
def test_num_obs_dm_0(self):
        # Tests num_obs_dm(D) on a 0x0 distance matrix; expects 0 observations.
assert_(self.check_D(0))
def test_num_obs_dm_1(self):
# Tests num_obs_dm(D) on a 1x1 distance matrix.
assert_(self.check_D(1))
def test_num_obs_dm_2(self):
assert_(self.check_D(2))
def test_num_obs_dm_3(self):
        assert_(self.check_D(3))
def test_num_obs_dm_4(self):
assert_(self.check_D(4))
def check_D(self, n):
return num_obs_dm(self.make_D(n)) == n
def make_D(self, n):
return np.random.rand(n, n)
def is_valid_dm_throw(D):
return is_valid_dm(D, throw=True)
class TestIsValidDM(object):
def test_is_valid_dm_improper_shape_1D_E(self):
D = np.zeros((5,), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_1D_F(self):
D = np.zeros((5,), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_improper_shape_3D_E(self):
D = np.zeros((3, 3, 3), dtype=np.double)
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_improper_shape_3D_F(self):
D = np.zeros((3, 3, 3), dtype=np.double)
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_nonzero_diagonal_E(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_nonzero_diagonal_F(self):
y = np.random.rand(10)
D = squareform(y)
for i in xrange(0, 5):
D[i, i] = 2.0
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_asymmetric_E(self):
y = np.random.rand(10)
D = squareform(y)
D[1, 3] = D[3, 1] + 1
assert_raises(ValueError, is_valid_dm_throw, (D))
def test_is_valid_dm_asymmetric_F(self):
y = np.random.rand(10)
D = squareform(y)
D[1, 3] = D[3, 1] + 1
assert_equal(is_valid_dm(D), False)
def test_is_valid_dm_correct_1_by_1(self):
D = np.zeros((1, 1), dtype=np.double)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_2_by_2(self):
y = np.random.rand(1)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_3_by_3(self):
y = np.random.rand(3)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_4_by_4(self):
y = np.random.rand(6)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def test_is_valid_dm_correct_5_by_5(self):
y = np.random.rand(10)
D = squareform(y)
assert_equal(is_valid_dm(D), True)
def is_valid_y_throw(y):
return is_valid_y(y, throw=True)
class TestIsValidY(object):
    # If a test case name ends with "_E" then an exception is expected for
    # the given input; if it ends with "_F" then False is expected from the
    # is_valid_y check. Otherwise the input is expected to be valid.
def test_is_valid_y_improper_shape_2D_E(self):
y = np.zeros((3, 3,), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_2D_F(self):
y = np.zeros((3, 3,), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_3D_E(self):
y = np.zeros((3, 3, 3), dtype=np.double)
assert_raises(ValueError, is_valid_y_throw, (y))
def test_is_valid_y_improper_shape_3D_F(self):
y = np.zeros((3, 3, 3), dtype=np.double)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_correct_2_by_2(self):
y = self.correct_n_by_n(2)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_3_by_3(self):
y = self.correct_n_by_n(3)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_4_by_4(self):
y = self.correct_n_by_n(4)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_5_by_5(self):
y = self.correct_n_by_n(5)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_2_100(self):
a = set([])
for n in xrange(2, 16):
            a.add(n * (n - 1) // 2)
for i in xrange(5, 105):
if i not in a:
assert_raises(ValueError, self.bad_y, i)
def bad_y(self, n):
y = np.random.rand(n)
return is_valid_y(y, throw=True)
def correct_n_by_n(self, n):
y = np.random.rand((n * (n - 1)) // 2)
return y
def test_bad_p():
# Raise ValueError if p < 1.
p = 0.5
with suppress_warnings() as w:
w.filter(message="`wminkowski` is deprecated")
assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p)
assert_raises(ValueError, wminkowski, [1, 2], [3, 4], p, [1, 1])
def test_sokalsneath_all_false():
# Regression test for ticket #876
assert_raises(ValueError, sokalsneath, [False, False, False], [False, False, False])
def test_canberra():
# Regression test for ticket #1430.
assert_equal(wcanberra([1, 2, 3], [2, 4, 6]), 1)
assert_equal(wcanberra([1, 1, 0, 0], [1, 0, 1, 0]), 2)
def test_braycurtis():
# Regression test for ticket #1430.
assert_almost_equal(wbraycurtis([1, 2, 3], [2, 4, 6]), 1. / 3, decimal=15)
assert_almost_equal(wbraycurtis([1, 1, 0, 0], [1, 0, 1, 0]), 0.5, decimal=15)
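# Illustrative sketch (not part of the original suite): the closed forms
# behind the two regression values above, assuming the textbook definitions
# canberra(u, v) = sum(|u - v| / (|u| + |v|)) and
# braycurtis(u, v) = sum(|u - v|) / sum(|u + v|).
def _demo_canberra_braycurtis_formulas():
    u = np.array([1.0, 2.0, 3.0])
    v = np.array([2.0, 4.0, 6.0])
    canberra_ref = np.sum(np.abs(u - v) / (np.abs(u) + np.abs(v)))
    braycurtis_ref = np.sum(np.abs(u - v)) / np.sum(np.abs(u + v))
    assert_almost_equal(wcanberra(u, v), canberra_ref, decimal=15)
    assert_almost_equal(wbraycurtis(u, v), braycurtis_ref, decimal=15)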
def test_euclideans():
# Regression test for ticket #1328.
x1 = np.array([1, 1, 1])
x2 = np.array([0, 0, 0])
# Basic test of the calculation.
assert_almost_equal(wsqeuclidean(x1, x2), 3.0, decimal=14)
assert_almost_equal(weuclidean(x1, x2), np.sqrt(3), decimal=14)
# Check flattening for (1, N) or (N, 1) inputs
assert_almost_equal(weuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
np.sqrt(3), decimal=14)
assert_almost_equal(wsqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :]),
3.0, decimal=14)
assert_almost_equal(wsqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis]),
3.0, decimal=14)
# Distance metrics only defined for vectors (= 1-D)
x = np.arange(4).reshape(2, 2)
assert_raises(ValueError, weuclidean, x, x)
assert_raises(ValueError, wsqeuclidean, x, x)
# Another check, with random data.
rs = np.random.RandomState(1234567890)
x = rs.rand(10)
y = rs.rand(10)
d1 = weuclidean(x, y)
d2 = wsqeuclidean(x, y)
assert_almost_equal(d1**2, d2, decimal=14)
def test_hamming_unequal_length():
# Regression test for gh-4290.
x = [0, 0, 1]
y = [1, 0, 1, 0]
# Used to give an AttributeError from ndarray.mean called on bool
assert_raises(ValueError, whamming, x, y)
def test_hamming_string_array():
# https://github.com/scikit-learn/scikit-learn/issues/4014
a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam',
'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam',
'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'],
dtype='|S4')
b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam',
'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs',
'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'],
dtype='|S4')
desired = 0.45
assert_allclose(whamming(a, b), desired)
def test_minkowski_w():
# Regression test for gh-8142.
arr_in = np.array([[83.33333333, 100., 83.33333333, 100., 36.,
60., 90., 150., 24., 48.],
[83.33333333, 100., 83.33333333, 100., 36.,
60., 90., 150., 24., 48.]])
pdist(arr_in, metric='minkowski', p=1, w=None)
cdist(arr_in, arr_in, metric='minkowski', p=1, w=None)
pdist(arr_in, metric='minkowski', p=1)
cdist(arr_in, arr_in, metric='minkowski', p=1)
def test_sqeuclidean_dtypes():
# Assert that sqeuclidean returns the right types of values.
# Integer types should be converted to floating for stability.
# Floating point types should be the same as the input.
x = [1, 2, 3]
y = [4, 5, 6]
for dtype in [np.int8, np.int16, np.int32, np.int64]:
d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_(np.issubdtype(d.dtype, np.floating))
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
d1 = wsqeuclidean([0], np.asarray([-1], dtype=dtype))
d2 = wsqeuclidean(np.asarray([-1], dtype=dtype), [0])
assert_equal(d1, d2)
assert_equal(d1, np.float64(np.iinfo(dtype).max)**2)
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for dtype in ['float16', 'float128']:
# These aren't present in older numpy versions; float128 may also not
# be present on all platforms.
if hasattr(np, dtype):
dtypes.append(getattr(np, dtype))
for dtype in dtypes:
d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_equal(d.dtype, dtype)
def test_sokalmichener():
# Test that sokalmichener has the same result for bool and int inputs.
p = [True, True, False]
q = [True, False, True]
x = [int(b) for b in p]
y = [int(b) for b in q]
dist1 = sokalmichener(p, q)
dist2 = sokalmichener(x, y)
# These should be exactly the same.
assert_equal(dist1, dist2)
def test_modifies_input():
# test whether cdist or pdist modifies input arrays
X1 = np.asarray([[1., 2., 3.],
[1.2, 2.3, 3.4],
[2.2, 2.3, 4.4],
[22.2, 23.3, 44.4]])
X1_copy = X1.copy()
with suppress_warnings() as w:
w.filter(message="`wminkowski` is deprecated")
for metric in _METRICS_NAMES:
kwargs = {"w": 1.0 / X1.std(axis=0)} if metric == "wminkowski" else {}
cdist(X1, X1, metric, **kwargs)
pdist(X1, metric, **kwargs)
assert_array_equal(X1, X1_copy)
def test_Xdist_deprecated_args():
# testing both cdist and pdist deprecated warnings
X1 = np.asarray([[1., 2., 3.],
[1.2, 2.3, 3.4],
[2.2, 2.3, 4.4],
[22.2, 23.3, 44.4]])
weights = np.arange(3)
warn_msg_kwargs = "Got unexpected kwarg"
warn_msg_args = "[0-9]* metric parameters have been passed as positional"
for metric in _METRICS_NAMES:
kwargs = {"w": weights} if metric == "wminkowski" else dict()
with suppress_warnings() as w:
log = w.record(message=warn_msg_args)
w.filter(message=warn_msg_kwargs)
w.filter(message="`wminkowski` is deprecated")
cdist(X1, X1, metric, 2., **kwargs)
pdist(X1, metric, 2., **kwargs)
assert_(len(log) == 2)
for arg in ["p", "V", "VI"]:
kwargs = {arg:"foo"}
if metric == "wminkowski":
if "p" in kwargs or "w" in kwargs:
continue
kwargs["w"] = weights
if((arg == "V" and metric == "seuclidean") or
(arg == "VI" and metric == "mahalanobis") or
(arg == "p" and metric == "minkowski")):
continue
with suppress_warnings() as w:
log = w.record(message=warn_msg_kwargs)
w.filter(message="`wminkowski` is deprecated")
cdist(X1, X1, metric, **kwargs)
pdist(X1, metric, **kwargs)
assert_(len(log) == 2)
def test_Xdist_non_negative_weights():
X = eo['random-float32-data'][::5, ::2]
w = np.ones(X.shape[1])
w[::5] = -w[::5]
for metric in _METRICS_NAMES:
if metric in ['seuclidean', 'mahalanobis']:
continue
for m in [metric, eval(metric), "test_" + metric]:
assert_raises(ValueError, pdist, X, m, w=w)
assert_raises(ValueError, cdist, X, X, m, w=w)
def test__validate_vector():
x = [1, 2, 3]
y = _validate_vector(x)
assert_array_equal(y, x)
y = _validate_vector(x, dtype=np.float64)
assert_array_equal(y, x)
assert_equal(y.dtype, np.float64)
x = [1]
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, x)
x = 1
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, [x])
x = np.arange(5).reshape(1, -1, 1)
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_array_equal(y, x[0, :, 0])
x = [[1, 2], [3, 4]]
    assert_raises(ValueError, _validate_vector, x)
77642 | 37.57079 | 112 | py
cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/tests/test_qhull.py
from __future__ import division, print_function, absolute_import
import os
import copy
import pytest
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_, assert_allclose, assert_array_equal)
from pytest import raises as assert_raises
from scipy._lib.six import xrange
import scipy.spatial.qhull as qhull
from scipy.spatial import cKDTree as KDTree
from scipy.spatial import Voronoi
import itertools
def sorted_tuple(x):
return tuple(sorted(x))
def sorted_unique_tuple(x):
return tuple(np.unique(x))
def assert_unordered_tuple_list_equal(a, b, tpl=tuple):
if isinstance(a, np.ndarray):
a = a.tolist()
if isinstance(b, np.ndarray):
b = b.tolist()
a = list(map(tpl, a))
a.sort()
b = list(map(tpl, b))
b.sort()
assert_equal(a, b)
np.random.seed(1234)
points = [(0,0), (0,1), (1,0), (1,1), (0.5, 0.5), (0.5, 1.5)]
pathological_data_1 = np.array([
[-3.14,-3.14], [-3.14,-2.36], [-3.14,-1.57], [-3.14,-0.79],
[-3.14,0.0], [-3.14,0.79], [-3.14,1.57], [-3.14,2.36],
[-3.14,3.14], [-2.36,-3.14], [-2.36,-2.36], [-2.36,-1.57],
[-2.36,-0.79], [-2.36,0.0], [-2.36,0.79], [-2.36,1.57],
[-2.36,2.36], [-2.36,3.14], [-1.57,-0.79], [-1.57,0.79],
[-1.57,-1.57], [-1.57,0.0], [-1.57,1.57], [-1.57,-3.14],
[-1.57,-2.36], [-1.57,2.36], [-1.57,3.14], [-0.79,-1.57],
[-0.79,1.57], [-0.79,-3.14], [-0.79,-2.36], [-0.79,-0.79],
[-0.79,0.0], [-0.79,0.79], [-0.79,2.36], [-0.79,3.14],
[0.0,-3.14], [0.0,-2.36], [0.0,-1.57], [0.0,-0.79], [0.0,0.0],
[0.0,0.79], [0.0,1.57], [0.0,2.36], [0.0,3.14], [0.79,-3.14],
[0.79,-2.36], [0.79,-0.79], [0.79,0.0], [0.79,0.79],
[0.79,2.36], [0.79,3.14], [0.79,-1.57], [0.79,1.57],
[1.57,-3.14], [1.57,-2.36], [1.57,2.36], [1.57,3.14],
[1.57,-1.57], [1.57,0.0], [1.57,1.57], [1.57,-0.79],
[1.57,0.79], [2.36,-3.14], [2.36,-2.36], [2.36,-1.57],
[2.36,-0.79], [2.36,0.0], [2.36,0.79], [2.36,1.57],
[2.36,2.36], [2.36,3.14], [3.14,-3.14], [3.14,-2.36],
[3.14,-1.57], [3.14,-0.79], [3.14,0.0], [3.14,0.79],
[3.14,1.57], [3.14,2.36], [3.14,3.14],
])
pathological_data_2 = np.array([
[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 0], [0, 1],
[1, -1 - np.finfo(np.float_).eps], [1, 0], [1, 1],
])
bug_2850_chunks = [np.random.rand(10, 2),
np.array([[0,0], [0,1], [1,0], [1,1]]) # add corners
]
# same with some additional chunks
bug_2850_chunks_2 = (bug_2850_chunks +
[np.random.rand(10, 2),
0.25 + np.array([[0,0], [0,1], [1,0], [1,1]])])
DATASETS = {
'some-points': np.asarray(points),
'random-2d': np.random.rand(30, 2),
'random-3d': np.random.rand(30, 3),
'random-4d': np.random.rand(30, 4),
'random-5d': np.random.rand(30, 5),
'random-6d': np.random.rand(10, 6),
'random-7d': np.random.rand(10, 7),
'random-8d': np.random.rand(10, 8),
'pathological-1': pathological_data_1,
'pathological-2': pathological_data_2
}
INCREMENTAL_DATASETS = {
'bug-2850': (bug_2850_chunks, None),
'bug-2850-2': (bug_2850_chunks_2, None),
}
def _add_inc_data(name, chunksize):
"""
Generate incremental datasets from basic data sets
"""
points = DATASETS[name]
ndim = points.shape[1]
opts = None
nmin = ndim + 2
if name == 'some-points':
# since Qz is not allowed, use QJ
opts = 'QJ Pp'
elif name == 'pathological-1':
# include enough points so that we get different x-coordinates
nmin = 12
chunks = [points[:nmin]]
for j in xrange(nmin, len(points), chunksize):
chunks.append(points[j:j+chunksize])
new_name = "%s-chunk-%d" % (name, chunksize)
assert new_name not in INCREMENTAL_DATASETS
INCREMENTAL_DATASETS[new_name] = (chunks, opts)
for name in DATASETS:
for chunksize in 1, 4, 16:
_add_inc_data(name, chunksize)
class Test_Qhull(object):
def test_swapping(self):
# Check that Qhull state swapping works
x = qhull._Qhull(b'v',
np.array([[0,0],[0,1],[1,0],[1,1.],[0.5,0.5]]),
b'Qz')
xd = copy.deepcopy(x.get_voronoi_diagram())
y = qhull._Qhull(b'v',
np.array([[0,0],[0,1],[1,0],[1,2.]]),
b'Qz')
yd = copy.deepcopy(y.get_voronoi_diagram())
xd2 = copy.deepcopy(x.get_voronoi_diagram())
x.close()
yd2 = copy.deepcopy(y.get_voronoi_diagram())
y.close()
assert_raises(RuntimeError, x.get_voronoi_diagram)
assert_raises(RuntimeError, y.get_voronoi_diagram)
assert_allclose(xd[0], xd2[0])
assert_unordered_tuple_list_equal(xd[1], xd2[1], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(xd[2], xd2[2], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(xd[3], xd2[3], tpl=sorted_tuple)
assert_array_equal(xd[4], xd2[4])
assert_allclose(yd[0], yd2[0])
assert_unordered_tuple_list_equal(yd[1], yd2[1], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(yd[2], yd2[2], tpl=sorted_tuple)
assert_unordered_tuple_list_equal(yd[3], yd2[3], tpl=sorted_tuple)
assert_array_equal(yd[4], yd2[4])
x.close()
assert_raises(RuntimeError, x.get_voronoi_diagram)
y.close()
assert_raises(RuntimeError, y.get_voronoi_diagram)
def test_issue_8051(self):
points = np.array([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2],[2, 0], [2, 1], [2, 2]])
Voronoi(points)
class TestUtilities(object):
"""
Check that utility functions work.
"""
def test_find_simplex(self):
# Simple check that simplex finding works
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
# +---+
# |\ 0|
# | \ |
# |1 \|
# +---+
assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]])
for p in [(0.25, 0.25, 1),
(0.75, 0.75, 0),
(0.3, 0.2, 1)]:
i = tri.find_simplex(p[:2])
assert_equal(i, p[2], err_msg='%r' % (p,))
j = qhull.tsearch(tri, p[:2])
assert_equal(i, j)
def test_plane_distance(self):
# Compare plane distance from hyperplane equations obtained from Qhull
# to manually computed plane equations
x = np.array([(0,0), (1, 1), (1, 0), (0.99189033, 0.37674127),
(0.99440079, 0.45182168)], dtype=np.double)
p = np.array([0.99966555, 0.15685619], dtype=np.double)
tri = qhull.Delaunay(x)
z = tri.lift_points(x)
pz = tri.lift_points(p)
dist = tri.plane_distance(p)
for j, v in enumerate(tri.vertices):
x1 = z[v[0]]
x2 = z[v[1]]
x3 = z[v[2]]
n = np.cross(x1 - x3, x2 - x3)
n /= np.sqrt(np.dot(n, n))
n *= -np.sign(n[2])
d = np.dot(n, pz - x3)
assert_almost_equal(dist[j], d)
def test_convex_hull(self):
        # Simple check that the convex hull seems to work
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
# +---+
# |\ 0|
# | \ |
# |1 \|
# +---+
assert_equal(tri.convex_hull, [[3, 2], [1, 2], [1, 0], [3, 0]])
def test_volume_area(self):
#Basic check that we get back the correct volume and area for a cube
points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
hull = qhull.ConvexHull(points)
assert_allclose(hull.volume, 1., rtol=1e-14,
err_msg="Volume of cube is incorrect")
assert_allclose(hull.area, 6., rtol=1e-14,
err_msg="Area of cube is incorrect")
def test_random_volume_area(self):
        # Test that the volume and area of a random 10-point convex hull
        # are consistent with the output of qconvex Qt s FA
points = np.array([(0.362568364506, 0.472712355305, 0.347003084477),
(0.733731893414, 0.634480295684, 0.950513180209),
(0.511239955611, 0.876839441267, 0.418047827863),
(0.0765906233393, 0.527373281342, 0.6509863541),
(0.146694972056, 0.596725793348, 0.894860986685),
(0.513808585741, 0.069576205858, 0.530890338876),
(0.512343805118, 0.663537132612, 0.037689295973),
(0.47282965018, 0.462176697655, 0.14061843691),
(0.240584597123, 0.778660020591, 0.722913476339),
(0.951271745935, 0.967000673944, 0.890661319684)])
hull = qhull.ConvexHull(points)
assert_allclose(hull.volume, 0.14562013, rtol=1e-07,
err_msg="Volume of random polyhedron is incorrect")
assert_allclose(hull.area, 1.6670425, rtol=1e-07,
err_msg="Area of random polyhedron is incorrect")
def test_incremental_volume_area_random_input(self):
"""Test that incremental mode gives the same volume/area as
non-incremental mode and incremental mode with restart"""
nr_points = 20
dim = 3
points = np.random.random((nr_points, dim))
inc_hull = qhull.ConvexHull(points[:dim+1, :], incremental=True)
inc_restart_hull = qhull.ConvexHull(points[:dim+1, :], incremental=True)
for i in range(dim+1, nr_points):
hull = qhull.ConvexHull(points[:i+1, :])
inc_hull.add_points(points[i:i+1, :])
inc_restart_hull.add_points(points[i:i+1, :], restart=True)
assert_allclose(hull.volume, inc_hull.volume, rtol=1e-7)
assert_allclose(hull.volume, inc_restart_hull.volume, rtol=1e-7)
assert_allclose(hull.area, inc_hull.area, rtol=1e-7)
assert_allclose(hull.area, inc_restart_hull.area, rtol=1e-7)
def _check_barycentric_transforms(self, tri, err_msg="",
unit_cube=False,
unit_cube_tol=0):
"""Check that a triangulation has reasonable barycentric transforms"""
vertices = tri.points[tri.vertices]
sc = 1/(tri.ndim + 1.0)
centroids = vertices.sum(axis=1) * sc
# Either: (i) the simplex has a `nan` barycentric transform,
# or, (ii) the centroid is in the simplex
def barycentric_transform(tr, x):
ndim = tr.shape[1]
r = tr[:,-1,:]
Tinv = tr[:,:-1,:]
return np.einsum('ijk,ik->ij', Tinv, x - r)
eps = np.finfo(float).eps
c = barycentric_transform(tri.transform, centroids)
olderr = np.seterr(invalid="ignore")
try:
ok = np.isnan(c).all(axis=1) | (abs(c - sc)/sc < 0.1).all(axis=1)
finally:
np.seterr(**olderr)
assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
# Invalid simplices must be (nearly) zero volume
q = vertices[:,:-1,:] - vertices[:,-1,None,:]
volume = np.array([np.linalg.det(q[k,:,:])
for k in range(tri.nsimplex)])
ok = np.isfinite(tri.transform[:,0,0]) | (volume < np.sqrt(eps))
assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
# Also, find_simplex for the centroid should end up in some
# simplex for the non-degenerate cases
j = tri.find_simplex(centroids)
ok = (j != -1) | np.isnan(tri.transform[:,0,0])
assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
if unit_cube:
# If in unit cube, no interior point should be marked out of hull
at_boundary = (centroids <= unit_cube_tol).any(axis=1)
at_boundary |= (centroids >= 1 - unit_cube_tol).any(axis=1)
ok = (j != -1) | at_boundary
assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
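    # Illustrative sketch (not part of the original suite): how the
    # transform layout checked above is used. For simplex i,
    # tri.transform[i] stacks the inverse affine map T^-1 on top of the
    # offset r, so the first ndim barycentric coordinates of x are
    # T^-1 (x - r) and the last one is 1 minus their sum.
    def _demo_barycentric_coordinates(self):
        points = np.array([(0, 0), (0, 1), (1, 1), (1, 0)], dtype=np.double)
        tri = qhull.Delaunay(points)
        x = np.array([0.3, 0.2])
        i = tri.find_simplex(x)
        Tinv = tri.transform[i, :-1]
        r = tri.transform[i, -1]
        c = Tinv.dot(x - r)
        c = np.r_[c, 1 - c.sum()]
        # the affine combination of the simplex vertices recovers the point
        assert_allclose(c.dot(tri.points[tri.simplices[i]]), x)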
def test_degenerate_barycentric_transforms(self):
# The triangulation should not produce invalid barycentric
# transforms that stump the simplex finding
data = np.load(os.path.join(os.path.dirname(__file__), 'data',
'degenerate_pointset.npz'))
points = data['c']
data.close()
tri = qhull.Delaunay(points)
# Check that there are not too many invalid simplices
bad_count = np.isnan(tri.transform[:,0,0]).sum()
assert_(bad_count < 21, bad_count)
# Check the transforms
self._check_barycentric_transforms(tri)
@pytest.mark.slow
def test_more_barycentric_transforms(self):
# Triangulate some "nasty" grids
eps = np.finfo(float).eps
npoints = {2: 70, 3: 11, 4: 5, 5: 3}
_is_32bit_platform = np.intp(0).itemsize < 8
for ndim in xrange(2, 6):
            # Generate a uniform grid in the n-d unit cube
x = np.linspace(0, 1, npoints[ndim])
grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T
err_msg = "ndim=%d" % ndim
# Check using regular grid
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True)
# Check with eps-perturbations
np.random.seed(1234)
m = (np.random.rand(grid.shape[0]) < 0.2)
grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=2*eps)
# Check with duplicated data
tri = qhull.Delaunay(np.r_[grid, grid])
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=2*eps)
if not _is_32bit_platform:
# test numerically unstable, and reported to fail on 32-bit
# installs
# Check with larger perturbations
np.random.seed(4321)
m = (np.random.rand(grid.shape[0]) < 0.2)
grid[m,:] += 1000*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=1500*eps)
# Check with yet larger perturbations
np.random.seed(4321)
m = (np.random.rand(grid.shape[0]) < 0.2)
grid[m,:] += 1e6*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
tri = qhull.Delaunay(grid)
self._check_barycentric_transforms(tri, err_msg=err_msg,
unit_cube=True,
unit_cube_tol=1e7*eps)
class TestVertexNeighborVertices(object):
def _check(self, tri):
expected = [set() for j in range(tri.points.shape[0])]
for s in tri.simplices:
for a in s:
for b in s:
if a != b:
expected[a].add(b)
indptr, indices = tri.vertex_neighbor_vertices
got = []
for j in range(tri.points.shape[0]):
got.append(set(map(int, indices[indptr[j]:indptr[j+1]])))
assert_equal(got, expected, err_msg="%r != %r" % (got, expected))
def test_triangle(self):
points = np.array([(0,0), (0,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
def test_rectangle(self):
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
def test_complicated(self):
points = np.array([(0,0), (0,1), (1,1), (1,0),
(0.5, 0.5), (0.9, 0.5)], dtype=np.double)
tri = qhull.Delaunay(points)
self._check(tri)
class TestDelaunay(object):
"""
Check that triangulation works.
"""
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.Delaunay, masked_array)
def test_array_with_nans_fails(self):
points_with_nan = np.array([(0,0), (0,1), (1,1), (1,np.nan)], dtype=np.double)
assert_raises(ValueError, qhull.Delaunay, points_with_nan)
def test_nd_simplex(self):
# simple smoke test: triangulate a n-dimensional simplex
for nd in xrange(2, 8):
points = np.zeros((nd+1, nd))
for j in xrange(nd):
points[j,j] = 1.0
points[-1,:] = 1.0
tri = qhull.Delaunay(points)
tri.vertices.sort()
assert_equal(tri.vertices, np.arange(nd+1, dtype=int)[None,:])
assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=int)[None,:])
def test_2d_square(self):
# simple smoke test: 2d square
points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
tri = qhull.Delaunay(points)
assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]])
assert_equal(tri.neighbors, [[-1, -1, 1], [-1, -1, 0]])
def test_duplicate_points(self):
x = np.array([0, 1, 0, 1], dtype=np.float64)
y = np.array([0, 0, 1, 1], dtype=np.float64)
xp = np.r_[x, x]
yp = np.r_[y, y]
# shouldn't fail on duplicate points
tri = qhull.Delaunay(np.c_[x, y])
tri2 = qhull.Delaunay(np.c_[xp, yp])
def test_pathological(self):
# both should succeed
points = DATASETS['pathological-1']
tri = qhull.Delaunay(points)
assert_equal(tri.points[tri.vertices].max(), points.max())
assert_equal(tri.points[tri.vertices].min(), points.min())
points = DATASETS['pathological-2']
tri = qhull.Delaunay(points)
assert_equal(tri.points[tri.vertices].max(), points.max())
assert_equal(tri.points[tri.vertices].min(), points.min())
def test_joggle(self):
# Check that the option QJ indeed guarantees that all input points
# occur as vertices of the triangulation
points = np.random.rand(10, 2)
points = np.r_[points, points] # duplicate input data
tri = qhull.Delaunay(points, qhull_options="QJ Qbb Pp")
assert_array_equal(np.unique(tri.simplices.ravel()),
np.arange(len(points)))
def test_coplanar(self):
# Check that the coplanar point output option indeed works
points = np.random.rand(10, 2)
points = np.r_[points, points] # duplicate input data
tri = qhull.Delaunay(points)
assert_(len(np.unique(tri.simplices.ravel())) == len(points)//2)
assert_(len(tri.coplanar) == len(points)//2)
assert_(len(np.unique(tri.coplanar[:,2])) == len(points)//2)
assert_(np.all(tri.vertex_to_simplex >= 0))
def test_furthest_site(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
tri = qhull.Delaunay(points, furthest_site=True)
expected = np.array([(1, 4, 0), (4, 2, 0)]) # from Qhull
assert_array_equal(tri.simplices, expected)
@pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
def test_incremental(self, name):
# Test incremental construction of the triangulation
chunks, opts = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.Delaunay(chunks[0], incremental=True,
qhull_options=opts)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.Delaunay(points)
obj3 = qhull.Delaunay(chunks[0], incremental=True,
qhull_options=opts)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# Check that the incremental mode agrees with upfront mode
if name.startswith('pathological'):
# XXX: These produce valid but different triangulations.
# They look OK when plotted, but how to check them?
assert_array_equal(np.unique(obj.simplices.ravel()),
np.arange(points.shape[0]))
assert_array_equal(np.unique(obj2.simplices.ravel()),
np.arange(points.shape[0]))
else:
assert_unordered_tuple_list_equal(obj.simplices, obj2.simplices,
tpl=sorted_tuple)
assert_unordered_tuple_list_equal(obj2.simplices, obj3.simplices,
tpl=sorted_tuple)
def assert_hulls_equal(points, facets_1, facets_2):
# Check that two convex hulls constructed from the same point set
# are equal
facets_1 = set(map(sorted_tuple, facets_1))
facets_2 = set(map(sorted_tuple, facets_2))
if facets_1 != facets_2 and points.shape[1] == 2:
        # The direct check fails for the pathological cases: the convex
        # hull from Delaunay can then differ (due to rounding error etc.)
        # from the hull computed otherwise, depending on whether
        # (tricoplanar) points that lie almost exactly on the hull are
        # included as vertices of the hull or not.
#
# So we check the result, and accept it if the Delaunay
# hull line segments are a subset of the usual hull.
eps = 1000 * np.finfo(float).eps
for a, b in facets_1:
for ap, bp in facets_2:
t = points[bp] - points[ap]
t /= np.linalg.norm(t) # tangent
n = np.array([-t[1], t[0]]) # normal
# check that the two line segments are parallel
# to the same line
c1 = np.dot(n, points[b] - points[ap])
c2 = np.dot(n, points[a] - points[ap])
if not np.allclose(np.dot(c1, n), 0):
continue
if not np.allclose(np.dot(c2, n), 0):
continue
# Check that the segment (a, b) is contained in (ap, bp)
c1 = np.dot(t, points[a] - points[ap])
c2 = np.dot(t, points[b] - points[ap])
c3 = np.dot(t, points[bp] - points[ap])
if c1 < -eps or c1 > c3 + eps:
continue
if c2 < -eps or c2 > c3 + eps:
continue
# OK:
break
else:
raise AssertionError("comparison fails")
# it was OK
return
assert_equal(facets_1, facets_2)
class TestConvexHull:
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.ConvexHull, masked_array)
def test_array_with_nans_fails(self):
points_with_nan = np.array([(0,0), (1,1), (2,np.nan)], dtype=np.double)
assert_raises(ValueError, qhull.ConvexHull, points_with_nan)
@pytest.mark.parametrize("name", sorted(DATASETS))
def test_hull_consistency_tri(self, name):
# Check that a convex hull returned by qhull in ndim
# and the hull constructed from ndim delaunay agree
points = DATASETS[name]
tri = qhull.Delaunay(points)
hull = qhull.ConvexHull(points)
assert_hulls_equal(points, tri.convex_hull, hull.simplices)
# Check that the hull extremes are as expected
if points.shape[1] == 2:
assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
else:
assert_equal(np.unique(hull.simplices), hull.vertices)
@pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
def test_incremental(self, name):
# Test incremental construction of the convex hull
chunks, _ = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.ConvexHull(chunks[0], incremental=True)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.ConvexHull(points)
obj3 = qhull.ConvexHull(chunks[0], incremental=True)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# Check that the incremental mode agrees with upfront mode
assert_hulls_equal(points, obj.simplices, obj2.simplices)
assert_hulls_equal(points, obj.simplices, obj3.simplices)
def test_vertices_2d(self):
# The vertices should be in counterclockwise order in 2-D
np.random.seed(1234)
points = np.random.rand(30, 2)
hull = qhull.ConvexHull(points)
assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
# Check counterclockwiseness
x, y = hull.points[hull.vertices].T
angle = np.arctan2(y - y.mean(), x - x.mean())
assert_(np.all(np.diff(np.unwrap(angle)) > 0))
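    # Illustrative sketch (not part of the original suite): an alternative
    # counterclockwiseness check via the shoelace formula, whose signed
    # area is positive exactly for CCW vertex orderings.
    def _demo_ccw_shoelace(self):
        np.random.seed(1234)
        points = np.random.rand(30, 2)
        hull = qhull.ConvexHull(points)
        x, y = hull.points[hull.vertices].T
        signed_area = 0.5 * np.sum(x * np.roll(y, -1) - np.roll(x, -1) * y)
        assert_(signed_area > 0)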
def test_volume_area(self):
# Basic check that we get back the correct volume and area for a cube
points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
tri = qhull.ConvexHull(points)
assert_allclose(tri.volume, 1., rtol=1e-14)
assert_allclose(tri.area, 6., rtol=1e-14)
class TestVoronoi:
def test_masked_array_fails(self):
masked_array = np.ma.masked_all(1)
assert_raises(ValueError, qhull.Voronoi, masked_array)
def test_simple(self):
# Simple case with known Voronoi diagram
points = [(0, 0), (0, 1), (0, 2),
(1, 0), (1, 1), (1, 2),
(2, 0), (2, 1), (2, 2)]
# qhull v o Fv Qbb Qc Qz < dat
output = """
2
5 10 1
-10.101 -10.101
0.5 0.5
1.5 0.5
0.5 1.5
1.5 1.5
2 0 1
3 3 0 1
2 0 3
3 2 0 1
4 4 3 1 2
3 4 0 3
2 0 2
3 4 0 2
2 0 4
0
12
4 0 3 0 1
4 0 1 0 1
4 1 4 1 3
4 1 2 0 3
4 2 5 0 3
4 3 4 1 2
4 3 6 0 2
4 4 5 3 4
4 4 7 2 4
4 5 8 0 4
4 6 7 0 2
4 7 8 0 4
"""
self._compare_qvoronoi(points, output)
def _compare_qvoronoi(self, points, output, **kw):
"""Compare to output from 'qvoronoi o Fv < data' to Voronoi()"""
# Parse output
output = [list(map(float, x.split())) for x in output.strip().splitlines()]
nvertex = int(output[1][0])
vertices = list(map(tuple, output[3:2+nvertex])) # exclude inf
nregion = int(output[1][1])
regions = [[int(y)-1 for y in x[1:]]
for x in output[2+nvertex:2+nvertex+nregion]]
nridge = int(output[2+nvertex+nregion][0])
ridge_points = [[int(y) for y in x[1:3]]
for x in output[3+nvertex+nregion:]]
ridge_vertices = [[int(y)-1 for y in x[3:]]
for x in output[3+nvertex+nregion:]]
# Compare results
vor = qhull.Voronoi(points, **kw)
def sorttuple(x):
return tuple(sorted(x))
assert_allclose(vor.vertices, vertices)
assert_equal(set(map(tuple, vor.regions)),
set(map(tuple, regions)))
p1 = list(zip(list(map(sorttuple, ridge_points)), list(map(sorttuple, ridge_vertices))))
p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())),
list(map(sorttuple, vor.ridge_vertices))))
p1.sort()
p2.sort()
assert_equal(p1, p2)
@pytest.mark.parametrize("name", sorted(DATASETS))
def test_ridges(self, name):
# Check that the ridges computed by Voronoi indeed separate
# the regions of nearest neighborhood, by comparing the result
# to KDTree.
points = DATASETS[name]
tree = KDTree(points)
vor = qhull.Voronoi(points)
for p, v in vor.ridge_dict.items():
# consider only finite ridges
if not np.all(np.asarray(v) >= 0):
continue
ridge_midpoint = vor.vertices[v].mean(axis=0)
d = 1e-6 * (points[p[0]] - ridge_midpoint)
dist, k = tree.query(ridge_midpoint + d, k=1)
assert_equal(k, p[0])
dist, k = tree.query(ridge_midpoint - d, k=1)
assert_equal(k, p[1])
def test_furthest_site(self):
points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
# qhull v o Fv Qbb Qc Qu < dat
output = """
2
3 5 1
-10.101 -10.101
0.6000000000000001 0.5
0.5 0.6000000000000001
3 0 1 2
2 0 1
2 0 2
0
3 0 1 2
5
4 0 2 0 2
4 0 1 0 1
4 0 4 1 2
4 1 4 0 1
4 2 4 0 2
"""
self._compare_qvoronoi(points, output, furthest_site=True)
@pytest.mark.parametrize("name", sorted(INCREMENTAL_DATASETS))
def test_incremental(self, name):
# Test incremental construction of the triangulation
if INCREMENTAL_DATASETS[name][0][0].shape[1] > 3:
# too slow (testing of the result --- qhull is still fast)
return
chunks, opts = INCREMENTAL_DATASETS[name]
points = np.concatenate(chunks, axis=0)
obj = qhull.Voronoi(chunks[0], incremental=True,
qhull_options=opts)
for chunk in chunks[1:]:
obj.add_points(chunk)
obj2 = qhull.Voronoi(points)
obj3 = qhull.Voronoi(chunks[0], incremental=True,
qhull_options=opts)
if len(chunks) > 1:
obj3.add_points(np.concatenate(chunks[1:], axis=0),
restart=True)
# -- Check that the incremental mode agrees with upfront mode
assert_equal(len(obj.point_region), len(obj2.point_region))
assert_equal(len(obj.point_region), len(obj3.point_region))
# The vertices may be in different order or duplicated in
# the incremental map
for objx in obj, obj3:
vertex_map = {-1: -1}
for i, v in enumerate(objx.vertices):
for j, v2 in enumerate(obj2.vertices):
if np.allclose(v, v2):
vertex_map[i] = j
def remap(x):
if hasattr(x, '__len__'):
return tuple(set([remap(y) for y in x]))
try:
return vertex_map[x]
except KeyError:
raise AssertionError("incremental result has spurious vertex at %r"
% (objx.vertices[x],))
def simplified(x):
items = set(map(sorted_tuple, x))
if () in items:
items.remove(())
items = [x for x in items if len(x) > 1]
items.sort()
return items
assert_equal(
simplified(remap(objx.regions)),
simplified(obj2.regions)
)
assert_equal(
simplified(remap(objx.ridge_vertices)),
simplified(obj2.ridge_vertices)
)
# XXX: compare ridge_points --- not clear exactly how to do this
class Test_HalfspaceIntersection(object):
def assert_unordered_allclose(self, arr1, arr2, rtol=1e-7):
"""Check that every line in arr1 is only once in arr2"""
assert_equal(arr1.shape, arr2.shape)
truths = np.zeros((arr1.shape[0],), dtype=bool)
for l1 in arr1:
indexes = np.where((abs(arr2 - l1) < rtol).all(axis=1))[0]
assert_equal(indexes.shape, (1,))
truths[indexes[0]] = True
assert_(truths.all())
def test_cube_halfspace_intersection(self):
halfspaces = np.array([[-1.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, -1.0],
[0.0, 1.0, -1.0]])
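        # each row encodes normal . x + offset <= 0; the four rows above
        # bound the unit square 0 <= x <= 1, 0 <= y <= 1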
feasible_point = np.array([0.5, 0.5])
points = np.array([[0.0, 1.0], [1.0, 1.0], [0.0, 0.0], [1.0, 0.0]])
hull = qhull.HalfspaceIntersection(halfspaces, feasible_point)
assert_allclose(points, hull.intersections)
def test_self_dual_polytope_intersection(self):
fname = os.path.join(os.path.dirname(__file__), 'data',
'selfdual-4d-polytope.txt')
ineqs = np.genfromtxt(fname)
halfspaces = -np.hstack((ineqs[:, 1:], ineqs[:, :1]))
feas_point = np.array([0., 0., 0., 0.])
hs = qhull.HalfspaceIntersection(halfspaces, feas_point)
assert_equal(hs.intersections.shape, (24, 4))
assert_almost_equal(hs.dual_volume, 32.0)
assert_equal(len(hs.dual_facets), 24)
for facet in hs.dual_facets:
assert_equal(len(facet), 6)
dists = halfspaces[:, -1] + halfspaces[:, :-1].dot(feas_point)
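        # polar duality: the dual point of the halfspace n.x + b <= 0, seen
        # from the interior point p, is n / (b + n.p)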
self.assert_unordered_allclose((halfspaces[:, :-1].T/dists).T, hs.dual_points)
points = itertools.permutations([0., 0., 0.5, -0.5])
for point in points:
assert_equal(np.sum((hs.intersections == point).all(axis=1)), 1)
def test_wrong_feasible_point(self):
halfspaces = np.array([[-1.0, 0.0, 0.0],
[0.0, -1.0, 0.0],
[1.0, 0.0, -1.0],
[0.0, 1.0, -1.0]])
feasible_point = np.array([0.5, 0.5, 0.5])
#Feasible point is (ndim,) instead of (ndim-1,)
assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([[0.5], [0.5]])
#Feasible point is (ndim-1, 1) instead of (ndim-1,)
assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([[0.5, 0.5]])
#Feasible point is (1, ndim-1) instead of (ndim-1,)
assert_raises(ValueError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
feasible_point = np.array([-0.5, -0.5])
#Feasible point is outside feasible region
assert_raises(qhull.QhullError, qhull.HalfspaceIntersection, halfspaces, feasible_point)
def test_incremental(self):
#Cube
halfspaces = np.array([[0., 0., -1., -0.5],
[0., -1., 0., -0.5],
[-1., 0., 0., -0.5],
[1., 0., 0., -0.5],
[0., 1., 0., -0.5],
[0., 0., 1., -0.5]])
        # Cut off each corner (vertex) of the cube
extra_normals = np.array([[1., 1., 1.],
[1., 1., -1.],
[1., -1., 1.],
[1, -1., -1.]])
offsets = np.array([[-1.]]*8)
extra_halfspaces = np.hstack((np.vstack((extra_normals, -extra_normals)),
offsets))
feas_point = np.array([0., 0., 0.])
inc_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
inc_res_hs = qhull.HalfspaceIntersection(halfspaces, feas_point, incremental=True)
for i, ehs in enumerate(extra_halfspaces):
inc_hs.add_halfspaces(ehs[np.newaxis, :])
inc_res_hs.add_halfspaces(ehs[np.newaxis, :], restart=True)
total = np.vstack((halfspaces, extra_halfspaces[:i+1, :]))
hs = qhull.HalfspaceIntersection(total, feas_point)
assert_allclose(inc_hs.halfspaces, inc_res_hs.halfspaces)
assert_allclose(inc_hs.halfspaces, hs.halfspaces)
#Direct computation and restart should have points in same order
assert_allclose(hs.intersections, inc_res_hs.intersections)
#Incremental will have points in different order than direct computation
self.assert_unordered_allclose(inc_hs.intersections, hs.intersections)
inc_hs.close()
| 36,813 | 35.485629 | 98 | py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/tests/test__plotutils.py |
from __future__ import division, print_function, absolute_import
import pytest
from numpy.testing import assert_, assert_array_equal
from scipy._lib._numpy_compat import suppress_warnings
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import MatplotlibDeprecationWarning
has_matplotlib = True
except Exception:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
@pytest.mark.skipif(not has_matplotlib, reason="Matplotlib not available")
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
        with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
        with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
voronoi_plot_2d(obj, show_vertices=False)
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
        with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| 2,140 | 34.098361 | 79 | py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/tests/test__procrustes.py |
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_almost_equal
from pytest import raises as assert_raises
from scipy.spatial import procrustes
class TestProcrustes(object):
def setup_method(self):
"""creates inputs"""
# an L
self.data1 = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
# a larger, shifted, mirrored L
self.data2 = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
# an L shifted up 1, right 1, and with point 4 shifted an extra .5
# to the right
# pointwise distance disparity with data1: 3*(2) + (1 + 1.5^2)
self.data3 = np.array([[2, 4], [2, 3], [2, 2], [3, 2.5]], 'd')
# data4, data5 are standardized (trace(A*A') = 1).
# procrustes should return an identical copy if they are used
# as the first matrix argument.
shiftangle = np.pi / 8
self.data4 = np.array([[1, 0], [0, 1], [-1, 0],
[0, -1]], 'd') / np.sqrt(4)
self.data5 = np.array([[np.cos(shiftangle), np.sin(shiftangle)],
[np.cos(np.pi / 2 - shiftangle),
np.sin(np.pi / 2 - shiftangle)],
[-np.cos(shiftangle),
-np.sin(shiftangle)],
[-np.cos(np.pi / 2 - shiftangle),
-np.sin(np.pi / 2 - shiftangle)]],
'd') / np.sqrt(4)
def test_procrustes(self):
# tests procrustes' ability to match two matrices.
#
# the second matrix is a rotated, shifted, scaled, and mirrored version
# of the first, in two dimensions only
#
# can shift, mirror, and scale an 'L'?
a, b, disparity = procrustes(self.data1, self.data2)
assert_allclose(b, a)
assert_almost_equal(disparity, 0.)
# if first mtx is standardized, leaves first mtx unchanged?
m4, m5, disp45 = procrustes(self.data4, self.data5)
assert_equal(m4, self.data4)
# at worst, data3 is an 'L' with one point off by .5
m1, m3, disp13 = procrustes(self.data1, self.data3)
#assert_(disp13 < 0.5 ** 2)
def test_procrustes2(self):
# procrustes disparity should not depend on order of matrices
m1, m3, disp13 = procrustes(self.data1, self.data3)
m3_2, m1_2, disp31 = procrustes(self.data3, self.data1)
assert_almost_equal(disp13, disp31)
# try with 3d, 8 pts per
rand1 = np.array([[2.61955202, 0.30522265, 0.55515826],
[0.41124708, -0.03966978, -0.31854548],
[0.91910318, 1.39451809, -0.15295084],
[2.00452023, 0.50150048, 0.29485268],
[0.09453595, 0.67528885, 0.03283872],
[0.07015232, 2.18892599, -1.67266852],
[0.65029688, 1.60551637, 0.80013549],
[-0.6607528, 0.53644208, 0.17033891]])
rand3 = np.array([[0.0809969, 0.09731461, -0.173442],
[-1.84888465, -0.92589646, -1.29335743],
[0.67031855, -1.35957463, 0.41938621],
[0.73967209, -0.20230757, 0.52418027],
[0.17752796, 0.09065607, 0.29827466],
[0.47999368, -0.88455717, -0.57547934],
[-0.11486344, -0.12608506, -0.3395779],
[-0.86106154, -0.28687488, 0.9644429]])
res1, res3, disp13 = procrustes(rand1, rand3)
res3_2, res1_2, disp31 = procrustes(rand3, rand1)
assert_almost_equal(disp13, disp31)
def test_procrustes_shape_mismatch(self):
assert_raises(ValueError, procrustes,
np.array([[1, 2], [3, 4]]),
np.array([[5, 6, 7], [8, 9, 10]]))
def test_procrustes_empty_rows_or_cols(self):
empty = np.array([[]])
assert_raises(ValueError, procrustes, empty, empty)
def test_procrustes_no_variation(self):
assert_raises(ValueError, procrustes,
np.array([[42, 42], [42, 42]]),
np.array([[45, 45], [45, 45]]))
def test_procrustes_bad_number_of_dimensions(self):
# fewer dimensions in one dataset
assert_raises(ValueError, procrustes,
np.array([1, 1, 2, 3, 5, 8]),
np.array([[1, 2], [3, 4]]))
# fewer dimensions in both datasets
assert_raises(ValueError, procrustes,
np.array([1, 1, 2, 3, 5, 8]),
np.array([1, 1, 2, 3, 5, 8]))
# zero dimensions
assert_raises(ValueError, procrustes, np.array(7), np.array(11))
# extra dimensions
assert_raises(ValueError, procrustes,
np.array([[[11], [7]]]),
np.array([[[5, 13]]]))
| 5,048 | 41.428571 | 79 | py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/tests/test_kdtree.py |
# Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_)
from pytest import raises as assert_raises
import numpy as np
from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree
from scipy.spatial.ckdtree import cKDTreeNode
from scipy.spatial import minkowski_distance
import itertools
def distance_box(a, b, p, boxsize):
diff = a - b
diff[diff > 0.5 * boxsize] -= boxsize
diff[diff < -0.5 * boxsize] += boxsize
d = minkowski_distance(diff, 0, p)
return d
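# A minimal sketch (not part of the original suite) showing the
# minimum-image convention implemented by distance_box above; the name
# _demo_distance_box is hypothetical and deliberately not test-prefixed,
# so pytest will not collect it.
def _demo_distance_box():
    a = np.array([0.05, 0.5])
    b = np.array([0.95, 0.5])
    # the plain Euclidean separation is 0.9, but in a unit periodic box
    # the nearest images are only 0.1 apart
    return distance_box(a, b, p=2, boxsize=1.0)  # -> 0.1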
class ConsistencyTests:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_nearest(self):
x = self.x
d, i = self.kdtree.query(x, 1)
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_(np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1) > d**2-eps))
def test_m_nearest(self):
x = self.x
m = self.m
dd, ii = self.kdtree.query(x, m)
d = np.amax(dd)
i = ii[np.argmax(dd)]
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),m)
def test_points_near(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d**2,np.sum((x-self.data[near_i])**2))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(self.distance(self.data,x,2) < d**2+eps),hits)
def test_points_near_l1(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,self.distance(x,self.data[near_i],1))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(self.distance(self.data,x,1) < d+eps),hits)
def test_points_near_linf(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,self.distance(x,self.data[near_i],np.inf))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(self.distance(self.data,x,np.inf) < d+eps),hits)
def test_approx(self):
x = self.x
k = self.k
eps = 0.1
d_real, i_real = self.kdtree.query(x, k)
d, i = self.kdtree.query(x, k, eps=eps)
assert_(np.all(d <= d_real*(1+eps)))
class Test_random(ConsistencyTests):
def setup_method(self):
self.n = 100
self.m = 4
np.random.seed(1234)
self.data = np.random.randn(self.n, self.m)
self.kdtree = KDTree(self.data,leafsize=2)
self.x = np.random.randn(self.m)
self.d = 0.2
self.k = 10
class Test_random_far(Test_random):
def setup_method(self):
Test_random.setup_method(self)
self.x = np.random.randn(self.m)+10
class Test_small(ConsistencyTests):
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
self.n = self.kdtree.n
self.m = self.kdtree.m
np.random.seed(1234)
self.x = np.random.randn(3)
self.d = 0.5
self.k = 4
def test_nearest(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 1),
(0.1,0))
def test_nearest_two(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 2),
([0.1,0.9],[0,1]))
class Test_small_nonleaf(Test_small):
def setup_method(self):
Test_small.setup_method(self)
self.kdtree = KDTree(self.data,leafsize=1)
class Test_small_compiled(Test_small):
def setup_method(self):
Test_small.setup_method(self)
self.kdtree = cKDTree(self.data)
class Test_small_nonleaf_compiled(Test_small):
def setup_method(self):
Test_small.setup_method(self)
self.kdtree = cKDTree(self.data,leafsize=1)
class Test_random_compiled(Test_random):
def setup_method(self):
Test_random.setup_method(self)
self.kdtree = cKDTree(self.data)
class Test_random_far_compiled(Test_random_far):
def setup_method(self):
Test_random_far.setup_method(self)
self.kdtree = cKDTree(self.data)
class Test_vectorization:
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query(np.array([0,0,0]))
assert_(isinstance(d,float))
assert_(np.issubdtype(i, np.signedinteger))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.array([0,0,0]),k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
def test_single_query_all_neighbors(self):
d, i = self.kdtree.query([0,0,0],k=None,distance_upper_bound=1.1)
assert_(isinstance(d,list))
assert_(isinstance(i,list))
def test_vectorized_query_all_neighbors(self):
d, i = self.kdtree.query(np.zeros((2,4,3)),k=None,distance_upper_bound=1.1)
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
assert_(isinstance(d[0,0],list))
assert_(isinstance(i[0,0],list))
class Test_vectorization_compiled:
def setup_method(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = cKDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query([0,0,0])
assert_(isinstance(d,float))
assert_(isinstance(i,int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_vectorized_query_noncontiguous_values(self):
np.random.seed(1234)
qs = np.random.randn(3,1000).T
ds, i_s = self.kdtree.query(qs)
for q, d, i in zip(qs,ds,i_s):
assert_equal(self.kdtree.query(q),(d,i))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query([0,0,0],k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
class ball_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
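    # contract for approximate search: with tolerance eps the returned set
    # may include points out to d*(1+eps) and may miss points only beyond
    # d/(1+eps); the two tests below bracket exactly that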
def test_in_ball(self):
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
c = np.ones(self.T.n,dtype=bool)
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
class Test_random_ball(ball_consistency):
def setup_method(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = KDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_random_ball_compiled(ball_consistency):
def setup_method(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = cKDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_random_ball_compiled_periodic(ball_consistency):
def distance(self, a, b, p):
return distance_box(a, b, p, 1.0)
def setup_method(self):
n = 10000
m = 4
np.random.seed(1234)
self.data = np.random.uniform(size=(n,m))
self.T = cKDTree(self.data,leafsize=2, boxsize=1)
self.x = np.ones(m) * 0.1
self.p = 2.
self.eps = 0
self.d = 0.2
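    # with boxsize=1, query points shifted by a whole period (x +/- 1.0)
    # must see exactly the same neighbors as x itself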
def test_in_ball_outside(self):
l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
def test_found_all_outside(self):
c = np.ones(self.T.n,dtype=bool)
l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
class Test_random_ball_approx(Test_random_ball):
def setup_method(self):
Test_random_ball.setup_method(self)
self.eps = 0.1
class Test_random_ball_approx_compiled(Test_random_ball_compiled):
def setup_method(self):
Test_random_ball_compiled.setup_method(self)
self.eps = 0.1
class Test_random_ball_approx_compiled_periodic(Test_random_ball_compiled_periodic):
def setup_method(self):
Test_random_ball_compiled_periodic.setup_method(self)
self.eps = 0.1
class Test_random_ball_far(Test_random_ball):
def setup_method(self):
Test_random_ball.setup_method(self)
self.d = 2.
class Test_random_ball_far_compiled(Test_random_ball_compiled):
def setup_method(self):
Test_random_ball_compiled.setup_method(self)
self.d = 2.
class Test_random_ball_far_compiled_periodic(Test_random_ball_compiled_periodic):
def setup_method(self):
Test_random_ball_compiled_periodic.setup_method(self)
self.d = 2.
class Test_random_ball_l1(Test_random_ball):
def setup_method(self):
Test_random_ball.setup_method(self)
self.p = 1
class Test_random_ball_l1_compiled(Test_random_ball_compiled):
def setup_method(self):
Test_random_ball_compiled.setup_method(self)
self.p = 1
class Test_random_ball_l1_compiled_periodic(Test_random_ball_compiled_periodic):
def setup_method(self):
Test_random_ball_compiled_periodic.setup_method(self)
self.p = 1
class Test_random_ball_linf(Test_random_ball):
def setup_method(self):
Test_random_ball.setup_method(self)
self.p = np.inf
class Test_random_ball_linf_compiled_periodic(Test_random_ball_compiled_periodic):
def setup_method(self):
Test_random_ball_compiled_periodic.setup_method(self)
self.p = np.inf
def test_random_ball_vectorized():
n = 20
m = 5
T = KDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
def test_random_ball_vectorized_compiled():
n = 20
m = 5
np.random.seed(1234)
T = cKDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
def test_query_ball_point_multithreading():
np.random.seed(0)
n = 5000
k = 2
points = np.random.randn(n,k)
T = cKDTree(points)
l1 = T.query_ball_point(points,0.003,n_jobs=1)
l2 = T.query_ball_point(points,0.003,n_jobs=64)
l3 = T.query_ball_point(points,0.003,n_jobs=-1)
for i in range(n):
if l1[i] or l2[i]:
assert_array_equal(l1[i],l2[i])
for i in range(n):
if l1[i] or l3[i]:
assert_array_equal(l1[i],l3[i])
class two_trees_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_all_in_ball(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
for j in l:
assert_(self.distance(self.data1[i],self.data2[j],self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
c = np.ones(self.T2.n,dtype=bool)
c[l] = False
assert_(np.all(self.distance(self.data2[c],self.data1[i],self.p) >= self.d/(1.+self.eps)))
class Test_two_random_trees(two_trees_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = KDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = KDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_two_random_trees_compiled(two_trees_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = cKDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = cKDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_two_random_trees_compiled_periodic(two_trees_consistency):
def distance(self, a, b, p):
return distance_box(a, b, p, 1.0)
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.uniform(size=(n,m))
self.T1 = cKDTree(self.data1,leafsize=2, boxsize=1.0)
self.data2 = np.random.uniform(size=(n,m))
self.T2 = cKDTree(self.data2,leafsize=2, boxsize=1.0)
self.p = 2.
self.eps = 0
self.d = 0.2
class Test_two_random_trees_far(Test_two_random_trees):
def setup_method(self):
Test_two_random_trees.setup_method(self)
self.d = 2
class Test_two_random_trees_far_compiled(Test_two_random_trees_compiled):
def setup_method(self):
Test_two_random_trees_compiled.setup_method(self)
self.d = 2
class Test_two_random_trees_far_compiled_periodic(Test_two_random_trees_compiled_periodic):
def setup_method(self):
Test_two_random_trees_compiled_periodic.setup_method(self)
self.d = 2
class Test_two_random_trees_linf(Test_two_random_trees):
def setup_method(self):
Test_two_random_trees.setup_method(self)
self.p = np.inf
class Test_two_random_trees_linf_compiled(Test_two_random_trees_compiled):
def setup_method(self):
Test_two_random_trees_compiled.setup_method(self)
self.p = np.inf
class Test_two_random_trees_linf_compiled_periodic(Test_two_random_trees_compiled_periodic):
def setup_method(self):
Test_two_random_trees_compiled_periodic.setup_method(self)
self.p = np.inf
class Test_rectangle:
def setup_method(self):
self.rect = Rectangle([0,0],[1,1])
def test_min_inside(self):
assert_almost_equal(self.rect.min_distance_point([0.5,0.5]),0)
def test_min_one_side(self):
assert_almost_equal(self.rect.min_distance_point([0.5,1.5]),0.5)
def test_min_two_sides(self):
assert_almost_equal(self.rect.min_distance_point([2,2]),np.sqrt(2))
def test_max_inside(self):
assert_almost_equal(self.rect.max_distance_point([0.5,0.5]),1/np.sqrt(2))
def test_max_one_side(self):
assert_almost_equal(self.rect.max_distance_point([0.5,1.5]),np.hypot(0.5,1.5))
def test_max_two_sides(self):
assert_almost_equal(self.rect.max_distance_point([2,2]),2*np.sqrt(2))
def test_split(self):
less, greater = self.rect.split(0,0.1)
assert_array_equal(less.maxes,[0.1,1])
assert_array_equal(less.mins,[0,0])
assert_array_equal(greater.maxes,[1,1])
assert_array_equal(greater.mins,[0.1,0])
def test_distance_l2():
assert_almost_equal(minkowski_distance([0,0],[1,1],2),np.sqrt(2))
def test_distance_l1():
assert_almost_equal(minkowski_distance([0,0],[1,1],1),2)
def test_distance_linf():
assert_almost_equal(minkowski_distance([0,0],[1,1],np.inf),1)
def test_distance_vectorization():
np.random.seed(1234)
x = np.random.randn(10,1,3)
y = np.random.randn(1,7,3)
assert_equal(minkowski_distance(x,y).shape,(10,7))
class count_neighbors_consistency:
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01),np.log(10),3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r,result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
class Test_count_neighbors(count_neighbors_consistency):
def setup_method(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
class Test_count_neighbors_compiled(count_neighbors_consistency):
def setup_method(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = cKDTree(np.random.randn(n,m),leafsize=2)
self.T2 = cKDTree(np.random.randn(n,m),leafsize=2)
class sparse_distance_matrix_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_consistency_with_neighbors(self):
M = self.T1.sparse_distance_matrix(self.T2, self.r)
r = self.T1.query_ball_tree(self.T2, self.r)
for i,l in enumerate(r):
for j in l:
assert_almost_equal(M[i,j],
self.distance(self.T1.data[i], self.T2.data[j], self.p),
decimal=14)
for ((i,j),d) in M.items():
assert_(j in r[i])
def test_zero_distance(self):
# raises an exception for bug 870 (FIXME: Does it?)
self.T1.sparse_distance_matrix(self.T1, self.r)
class Test_sparse_distance_matrix(sparse_distance_matrix_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
data1 = np.random.randn(n,m)
data2 = np.random.randn(n,m)
self.T1 = cKDTree(data1,leafsize=2)
self.T2 = cKDTree(data2,leafsize=2)
self.r = 0.5
self.p = 2
self.data1 = data1
self.data2 = data2
self.n = n
self.m = m
class Test_sparse_distance_matrix_compiled(sparse_distance_matrix_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(0)
data1 = np.random.randn(n,m)
data2 = np.random.randn(n,m)
self.T1 = cKDTree(data1,leafsize=2)
self.T2 = cKDTree(data2,leafsize=2)
self.ref_T1 = KDTree(data1, leafsize=2)
self.ref_T2 = KDTree(data2, leafsize=2)
self.r = 0.5
self.n = n
self.m = m
self.data1 = data1
self.data2 = data2
self.p = 2
def test_consistency_with_python(self):
M1 = self.T1.sparse_distance_matrix(self.T2, self.r)
M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r)
assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14)
def test_against_logic_error_regression(self):
# regression test for gh-5077 logic error
np.random.seed(0)
too_many = np.array(np.random.randn(18, 2), dtype=int)
tree = cKDTree(too_many, balanced_tree=False, compact_nodes=False)
d = tree.sparse_distance_matrix(tree, 3).todense()
assert_array_almost_equal(d, d.T, decimal=14)
def test_ckdtree_return_types(self):
# brute-force reference
ref = np.zeros((self.n,self.n))
for i in range(self.n):
for j in range(self.n):
v = self.data1[i,:] - self.data2[j,:]
ref[i,j] = np.dot(v,v)
ref = np.sqrt(ref)
ref[ref > self.r] = 0.
# test return type 'dict'
dist = np.zeros((self.n,self.n))
r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dict')
for i,j in r.keys():
dist[i,j] = r[(i,j)]
assert_array_almost_equal(ref, dist, decimal=14)
# test return type 'ndarray'
dist = np.zeros((self.n,self.n))
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='ndarray')
for k in range(r.shape[0]):
i = r['i'][k]
j = r['j'][k]
v = r['v'][k]
dist[i,j] = v
assert_array_almost_equal(ref, dist, decimal=14)
# test return type 'dok_matrix'
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='dok_matrix')
assert_array_almost_equal(ref, r.todense(), decimal=14)
# test return type 'coo_matrix'
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='coo_matrix')
assert_array_almost_equal(ref, r.todense(), decimal=14)
def test_distance_matrix():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
assert_equal(ds.shape, (m,n))
for i in range(m):
for j in range(n):
assert_almost_equal(minkowski_distance(xs[i],ys[j]),ds[i,j])
def test_distance_matrix_looping():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
dsl = distance_matrix(xs,ys,threshold=1)
assert_equal(ds,dsl)
def check_onetree_query(T,d):
r = T.query_ball_tree(T, d)
s = set()
for i, l in enumerate(r):
for j in l:
if i < j:
s.add((i,j))
assert_(s == T.query_pairs(d))
def test_onetree_query():
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n,k)
T = KDTree(points)
check_onetree_query(T, 0.1)
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = KDTree(points)
check_onetree_query(T, 0.1)
check_onetree_query(T, 0.001)
check_onetree_query(T, 0.00001)
check_onetree_query(T, 1e-6)
def test_onetree_query_compiled():
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n,k)
T = cKDTree(points)
check_onetree_query(T, 0.1)
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = cKDTree(points)
check_onetree_query(T, 0.1)
check_onetree_query(T, 0.001)
check_onetree_query(T, 0.00001)
check_onetree_query(T, 1e-6)
def test_query_pairs_single_node():
tree = KDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_query_pairs_single_node_compiled():
tree = cKDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_ckdtree_query_pairs():
np.random.seed(0)
n = 50
k = 2
r = 0.1
r2 = r**2
points = np.random.randn(n,k)
T = cKDTree(points)
# brute force reference
brute = set()
for i in range(n):
for j in range(i+1,n):
v = points[i,:] - points[j,:]
if np.dot(v,v) <= r2:
brute.add((i,j))
l0 = sorted(brute)
# test default return type
s = T.query_pairs(r)
l1 = sorted(s)
assert_array_equal(l0,l1)
# test return type 'set'
s = T.query_pairs(r, output_type='set')
l1 = sorted(s)
assert_array_equal(l0,l1)
# test return type 'ndarray'
s = set()
arr = T.query_pairs(r, output_type='ndarray')
for i in range(arr.shape[0]):
s.add((int(arr[i,0]),int(arr[i,1])))
l2 = sorted(s)
assert_array_equal(l0,l2)
def test_ball_point_ints():
# Regression test for #1373.
x, y = np.mgrid[0:4, 0:4]
points = list(zip(x.ravel(), y.ravel()))
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
points = np.asarray(points, dtype=float)
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
def test_kdtree_comparisons():
# Regression test: node comparisons were done wrong in 0.12 w/Py3.
nodes = [KDTree.node() for _ in range(3)]
assert_equal(sorted(nodes), sorted(nodes[::-1]))
def test_ckdtree_build_modes():
# check if different build modes for cKDTree give
# similar query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T1 = cKDTree(points).query(points, k=5)[-1]
T2 = cKDTree(points, compact_nodes=False).query(points, k=5)[-1]
T3 = cKDTree(points, balanced_tree=False).query(points, k=5)[-1]
T4 = cKDTree(points, compact_nodes=False, balanced_tree=False).query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
assert_array_equal(T1, T4)
def test_ckdtree_pickle():
# test if it is possible to pickle
# a cKDTree
try:
import cPickle as pickle
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n, k)
T1 = cKDTree(points)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_pickle_boxsize():
# test if it is possible to pickle a periodic
# cKDTree
try:
import cPickle as pickle
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.uniform(size=(n, k))
T1 = cKDTree(points, boxsize=1.0)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_copy_data():
# check if copy_data=True makes the kd-tree
# impervious to data corruption by modification of
    # the data array
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = cKDTree(points, copy_data=True)
q = points.copy()
T1 = T.query(q, k=5)[-1]
points[...] = np.random.randn(n, k)
T2 = T.query(q, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_parallel():
# check if parallel=True also generates correct
# query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = cKDTree(points)
T1 = T.query(points, k=5, n_jobs=64)[-1]
T2 = T.query(points, k=5, n_jobs=-1)[-1]
T3 = T.query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
def test_ckdtree_view():
# Check that the nodes can be correctly viewed from Python.
# This test also sanity checks each node in the cKDTree, and
# thus verifies the internal structure of the kd-tree.
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n, k)
kdtree = cKDTree(points)
# walk the whole kd-tree and sanity check each node
def recurse_tree(n):
assert_(isinstance(n, cKDTreeNode))
if n.split_dim == -1:
assert_(n.lesser is None)
assert_(n.greater is None)
assert_(n.indices.shape[0] <= kdtree.leafsize)
else:
recurse_tree(n.lesser)
recurse_tree(n.greater)
x = n.lesser.data_points[:, n.split_dim]
y = n.greater.data_points[:, n.split_dim]
assert_(x.max() < y.min())
recurse_tree(kdtree.tree)
# check that indices are correctly retrieved
n = kdtree.tree
assert_array_equal(np.sort(n.indices), range(100))
# check that data_points are correctly retrieved
assert_array_equal(kdtree.data[n.indices, :], n.data_points)
# cKDTree is specialized to type double points, so no need to make
# a unit test corresponding to test_ball_point_ints()
def test_ckdtree_list_k():
    # check cKDTree queries where k is given as a list
n = 200
m = 2
klist = [1, 2, 3]
kint = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1)
# check agreement between arange(1,k+1) and k
dd, ii = kdtree.query(data, klist)
dd1, ii1 = kdtree.query(data, kint)
assert_equal(dd, dd1)
assert_equal(ii, ii1)
# now check skipping one element
klist = np.array([1, 3])
kint = 3
dd, ii = kdtree.query(data, kint)
dd1, ii1 = kdtree.query(data, klist)
assert_equal(dd1, dd[..., klist - 1])
assert_equal(ii1, ii[..., klist - 1])
# check k == 1 special case
# and k == [1] non-special case
dd, ii = kdtree.query(data, 1)
dd1, ii1 = kdtree.query(data, [1])
assert_equal(len(dd.shape), 1)
assert_equal(len(dd1.shape), 2)
assert_equal(dd, np.ravel(dd1))
assert_equal(ii, np.ravel(ii1))
def test_ckdtree_box():
# check ckdtree periodic boundary
n = 2000
m = 3
k = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1, boxsize=1.0)
    # use a plain (non-periodic) cKDTree as reference for the simulated
    # periodic box
kdtree2 = cKDTree(data, leafsize=1)
for p in [1, 2, 3.0, np.inf]:
dd, ii = kdtree.query(data, k, p=p)
dd1, ii1 = kdtree.query(data + 1.0, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
dd1, ii1 = kdtree.query(data - 1.0, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
dd2, ii2 = simulate_periodic_box(kdtree2, data, k, boxsize=1.0, p=p)
assert_almost_equal(dd, dd2)
assert_equal(ii, ii2)
def test_ckdtree_box_0boxsize():
# check ckdtree periodic boundary that mimics non-periodic
n = 2000
m = 2
k = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = cKDTree(data, leafsize=1, boxsize=0.0)
    # boxsize=0 disables wrapping, so results must match a plain cKDTree
kdtree2 = cKDTree(data, leafsize=1)
for p in [1, 2, np.inf]:
dd, ii = kdtree.query(data, k, p=p)
dd1, ii1 = kdtree2.query(data, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
def test_ckdtree_box_upper_bounds():
data = np.linspace(0, 2, 10).reshape(-1, 2)
data[:, 1] += 10
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0)
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=(0.0, 2.0))
# skip a dimension.
cKDTree(data, leafsize=1, boxsize=(2.0, 0.0))
def test_ckdtree_box_lower_bounds():
data = np.linspace(-1, 1, 10)
assert_raises(ValueError, cKDTree, data, leafsize=1, boxsize=1.0)
def simulate_periodic_box(kdtree, data, k, boxsize, p):
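    # Brute-force reference for periodic queries: shift the query points
    # through all 3**d periodic images of the box, query the non-periodic
    # tree for each shift, then merge the candidates sorted by distance and
    # keep the k nearest.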
dd = []
ii = []
x = np.arange(3 ** data.shape[1])
nn = np.array(np.unravel_index(x, [3] * data.shape[1])).T
nn = nn - 1.0
for n in nn:
image = data + n * 1.0 * boxsize
dd2, ii2 = kdtree.query(image, k, p=p)
dd2 = dd2.reshape(-1, k)
ii2 = ii2.reshape(-1, k)
dd.append(dd2)
ii.append(ii2)
dd = np.concatenate(dd, axis=-1)
ii = np.concatenate(ii, axis=-1)
result = np.empty([len(data), len(nn) * k], dtype=[
('ii', 'i8'),
('dd', 'f8')])
result['ii'][:] = ii
result['dd'][:] = dd
result.sort(order='dd')
return result['dd'][:, :k], result['ii'][:,:k]
def test_ckdtree_memuse():
# unit test adaptation of gh-5630
# NOTE: this will fail when run via valgrind,
# because rss is no longer a reliable memory usage indicator.
try:
import resource
except ImportError:
        # the resource module is not available on Windows
return
# Make some data
dx, dy = 0.05, 0.05
y, x = np.mgrid[slice(1, 5 + dy, dy),
slice(1, 5 + dx, dx)]
z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
z_copy = np.empty_like(z)
z_copy[:] = z
    # Place FILLVAL in z_copy at a random number of random locations
FILLVAL = 99.
mask = np.random.randint(0, z.size, np.random.randint(50) + 5)
z_copy.flat[mask] = FILLVAL
igood = np.vstack(np.where(x != FILLVAL)).T
ibad = np.vstack(np.where(x == FILLVAL)).T
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
# burn-in
for i in range(10):
tree = cKDTree(igood)
# count memleaks while constructing and querying cKDTree
num_leaks = 0
for i in range(100):
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
tree = cKDTree(igood)
dist, iquery = tree.query(ibad, k=4, p=2)
new_mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if new_mem_use > mem_use:
num_leaks += 1
# ideally zero leaks, but errors might accidentally happen
# outside cKDTree
assert_(num_leaks < 10)
def test_ckdtree_weights():
data = np.linspace(0, 1, 4).reshape(-1, 1)
tree1 = cKDTree(data, leafsize=1)
weights = np.ones(len(data), dtype='f4')
nw = tree1._build_weights(weights)
assert_array_equal(nw, [4, 2, 1, 1, 2, 1, 1])
assert_raises(ValueError, tree1._build_weights, weights[:-1])
for i in range(10):
# since weights are uniform, these shall agree:
c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, i))
c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(weights, weights))
c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(weights, None))
c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(None, weights))
c5 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=weights)
assert_array_equal(c1, c2)
assert_array_equal(c1, c3)
assert_array_equal(c1, c4)
for i in range(len(data)):
# this tests removal of one data point by setting weight to 0
w1 = weights.copy()
w1[i] = 0
data2 = data[w1 != 0]
w2 = weights[w1 != 0]
tree2 = cKDTree(data2)
c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, 100),
weights=(w1, w1))
# "c2 is correct"
c2 = tree2.count_neighbors(tree2, np.linspace(0, 10, 100))
assert_array_equal(c1, c2)
        # passing a single weights array for two different trees is
        # ambiguous and must raise ValueError
assert_raises(ValueError, tree1.count_neighbors,
tree2, np.linspace(0, 10, 100), weights=w1)
def test_ckdtree_count_neighbors_multiple_r():
n = 2000
m = 2
np.random.seed(1234)
data = np.random.normal(size=(n, m))
kdtree = cKDTree(data, leafsize=1)
r0 = [0, 0.01, 0.01, 0.02, 0.05]
i0 = np.arange(len(r0))
n0 = kdtree.count_neighbors(kdtree, r0)
nnc = kdtree.count_neighbors(kdtree, r0, cumulative=False)
assert_equal(n0, nnc.cumsum())
for i, r in zip(itertools.permutations(i0),
itertools.permutations(r0)):
# permute n0 by i and it shall agree
n = kdtree.count_neighbors(kdtree, r)
assert_array_equal(n, n0[list(i)])
def test_len0_arrays():
# make sure len-0 arrays are handled correctly
# in range queries (gh-5639)
np.random.seed(1234)
X = np.random.rand(10,2)
Y = np.random.rand(10,2)
tree = cKDTree(X)
# query_ball_point (single)
d,i = tree.query([.5, .5], k=1)
z = tree.query_ball_point([.5, .5], 0.1*d)
assert_array_equal(z, [])
# query_ball_point (multiple)
d,i = tree.query(Y, k=1)
mind = d.min()
z = tree.query_ball_point(Y, 0.1*mind)
y = np.empty(shape=(10,), dtype=object)
y.fill([])
assert_array_equal(y, z)
# query_ball_tree
other = cKDTree(Y)
y = tree.query_ball_tree(other, 0.1*mind)
assert_array_equal(10*[[]], y)
# count_neighbors
y = tree.count_neighbors(other, 0.1*mind)
assert_(y == 0)
# sparse_distance_matrix
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dok_matrix')
assert_array_equal(y == np.zeros((10,10)), True)
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='coo_matrix')
assert_array_equal(y == np.zeros((10,10)), True)
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dict')
assert_equal(y, {})
y = tree.sparse_distance_matrix(other,0.1*mind, output_type='ndarray')
_dtype = [('i',np.intp), ('j',np.intp), ('v',np.float64)]
res_dtype = np.dtype(_dtype, align=True)
z = np.empty(shape=(0,), dtype=res_dtype)
assert_array_equal(y, z)
# query_pairs
d,i = tree.query(X, k=2)
mind = d[:,-1].min()
y = tree.query_pairs(0.1*mind, output_type='set')
assert_equal(y, set())
y = tree.query_pairs(0.1*mind, output_type='ndarray')
z = np.empty(shape=(0,2), dtype=np.intp)
assert_array_equal(y, z)
def test_ckdtree_duplicated_inputs():
# check ckdtree with duplicated inputs
n = 1024
for m in range(1, 8):
data = np.concatenate([
np.ones((n // 2, m)) * 1,
np.ones((n // 2, m)) * 2], axis=0)
        # with only two distinct point values the tree needs at most
        # 3 nodes: the root plus a left (1s) and a right (2s) leaf
kdtree = cKDTree(data, leafsize=1)
assert_equal(kdtree.size, 3)
kdtree = cKDTree(data)
assert_equal(kdtree.size, 3)
# if compact_nodes are disabled, the number
# of nodes is n (per leaf) + (m - 1)* 2 (splits per dimension) + 1
# and the root
kdtree = cKDTree(data, compact_nodes=False, leafsize=1)
assert_equal(kdtree.size, n + m * 2 - 1)
def test_ckdtree_noncumulative_nondecreasing():
    # with cumulative=False, count_neighbors requires the r values to be
    # non-decreasing; a decreasing sequence must raise ValueError
kdtree = cKDTree([[0]], leafsize=1)
assert_raises(ValueError, kdtree.count_neighbors,
kdtree, [0.1, 0], cumulative=False)
def test_short_knn():
# The test case is based on github: #6425 by @SteveDoyle2
xyz = np.array([
[0., 0., 0.],
[1.01, 0., 0.],
[0., 1., 0.],
[0., 1.01, 0.],
[1., 0., 0.],
[1., 1., 0.],],
dtype='float64')
ckdt = cKDTree(xyz)
deq, ieq = ckdt.query(xyz, k=4, distance_upper_bound=0.2)
assert_array_almost_equal(deq,
[[0., np.inf, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., np.inf, np.inf, np.inf]])
| 41,504 | 30.136534 | 102 | py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/tests/__init__.py |
| 0 | 0 | 0 | py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/tests/test_spherical_voronoi.py |
from __future__ import print_function
import numpy as np
import itertools
from numpy.testing import (assert_equal,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal)
from pytest import raises as assert_raises
from scipy.spatial import SphericalVoronoi, distance
from scipy.spatial import _spherical_voronoi as spherical_voronoi
class TestCircumcenters(object):
def test_circumcenters(self):
tetrahedrons = np.array([
[[1, 2, 3],
[-1.1, -2.1, -3.1],
[-1.2, 2.2, 3.2],
[-1.3, -2.3, 3.3]],
[[10, 20, 30],
[-10.1, -20.1, -30.1],
[-10.2, 20.2, 30.2],
[-10.3, -20.3, 30.3]]
])
result = spherical_voronoi.calc_circumcenters(tetrahedrons)
expected = [
[-0.5680861153262529, -0.133279590288315, 0.1843323216995444],
[-0.5965330784014926, -0.1480377040397778, 0.1981967854886021]
]
assert_array_almost_equal(result, expected)
class TestProjectToSphere(object):
def test_unit_sphere(self):
points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
center = np.array([0, 0, 0])
radius = 1
projected = spherical_voronoi.project_to_sphere(points, center, radius)
assert_array_almost_equal(points, projected)
def test_scaled_points(self):
points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
center = np.array([0, 0, 0])
radius = 1
scaled = points * 2
projected = spherical_voronoi.project_to_sphere(scaled, center, radius)
assert_array_almost_equal(points, projected)
def test_translated_sphere(self):
points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
center = np.array([1, 2, 3])
translated = points + center
radius = 1
projected = spherical_voronoi.project_to_sphere(translated, center,
radius)
assert_array_almost_equal(translated, projected)
class TestSphericalVoronoi(object):
def setup_method(self):
self.points = np.array([
[-0.78928481, -0.16341094, 0.59188373],
[-0.66839141, 0.73309634, 0.12578818],
[0.32535778, -0.92476944, -0.19734181],
[-0.90177102, -0.03785291, -0.43055335],
[0.71781344, 0.68428936, 0.12842096],
[-0.96064876, 0.23492353, -0.14820556],
[0.73181537, -0.22025898, -0.6449281],
[0.79979205, 0.54555747, 0.25039913]]
)
def test_constructor(self):
center = np.array([1, 2, 3])
radius = 2
s1 = SphericalVoronoi(self.points)
# user input checks in SphericalVoronoi now require
# the radius / center to match the generators so adjust
# accordingly here
s2 = SphericalVoronoi(self.points * radius, radius)
s3 = SphericalVoronoi(self.points + center, None, center)
s4 = SphericalVoronoi(self.points * radius + center, radius, center)
assert_array_equal(s1.center, np.array([0, 0, 0]))
assert_equal(s1.radius, 1)
assert_array_equal(s2.center, np.array([0, 0, 0]))
assert_equal(s2.radius, 2)
assert_array_equal(s3.center, center)
assert_equal(s3.radius, 1)
assert_array_equal(s4.center, center)
assert_equal(s4.radius, radius)
def test_vertices_regions_translation_invariance(self):
sv_origin = SphericalVoronoi(self.points)
center = np.array([1, 1, 1])
sv_translated = SphericalVoronoi(self.points + center, None, center)
assert_array_equal(sv_origin.regions, sv_translated.regions)
assert_array_almost_equal(sv_origin.vertices + center,
sv_translated.vertices)
def test_vertices_regions_scaling_invariance(self):
sv_unit = SphericalVoronoi(self.points)
sv_scaled = SphericalVoronoi(self.points * 2, 2)
assert_array_equal(sv_unit.regions, sv_scaled.regions)
assert_array_almost_equal(sv_unit.vertices * 2,
sv_scaled.vertices)
def test_sort_vertices_of_regions(self):
sv = SphericalVoronoi(self.points)
unsorted_regions = sv.regions
sv.sort_vertices_of_regions()
assert_array_equal(sorted(sv.regions), sorted(unsorted_regions))
def test_sort_vertices_of_regions_flattened(self):
expected = sorted([[0, 6, 5, 2, 3], [2, 3, 10, 11, 8, 7], [0, 6, 4, 1], [4, 8,
7, 5, 6], [9, 11, 10], [2, 7, 5], [1, 4, 8, 11, 9], [0, 3, 10, 9,
1]])
expected = list(itertools.chain(*sorted(expected)))
sv = SphericalVoronoi(self.points)
sv.sort_vertices_of_regions()
actual = list(itertools.chain(*sorted(sv.regions)))
assert_array_equal(actual, expected)
def test_num_vertices(self):
# for any n >= 3, a spherical Voronoi diagram has 2n - 4
# vertices; this is a direct consequence of Euler's formula
# as explained by Dinis and Mamede (2010) Proceedings of the
# 2010 International Symposium on Voronoi Diagrams in Science
# and Engineering
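        # sketch of the count: Euler gives V - E + F = 2 with F = n regions;
        # generic Voronoi vertices have degree 3, so E = 3V/2, and solving
        # V - 3V/2 + n = 2 yields V = 2n - 4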
sv = SphericalVoronoi(self.points)
expected = self.points.shape[0] * 2 - 4
actual = sv.vertices.shape[0]
assert_equal(actual, expected)
def test_voronoi_circles(self):
sv = spherical_voronoi.SphericalVoronoi(self.points)
for vertex in sv.vertices:
distances = distance.cdist(sv.points,np.array([vertex]))
closest = np.array(sorted(distances)[0:3])
assert_almost_equal(closest[0], closest[1], 7, str(vertex))
assert_almost_equal(closest[0], closest[2], 7, str(vertex))
def test_duplicate_point_handling(self):
# an exception should be raised for degenerate generators
# related to Issue# 7046
self.degenerate = np.concatenate((self.points, self.points))
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.degenerate)
def test_incorrect_radius_handling(self):
# an exception should be raised if the radius provided
# cannot possibly match the input generators
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.points,
radius=0.98)
def test_incorrect_center_handling(self):
# an exception should be raised if the center provided
# cannot possibly match the input generators
with assert_raises(ValueError):
sv = spherical_voronoi.SphericalVoronoi(self.points,
center=[0.1,0,0])
| 6,854 | 40.047904 | 86 | py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/spatial/tests/test_hausdorff.py |
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (assert_almost_equal,
assert_array_equal,
assert_equal,
assert_)
from scipy.spatial.distance import directed_hausdorff
from scipy.spatial import distance
from scipy._lib._util import check_random_state
class TestHausdorff(object):
# Test various properties of the directed Hausdorff code.
def setup_method(self):
np.random.seed(1234)
random_angles = np.random.random(100) * np.pi * 2
random_columns = np.column_stack(
(random_angles, random_angles, np.zeros(100)))
random_columns[..., 0] = np.cos(random_columns[..., 0])
random_columns[..., 1] = np.sin(random_columns[..., 1])
random_columns_2 = np.column_stack(
(random_angles, random_angles, np.zeros(100)))
random_columns_2[1:, 0] = np.cos(random_columns_2[1:, 0]) * 2.0
random_columns_2[1:, 1] = np.sin(random_columns_2[1:, 1]) * 2.0
# move one point farther out so we don't have two perfect circles
random_columns_2[0, 0] = np.cos(random_columns_2[0, 0]) * 3.3
random_columns_2[0, 1] = np.sin(random_columns_2[0, 1]) * 3.3
self.path_1 = random_columns
self.path_2 = random_columns_2
self.path_1_4d = np.insert(self.path_1, 3, 5, axis=1)
self.path_2_4d = np.insert(self.path_2, 3, 27, axis=1)
def test_symmetry(self):
# Ensure that the directed (asymmetric) Hausdorff distance is
# actually asymmetric
forward = directed_hausdorff(self.path_1, self.path_2)[0]
reverse = directed_hausdorff(self.path_2, self.path_1)[0]
assert_(forward != reverse)
def test_brute_force_comparison_forward(self):
# Ensure that the algorithm for directed_hausdorff gives the
# same result as the simple / brute force approach in the
# forward direction.
actual = directed_hausdorff(self.path_1, self.path_2)[0]
# brute force over rows:
expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
axis=1))
assert_almost_equal(actual, expected, decimal=9)
def test_brute_force_comparison_reverse(self):
# Ensure that the algorithm for directed_hausdorff gives the
# same result as the simple / brute force approach in the
# reverse direction.
actual = directed_hausdorff(self.path_2, self.path_1)[0]
# brute force over columns:
expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
axis=0))
assert_almost_equal(actual, expected, decimal=9)
def test_degenerate_case(self):
# The directed Hausdorff distance must be zero if both input
# data arrays match.
actual = directed_hausdorff(self.path_1, self.path_1)[0]
assert_almost_equal(actual, 0.0, decimal=9)
def test_2d_data_forward(self):
# Ensure that 2D data is handled properly for a simple case
# relative to brute force approach.
actual = directed_hausdorff(self.path_1[..., :2],
self.path_2[..., :2])[0]
expected = max(np.amin(distance.cdist(self.path_1[..., :2],
self.path_2[..., :2]),
axis=1))
assert_almost_equal(actual, expected, decimal=9)
def test_4d_data_reverse(self):
# Ensure that 4D data is handled properly for a simple case
# relative to brute force approach.
actual = directed_hausdorff(self.path_2_4d, self.path_1_4d)[0]
# brute force over columns:
expected = max(np.amin(distance.cdist(self.path_1_4d, self.path_2_4d),
axis=0))
assert_almost_equal(actual, expected, decimal=9)
def test_indices(self):
# Ensure that correct point indices are returned -- they should
# correspond to the Hausdorff pair
path_simple_1 = np.array([[-1,-12],[0,0], [1,1], [3,7], [1,2]])
path_simple_2 = np.array([[0,0], [1,1], [4,100], [10,9]])
actual = directed_hausdorff(path_simple_2, path_simple_1)[1:]
expected = (2, 3)
assert_array_equal(actual, expected)
def test_random_state(self):
# ensure that the global random state is not modified because
# the directed Hausdorff algorithm uses randomization
rs = check_random_state(None)
old_global_state = rs.get_state()
directed_hausdorff(self.path_1, self.path_2)
rs2 = check_random_state(None)
new_global_state = rs2.get_state()
assert_equal(new_global_state, old_global_state)
def test_random_state_None_int(self):
# check that seed values of None or int do not alter global
# random state
for seed in [None, 27870671]:
rs = check_random_state(None)
old_global_state = rs.get_state()
directed_hausdorff(self.path_1, self.path_2, seed)
rs2 = check_random_state(None)
new_global_state = rs2.get_state()
assert_equal(new_global_state, old_global_state)
| 5,286 | 44.577586 | 79 | py | cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/decorator.py |
# ######################### LICENSE ############################ #
# Copyright (c) 2005-2015, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
from __future__ import print_function
import re
import sys
import inspect
import operator
import itertools
import collections
__version__ = '4.0.5'
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
class getfullargspec(object):
"A quick and dirty replacement for getfullargspec for Python 2.X"
def __init__(self, f):
self.args, self.varargs, self.varkw, self.defaults = \
inspect.getargspec(f)
self.kwonlyargs = []
self.kwonlydefaults = None
def __iter__(self):
yield self.args
yield self.varargs
yield self.varkw
yield self.defaults
getargspec = inspect.getargspec
def get_init(cls):
return cls.__init__.__func__
# getargspec has been deprecated in Python 3.5
ArgSpec = collections.namedtuple(
'ArgSpec', 'args varargs varkw defaults')
def getargspec(f):
"""A replacement for inspect.getargspec"""
spec = getfullargspec(f)
return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
if sys.version < '3': # easy way
self.shortsignature = self.signature = (
inspect.formatargspec(
formatvalue=lambda val: "", *argspec)[1:-1])
else: # Python 3 way
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
        # check the existence of required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<decorator-gen-%d>' % (next(self._compile_count),)
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip the trailing right paren
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
return self.make('def %(name)s(%(signature)s):\n' + ibody,
evaldict, addsource, **attrs)
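# A minimal usage sketch of FunctionMaker.create with a string specification
# (the names here are hypothetical, not part of the module). An empty evaldict
# is enough because the generated body only references its own arguments.
_demo_add = FunctionMaker.create('_demo_add(a, b)', 'return a + b', {})
assert _demo_add(1, 2) == 3  # a real function, with a genuine (a, b) signature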
def decorate(func, caller):
"""
decorate(func, caller) decorates a function using a caller.
"""
evaldict = func.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
fun = FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun
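# A minimal usage sketch of decorate() (hypothetical names): the caller runs
# around every invocation, and the returned function keeps the original
# signature, docstring and defaults.
def _counting_caller(func, *args, **kw):
    _counting_caller.calls = getattr(_counting_caller, 'calls', 0) + 1
    return func(*args, **kw)

def _demo_sum(a, b=0):
    return a + b

_demo_sum = decorate(_demo_sum, _counting_caller)
assert _demo_sum(1, 2) == 3 and _counting_caller.calls == 1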
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
callerfunc = caller
doc = caller.__doc__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.__func__
doc = caller.__call__.__doc__
evaldict = callerfunc.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_decorate_'] = decorate
return FunctionMaker.create(
'%s(func)' % name, 'return _decorate_(func, _call_)',
evaldict, doc=doc, module=caller.__module__,
__wrapped__=caller)
# ####################### contextmanager ####################### #
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager as _GeneratorContextManager
class ContextManager(_GeneratorContextManager):
def __call__(self, func):
"""Context manager decorator"""
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g(*a, **k))
ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
pass
elif n_args == 4: # (self, gen, args, kwds) Python 3.5
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g, a, k)
ContextManager.__init__ = __init__
contextmanager = decorator(ContextManager)
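# A minimal usage sketch of this contextmanager (hypothetical names), assuming
# a Python-3.5+ contextlib so the (self, gen, args, kwds) branch above applies:
# for plain `with` blocks it behaves like contextlib.contextmanager.
@contextmanager
def _recording(log):
    log.append('enter')
    yield
    log.append('exit')

_events = []
with _recording(_events):
    _events.append('body')
assert _events == ['enter', 'body', 'exit']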
# ############################ dispatch_on ############################ #
def append(a, vancestors):
"""
Append ``a`` to the list of the virtual ancestors, unless it is already
included.
"""
add = True
for j, va in enumerate(vancestors):
if issubclass(va, a):
add = False
break
if issubclass(a, va):
vancestors[j] = a
add = False
if add:
vancestors.append(a)
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
"""
Factory of decorators turning a function into a generic function
dispatching on the given arguments.
"""
assert dispatch_args, 'No dispatch args passed'
dispatch_str = '(%s,)' % ', '.join(dispatch_args)
def check(arguments, wrong=operator.ne, msg=''):
"""Make sure one passes the expected number of arguments"""
if wrong(len(arguments), len(dispatch_args)):
raise TypeError('Expected %d arguments, got %d%s' %
(len(dispatch_args), len(arguments), msg))
def gen_func_dec(func):
"""Decorator turning a function into a generic function"""
# first check the dispatch arguments
argset = set(getfullargspec(func).args)
if not set(dispatch_args) <= argset:
raise NameError('Unknown dispatch arguments %s' % dispatch_str)
typemap = {}
def vancestors(*types):
"""
Get a list of sets of virtual ancestors for the given types
"""
check(types)
ras = [[] for _ in range(len(dispatch_args))]
for types_ in typemap:
for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.__mro__:
append(type_, ra)
return [set(ra) for ra in ras]
def ancestors(*types):
"""
Get a list of virtual MROs, one for each type
"""
check(types)
lists = []
for t, vas in zip(types, vancestors(*types)):
n_vas = len(vas)
if n_vas > 1:
raise RuntimeError(
'Ambiguous dispatch for %s: %s' % (t, vas))
elif n_vas == 1:
va, = vas
mro = type('t', (t, va), {}).__mro__[1:]
else:
mro = t.__mro__
lists.append(mro[:-1]) # discard t and object
return lists
def register(*types):
"""
Decorator to register an implementation for the given types
"""
check(types)
def dec(f):
check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
typemap[types] = f
return f
return dec
def dispatch_info(*types):
"""
            A utility to introspect the dispatch algorithm
"""
check(types)
lst = []
for anc in itertools.product(*ancestors(*types)):
lst.append(tuple(a.__name__ for a in anc))
return lst
def _dispatch(dispatch_args, *args, **kw):
types = tuple(type(arg) for arg in dispatch_args)
try: # fast path
f = typemap[types]
except KeyError:
pass
else:
return f(*args, **kw)
combinations = itertools.product(*ancestors(*types))
next(combinations) # the first one has been already tried
for types_ in combinations:
f = typemap.get(types_)
if f is not None:
return f(*args, **kw)
# else call the default implementation
return func(*args, **kw)
return FunctionMaker.create(
func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
dict(_f_=_dispatch), register=register, default=func,
typemap=typemap, vancestors=vancestors, ancestors=ancestors,
dispatch_info=dispatch_info, __wrapped__=func)
gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
return gen_func_dec
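# A minimal usage sketch of dispatch_on (hypothetical names): the decorated
# function is the default implementation, and register() attaches per-type
# implementations looked up from the runtime type of the dispatch argument.
@dispatch_on('obj')
def _describe(obj):
    return 'generic'

@_describe.register(int)
def _describe_int(obj):
    return 'int'

assert _describe(3) == 'int' and _describe('x') == 'generic'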
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/_threadsafety.py | py |
from __future__ import division, print_function, absolute_import
import threading
import scipy._lib.decorator
__all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']
class ReentrancyError(RuntimeError):
pass
class ReentrancyLock(object):
"""
Threading lock that raises an exception for reentrant calls.
Calls from different threads are serialized, and nested calls from the
    same thread result in an error.
The object can be used as a context manager, or to decorate functions
via the decorate() method.
"""
def __init__(self, err_msg):
self._rlock = threading.RLock()
self._entered = False
self._err_msg = err_msg
def __enter__(self):
self._rlock.acquire()
if self._entered:
self._rlock.release()
raise ReentrancyError(self._err_msg)
self._entered = True
def __exit__(self, type, value, traceback):
self._entered = False
self._rlock.release()
def decorate(self, func):
def caller(func, *a, **kw):
with self:
return func(*a, **kw)
return scipy._lib.decorator.decorate(func, caller)
def non_reentrant(err_msg=None):
"""
Decorate a function with a threading lock and prevent reentrant calls.
"""
def decorator(func):
msg = err_msg
if msg is None:
msg = "%s is not re-entrant" % func.__name__
lock = ReentrancyLock(msg)
return lock.decorate(func)
return decorator
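# A minimal usage sketch (hypothetical name): a second call made while the
# first is still running -- e.g. via recursion -- raises ReentrancyError.
@non_reentrant("demo_solve is not re-entrant")
def demo_solve(x):
    return x

assert demo_solve(42) == 42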
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/_testutils.py | py |
"""
Generic test utilities.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
__all__ = ['PytestTester', 'check_free_memory']
class FPUModeChangeWarning(RuntimeWarning):
"""Warning about FPU mode change"""
pass
class PytestTester(object):
"""
Pytest test runner entry point.
"""
def __init__(self, module_name):
self.module_name = module_name
def __call__(self, label="fast", verbose=1, extra_argv=None, doctests=False,
coverage=False, tests=None):
import pytest
module = sys.modules[self.module_name]
module_path = os.path.abspath(module.__path__[0])
pytest_args = ['-l']
if doctests:
raise ValueError("Doctests not supported")
if extra_argv:
pytest_args += list(extra_argv)
if verbose and int(verbose) > 1:
pytest_args += ["-" + "v"*(int(verbose)-1)]
if coverage:
pytest_args += ["--cov=" + module_path]
if label == "fast":
pytest_args += ["-m", "not slow"]
elif label != "full":
pytest_args += ["-m", label]
if tests is None:
tests = [self.module_name]
pytest_args += ['--pyargs'] + list(tests)
try:
code = pytest.main(pytest_args)
except SystemExit as exc:
code = exc.code
return (code == 0)
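# A minimal usage sketch: scipy subpackages expose an instance of this class
# as their `test` callable, roughly as follows (illustrative, not executed):
#
#     test = PytestTester(__name__)
#     test(label="fast", verbose=2)   # pytest -l -v -m "not slow" --pyargs <pkg>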
def check_free_memory(free_mb):
"""
Check *free_mb* of memory is available, otherwise do pytest.skip
"""
import pytest
try:
mem_free = _parse_size(os.environ['SCIPY_AVAILABLE_MEM'])
msg = '{0} MB memory required, but environment SCIPY_AVAILABLE_MEM={1}'.format(
free_mb, os.environ['SCIPY_AVAILABLE_MEM'])
except KeyError:
mem_free = _get_mem_available()
if mem_free is None:
pytest.skip("Could not determine available memory; set SCIPY_AVAILABLE_MEM "
"variable to free memory in MB to run the test.")
msg = '{0} MB memory required, but {1} MB available'.format(
free_mb, mem_free/1e6)
if mem_free < free_mb * 1e6:
pytest.skip(msg)
def _parse_size(size_str):
suffixes = {'': 1e6,
'b': 1.0,
'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12,
'kb': 1e3, 'Mb': 1e6, 'Gb': 1e9, 'Tb': 1e12,
'kib': 1024.0, 'Mib': 1024.0**2, 'Gib': 1024.0**3, 'Tib': 1024.0**4}
m = re.match(r'^\s*(\d+)\s*({0})\s*$'.format('|'.join(suffixes.keys())),
size_str,
re.I)
if not m or m.group(2) not in suffixes:
raise ValueError("Invalid size string")
return float(m.group(1)) * suffixes[m.group(2)]
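# Illustrative values accepted by _parse_size: a bare number is taken as MB,
# and binary (kib/Mib/...) suffixes use powers of 1024.
assert _parse_size('100') == 100e6
assert _parse_size('1Gb') == 1e9
assert _parse_size('512 kib') == 512 * 1024.0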
def _get_mem_available():
"""
Get information about memory available, not counting swap.
"""
try:
import psutil
return psutil.virtual_memory().available
except (ImportError, AttributeError):
pass
if sys.platform.startswith('linux'):
info = {}
with open('/proc/meminfo', 'r') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = float(p[1]) * 1e3
if 'memavailable' in info:
# Linux >= 3.14
return info['memavailable']
else:
return info['memfree'] + info['cached']
return None
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/setup.py | py |
from __future__ import division, print_function, absolute_import
import os
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('_lib', parent_package, top_path)
config.add_data_files('tests/*.py')
include_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
depends = [os.path.join(include_dir, 'ccallback.h')]
config.add_extension("_ccallback_c",
sources=["_ccallback_c.c"],
depends=depends,
include_dirs=[include_dir])
config.add_extension("_test_ccallback",
sources=["src/_test_ccallback.c"],
depends=depends,
include_dirs=[include_dir])
config.add_extension("_fpumode",
sources=["_fpumode.c"])
def get_messagestream_config(ext, build_dir):
# Generate a header file containing defines
config_cmd = config.get_config_cmd()
defines = []
if config_cmd.check_func('open_memstream', decl=True, call=True):
defines.append(('HAVE_OPEN_MEMSTREAM', '1'))
target = os.path.join(os.path.dirname(__file__), 'src',
'messagestream_config.h')
with open(target, 'w') as f:
for name, value in defines:
f.write('#define {0} {1}\n'.format(name, value))
depends = [os.path.join(include_dir, 'messagestream.h')]
config.add_extension("messagestream",
sources=["messagestream.c"] + [get_messagestream_config],
depends=depends,
include_dirs=[include_dir])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/_util.py | py |
from __future__ import division, print_function, absolute_import
import functools
import operator
import sys
import warnings
import numbers
from collections import namedtuple
import inspect
import numpy as np
def _valarray(shape, value=np.nan, typecode=None):
"""Return an array of all value.
"""
out = np.ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, np.ndarray):
out = np.asarray(out)
return out
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
    >>> def f(a, b):
    ...     return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = _valarray(np.shape(arrays[0]), value=fillvalue, typecode=tcode)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
def _lazyselect(condlist, choicelist, arrays, default=0):
"""
Mimic `np.select(condlist, choicelist)`.
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
All functions in `choicelist` must accept array arguments in the order
given in `arrays` and must return an array of the same shape as broadcasted
`arrays`.
Examples
--------
>>> x = np.arange(6)
    >>> np.select([x < 3, x > 3], [x**2, x**3], default=0)
array([ 0, 1, 4, 0, 64, 125])
>>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
array([ 0., 1., 4., 0., 64., 125.])
>>> a = -np.ones_like(x)
>>> _lazyselect([x < 3, x > 3],
... [lambda x, a: x**2, lambda x, a: a * x**3],
... (x, a), default=np.nan)
array([ 0., 1., 4., nan, -64., -125.])
"""
arrays = np.broadcast_arrays(*arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = _valarray(np.shape(arrays[0]), value=default, typecode=tcode)
for index in range(len(condlist)):
func, cond = choicelist[index], condlist[index]
if np.all(cond is False):
continue
cond, _ = np.broadcast_arrays(cond, arrays[0])
temp = tuple(np.extract(cond, arr) for arr in arrays)
np.place(out, cond, func(*temp))
return out
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""Allocate a new ndarray with aligned memory.
Primary use case for this currently is working around a f2py issue
in Numpy 1.9.1, where dtype.alignment is such that np.zeros() does
not necessarily create arrays aligned up to it.
"""
dtype = np.dtype(dtype)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
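# A minimal usage sketch (hypothetical name): request 64-byte alignment and
# verify it through the array interface.
_demo_arr = _aligned_zeros((3, 5), dtype=np.float64, align=64)
assert _demo_arr.__array_interface__['data'][0] % 64 == 0
assert _demo_arr.shape == (3, 5) and not _demo_arr.any()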
def _prune_array(array):
"""Return an array equivalent to the input array. If the input
array is a view of a much larger array, copy its contents to a
newly allocated array. Otherwise, return the input unchanged.
"""
if array.base is not None and array.size < array.base.size // 2:
return array.copy()
return array
class DeprecatedImport(object):
"""
Deprecated import, with redirection + warning.
Examples
--------
Suppose you previously had in some module::
from foo import spam
If this has to be deprecated, do::
spam = DeprecatedImport("foo.spam", "baz")
to redirect users to use "baz" module instead.
"""
def __init__(self, old_module_name, new_module_name):
self._old_name = old_module_name
self._new_name = new_module_name
__import__(self._new_name)
self._mod = sys.modules[self._new_name]
def __dir__(self):
return dir(self._mod)
def __getattr__(self, name):
warnings.warn("Module %s is deprecated, use %s instead"
% (self._old_name, self._new_name),
DeprecationWarning)
return getattr(self._mod, name)
# copy-pasted from scikit-learn utils/validation.py
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None (or np.random), return the RandomState singleton used
by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
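# A minimal usage sketch of the three accepted seed kinds:
assert check_random_state(None) is np.random.mtrand._rand  # global singleton
assert (check_random_state(42).randint(100)
        == check_random_state(42).randint(100))            # reproducible
_demo_rs = np.random.RandomState(0)
assert check_random_state(_demo_rs) is _demo_rs            # passed through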
def _asarray_validated(a, check_finite=True,
sparse_ok=False, objects_ok=False, mask_ok=False,
as_inexact=False):
"""
Helper function for scipy argument validation.
Many scipy linear algebra functions do support arbitrary array-like
input arguments. Examples of commonly unsupported inputs include
matrices containing inf/nan, sparse matrix representations, and
matrices with complicated elements.
Parameters
----------
a : array_like
The array-like input.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
sparse_ok : bool, optional
True if scipy sparse matrices are allowed.
objects_ok : bool, optional
        True if arrays with dtype('O') are allowed.
mask_ok : bool, optional
True if masked arrays are allowed.
as_inexact : bool, optional
True to convert the input array to a np.inexact dtype.
Returns
-------
ret : ndarray
The converted validated array.
"""
if not sparse_ok:
import scipy.sparse
if scipy.sparse.issparse(a):
msg = ('Sparse matrices are not supported by this function. '
'Perhaps one of the scipy.sparse.linalg functions '
'would work instead.')
raise ValueError(msg)
if not mask_ok:
if np.ma.isMaskedArray(a):
raise ValueError('masked arrays are not supported')
toarray = np.asarray_chkfinite if check_finite else np.asarray
a = toarray(a)
if not objects_ok:
if a.dtype is np.dtype('O'):
raise ValueError('object arrays are not supported')
if as_inexact:
if not np.issubdtype(a.dtype, np.inexact):
a = toarray(a, dtype=np.float_)
return a
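# A minimal usage sketch (hypothetical name): integer input is validated for
# finiteness and upcast to an inexact dtype when as_inexact=True.
_demo_a = _asarray_validated([1, 2, 3], as_inexact=True)
assert _demo_a.dtype == np.float_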
# Add a replacement for inspect.getargspec() which is deprecated in python 3.5
# The version below is borrowed from Django,
# https://github.com/django/django/pull/4846
# Note an inconsistency between inspect.getargspec(func) and
# inspect.signature(func). If `func` is a bound method, the latter does *not*
# list `self` as a first argument, while the former *does*.
# Hence cook up a common ground replacement: `getargspec_no_self` which
# mimics `inspect.getargspec` but does not list `self`.
#
# This way, the caller code does not need to know whether it uses a legacy
# .getargspec or bright and shiny .signature.
try:
# is it python 3.3 or higher?
inspect.signature
# Apparently, yes. Wrap inspect.signature
ArgSpec = namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults'])
def getargspec_no_self(func):
"""inspect.getargspec replacement using inspect.signature.
inspect.getargspec is deprecated in python 3. This is a replacement
based on the (new in python 3.3) `inspect.signature`.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
argspec : ArgSpec(args, varargs, varkw, defaults)
This is similar to the result of inspect.getargspec(func) under
python 2.x.
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not* included in argspec.args.
This is done for consistency between inspect.getargspec() under
python 2.x, and inspect.signature() under python 3.x.
"""
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
p.default is not p.empty)
] or None
return ArgSpec(args, varargs, varkw, defaults)
except AttributeError:
# python 2.x
def getargspec_no_self(func):
"""inspect.getargspec replacement for compatibility with python 3.x.
inspect.getargspec is deprecated in python 3. This wraps it, and
*removes* `self` from the argument list of `func`, if present.
This is done for forward compatibility with python 3.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
argspec : ArgSpec(args, varargs, varkw, defaults)
This is similar to the result of inspect.getargspec(func) under
python 2.x.
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not* included in argspec.args.
This is done for consistency between inspect.getargspec() under
python 2.x, and inspect.signature() under python 3.x.
"""
argspec = inspect.getargspec(func)
if argspec.args[0] == 'self':
argspec.args.pop(0)
return argspec
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/six.py | py |
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2012 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
# Replacement for lazy loading stuff in upstream six. See gh-2764
if PY3:
import builtins
import functools
reduce = functools.reduce
zip = builtins.zip
xrange = builtins.range
else:
import __builtin__
import itertools
builtins = __builtin__
reduce = __builtin__.reduce
zip = itertools.izip
xrange = __builtin__.xrange
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
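# A minimal usage sketch (hypothetical name): the same calls work under both
# Python 2 and Python 3.
_demo_d = {'a': 1}
assert list(iterkeys(_demo_d)) == ['a']
assert list(itervalues(_demo_d)) == [1]
assert list(iteritems(_demo_d)) == [('a', 1)]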
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/_numpy_compat.py | py |
"""Functions copypasted from newer versions of numpy.
"""
from __future__ import division, print_function, absolute_import
import warnings
import sys
from warnings import WarningMessage
import re
from functools import wraps
import numpy as np
from scipy._lib._version import NumpyVersion
if NumpyVersion(np.__version__) > '1.7.0.dev':
_assert_warns = np.testing.assert_warns
else:
def _assert_warns(warning_class, func, *args, **kw):
r"""
Fail unless the given callable throws the specified warning.
This definition is copypasted from numpy 1.9.0.dev.
The version in earlier numpy returns None.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
*args : Arguments
Arguments passed to `func`.
**kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
            if l[0].category is not warning_class:
                raise AssertionError("First warning for %s is not a "
                        "%s (is %s)" % (func.__name__, warning_class, l[0]))
return result
if NumpyVersion(np.__version__) >= '1.10.0':
from numpy import broadcast_to
else:
# Definition of `broadcast_to` from numpy 1.10.0.
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
def _broadcast_to(array, shape, subok, readonly):
shape = tuple(shape) if np.iterable(shape) else (shape,)
array = np.array(array, copy=False, subok=subok)
if not shape and array.shape:
raise ValueError('cannot broadcast a non-scalar to a scalar array')
if any(size < 0 for size in shape):
raise ValueError('all elements of broadcast shape must be non-'
'negative')
broadcast = np.nditer(
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
op_flags=['readonly'], itershape=shape, order='C').itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
if not readonly and array.flags.writeable:
result.flags.writeable = True
return result
def broadcast_to(array, shape, subok=False):
return _broadcast_to(array, shape, subok=subok, readonly=True)
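# A minimal usage sketch: whichever branch defined broadcast_to above, the
# result is a read-only broadcast view of the input.
_demo_x = np.array([1, 2, 3])
_demo_y = broadcast_to(_demo_x, (2, 3))
assert _demo_y.shape == (2, 3) and not _demo_y.flags.writeable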
if NumpyVersion(np.__version__) >= '1.9.0':
from numpy import unique
else:
# the return_counts keyword was added in 1.9.0
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements: the indices of the input array
that give the unique values, the indices of the unique array that
reconstruct the input array, and the number of times each unique value
comes up in the input array.
Parameters
----------
ar : array_like
Input array. This will be flattened if it is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` that result in the unique
array.
return_inverse : bool, optional
If True, also return the indices of the unique array that can be used
to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique value comes up
in `ar`.
.. versionadded:: 1.9.0
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
(flattened) original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the (flattened) original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
.. versionadded:: 1.9.0
Notes
-----
Taken over from numpy 1.12.0-dev (c8408bf9c). Omitted examples,
see numpy documentation for those.
"""
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
optional_returns = optional_indices or return_counts
if ar.size == 0:
if not optional_returns:
ret = ar
else:
ret = (ar,)
if return_index:
ret += (np.empty(0, np.bool),)
if return_inverse:
ret += (np.empty(0, np.bool),)
if return_counts:
ret += (np.empty(0, np.intp),)
return ret
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if not optional_returns:
ret = aux[flag]
else:
ret = (aux[flag],)
if return_index:
ret += (perm[flag],)
if return_inverse:
iflag = np.cumsum(flag) - 1
inv_idx = np.empty(ar.shape, dtype=np.intp)
inv_idx[perm] = iflag
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
ret += (np.diff(idx),)
return ret
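# A minimal usage sketch of the backported return_counts keyword:
_demo_vals, _demo_counts = unique([1, 1, 2, 3, 3, 3], return_counts=True)
assert list(_demo_vals) == [1, 2, 3]
assert list(_demo_counts) == [2, 1, 3]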
if NumpyVersion(np.__version__) > '1.12.0.dev':
polyvalfromroots = np.polynomial.polynomial.polyvalfromroots
else:
def polyvalfromroots(x, r, tensor=True):
r"""
Evaluate a polynomial specified by its roots at points x.
This function is copypasted from numpy 1.12.0.dev.
If `r` is of length `N`, this function returns the value
.. math:: p(x) = \prod_{n=1}^{N} (x - r_n)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `r`.
If `r` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`r` is multidimensional, then the shape of the result depends on the
        value of `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:]
+ x.shape; that is, each polynomial is evaluated at every value of `x`.
If `tensor` is ``False``, the shape will be r.shape[1:]; that is, each
polynomial is evaluated only for the corresponding broadcast value of
`x`. Note that scalars have shape (,).
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
            or its elements must support addition and multiplication with
themselves and with the elements of `r`.
r : array_like
Array of roots. If `r` is multidimensional the first index is the
root index, while the remaining indices enumerate multiple
polynomials. For instance, in the two dimensional case the roots of
each polynomial may be thought of as stored in the columns of `r`.
tensor : boolean, optional
If True, the shape of the roots array is extended with ones on the
right, one for each dimension of `x`. Scalars have dimension 0 for
this action. The result is that every column of coefficients in `r`
is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `r` for the evaluation. This keyword is useful
when `r` is multidimensional. The default value is True.
Returns
-------
values : ndarray, compatible object
The shape of the returned array is described above.
See Also
--------
polyroots, polyfromroots, polyval
Examples
--------
>>> from numpy.polynomial.polynomial import polyvalfromroots
>>> polyvalfromroots(1, [1,2,3])
0.0
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> polyvalfromroots(a, [-1, 0, 1])
array([[ -0., 0.],
[ 6., 24.]])
>>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients
>>> r # each column of r defines one polynomial
array([[-2, -1],
[ 0, 1]])
>>> b = [-2, 1]
>>> polyvalfromroots(b, r, tensor=True)
array([[-0., 3.],
[ 3., 0.]])
>>> polyvalfromroots(b, r, tensor=False)
array([-0., 0.])
"""
r = np.array(r, ndmin=1, copy=0)
if r.dtype.char in '?bBhHiIlLqQpP':
r = r.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray):
if tensor:
r = r.reshape(r.shape + (1,)*x.ndim)
elif x.ndim >= r.ndim:
raise ValueError("x.ndim must be < r.ndim when tensor == "
"False")
return np.prod(x - r, axis=0)
try:
from numpy.testing import suppress_warnings
except ImportError:
class suppress_warnings(object):
"""
Context manager and decorator doing much the same as
``warnings.catch_warnings``.
However, it also provides a filter mechanism to work around
http://bugs.python.org/issue4180.
This bug causes Python before 3.4 to not reliably show warnings again
after they have been ignored once (even within catch_warnings). It
means that no "ignore" filter can be used easily, since following
tests might need to see the warning. Additionally it allows easier
specificity for testing warnings and can be nested.
Parameters
----------
        forwarding_rule : str, optional
            One of "always", "once", "module", or "location". Analogous to
            the usual warnings module filter mode, it is useful to reduce
            noise mostly on the outermost level. Unsuppressed and unrecorded
            warnings will be forwarded based on this rule. Defaults to "always".
            "location" is equivalent to the warnings "default", matching by the
            exact location the warning originated from.
Notes
-----
Filters added inside the context manager will be discarded again
when leaving it. Upon entering all filters defined outside a
context will be applied automatically.
When a recording filter is added, matching warnings are stored in the
``log`` attribute as well as in the list returned by ``record``.
If filters are added and the ``module`` keyword is given, the
warning registry of this module will additionally be cleared when
applying it, entering the context, or exiting it. This could cause
warnings to appear a second time after leaving the context if they
were configured to be printed once (default) and were already
printed before the context was entered.
Nesting this context manager will work as expected when the
forwarding rule is "always" (default). Unfiltered and unrecorded
warnings will be passed out and be matched by the outer level.
        On the outermost level they will be printed (or caught by another
warnings context). The forwarding rule argument can modify this
behaviour.
Like ``catch_warnings`` this context manager is not threadsafe.
Examples
--------
>>> with suppress_warnings() as sup:
... sup.filter(DeprecationWarning, "Some text")
... sup.filter(module=np.ma.core)
... log = sup.record(FutureWarning, "Does this occur?")
... command_giving_warnings()
... # The FutureWarning was given once, the filtered warnings were
... # ignored. All other warnings abide outside settings (may be
... # printed/error)
... assert_(len(log) == 1)
... assert_(len(sup.log) == 1) # also stored in log attribute
Or as a decorator:
>>> sup = suppress_warnings()
        >>> sup.filter(module=np.ma.core)  # module must match exactly
>>> @sup
>>> def some_function():
... # do something which causes a warning in np.ma.core
... pass
"""
def __init__(self, forwarding_rule="always"):
self._entered = False
# Suppressions are either instance or defined inside one with block:
self._suppressions = []
if forwarding_rule not in {"always", "module", "once", "location"}:
raise ValueError("unsupported forwarding rule.")
self._forwarding_rule = forwarding_rule
def _clear_registries(self):
if hasattr(warnings, "_filters_mutated"):
# clearing the registry should not be necessary on new pythons,
# instead the filters should be mutated.
warnings._filters_mutated()
return
# Simply clear the registry, this should normally be harmless,
# note that on new pythons it would be invalidated anyway.
for module in self._tmp_modules:
if hasattr(module, "__warningregistry__"):
module.__warningregistry__.clear()
def _filter(self, category=Warning, message="", module=None, record=False):
if record:
record = [] # The log where to store warnings
else:
record = None
if self._entered:
if module is None:
warnings.filterwarnings(
"always", category=category, message=message)
else:
module_regex = module.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=category, message=message,
module=module_regex)
self._tmp_modules.add(module)
self._clear_registries()
self._tmp_suppressions.append(
(category, message, re.compile(message, re.I), module, record))
else:
self._suppressions.append(
(category, message, re.compile(message, re.I), module, record))
return record
def filter(self, category=Warning, message="", module=None):
"""
Add a new suppressing filter or apply it if the state is entered.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
self._filter(category=category, message=message, module=module,
record=False)
def record(self, category=Warning, message="", module=None):
"""
Append a new recording filter or apply it if the state is entered.
All warnings matching will be appended to the ``log`` attribute.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Returns
-------
log : list
A list which will be filled with all matched warnings.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
return self._filter(category=category, message=message, module=module,
record=True)
def __enter__(self):
if self._entered:
raise RuntimeError("cannot enter suppress_warnings twice.")
self._orig_show = warnings.showwarning
self._filters = warnings.filters
warnings.filters = self._filters[:]
self._entered = True
self._tmp_suppressions = []
self._tmp_modules = set()
self._forwarded = set()
self.log = [] # reset global log (no need to keep same list)
for cat, mess, _, mod, log in self._suppressions:
if log is not None:
del log[:] # clear the log
if mod is None:
warnings.filterwarnings(
"always", category=cat, message=mess)
else:
module_regex = mod.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=cat, message=mess,
module=module_regex)
self._tmp_modules.add(mod)
warnings.showwarning = self._showwarning
self._clear_registries()
return self
def __exit__(self, *exc_info):
warnings.showwarning = self._orig_show
warnings.filters = self._filters
self._clear_registries()
self._entered = False
del self._orig_show
del self._filters
def _showwarning(self, message, category, filename, lineno,
*args, **kwargs):
use_warnmsg = kwargs.pop("use_warnmsg", None)
for cat, _, pattern, mod, rec in (
self._suppressions + self._tmp_suppressions)[::-1]:
if (issubclass(category, cat) and
pattern.match(message.args[0]) is not None):
if mod is None:
# Message and category match, either recorded or ignored
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# Use startswith, because warnings strips the c or o from
# .pyc/.pyo files.
elif mod.__file__.startswith(filename):
# The message and module (filename) match
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# There is no filter in place, so pass to the outside handler
# unless we should only pass it once
if self._forwarding_rule == "always":
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno,
*args, **kwargs)
else:
self._orig_showmsg(use_warnmsg)
return
if self._forwarding_rule == "once":
signature = (message.args, category)
elif self._forwarding_rule == "module":
signature = (message.args, category, filename)
elif self._forwarding_rule == "location":
signature = (message.args, category, filename, lineno)
if signature in self._forwarded:
return
self._forwarded.add(signature)
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno, *args,
**kwargs)
else:
self._orig_showmsg(use_warnmsg)
def __call__(self, func):
"""
Function decorator to apply certain suppressions to a whole
function.
"""
@wraps(func)
def new_func(*args, **kwargs):
with self:
return func(*args, **kwargs)
return new_func
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/_version.py | py |
"""Utility to compare (Numpy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
import re
from scipy._lib.six import string_types
__all__ = ['NumpyVersion']
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
    can be >9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance.
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Notes
-----
All dev versions of the same (pre-)release compare equal.
Examples
--------
>>> from scipy._lib._version import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (string_types, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, string_types):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/_gcutils.py | py |
"""
Module for testing automatic garbage collection of objects
.. autosummary::
:toctree: generated/
set_gc_state - enable or disable garbage collection
gc_state - context manager for given state of garbage collector
   assert_deallocated - context manager to check for circular references on an object
"""
import weakref
import gc
import sys
from contextlib import contextmanager
__all__ = ['set_gc_state', 'gc_state', 'assert_deallocated']
IS_PYPY = '__pypy__' in sys.modules
class ReferenceError(AssertionError):
pass
def set_gc_state(state):
""" Set status of garbage collector """
if gc.isenabled() == state:
return
if state:
gc.enable()
else:
gc.disable()
@contextmanager
def gc_state(state):
""" Context manager to set state of garbage collector to `state`
Parameters
----------
state : bool
True for gc enabled, False for disabled
Examples
--------
>>> with gc_state(False):
... assert not gc.isenabled()
>>> with gc_state(True):
... assert gc.isenabled()
"""
orig_state = gc.isenabled()
set_gc_state(state)
yield
set_gc_state(orig_state)
@contextmanager
def assert_deallocated(func, *args, **kwargs):
"""Context manager to check that object is deallocated
This is useful for checking that an object can be freed directly by
reference counting, without requiring gc to break reference cycles.
GC is disabled inside the context manager.
This check is not available on PyPy.
Parameters
----------
func : callable
Callable to create object to check
\\*args : sequence
positional arguments to `func` in order to create object to check
\\*\\*kwargs : dict
keyword arguments to `func` in order to create object to check
Examples
--------
>>> class C(object): pass
>>> with assert_deallocated(C) as c:
... # do something
... del c
>>> class C(object):
... def __init__(self):
... self._circular = self # Make circular reference
>>> with assert_deallocated(C) as c: #doctest: +IGNORE_EXCEPTION_DETAIL
... # do something
... del c
Traceback (most recent call last):
...
ReferenceError: Remaining reference(s) to object
"""
if IS_PYPY:
raise RuntimeError("assert_deallocated is unavailable on PyPy")
with gc_state(False):
obj = func(*args, **kwargs)
ref = weakref.ref(obj)
yield obj
del obj
if ref() is not None:
raise ReferenceError("Remaining reference(s) to object")
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/__init__.py | py |
"""
Module containing private utility functions
===========================================
The ``scipy._lib`` namespace is empty (for now). Tests for all
utilities in submodules of ``_lib`` can be run with::
from scipy import _lib
_lib.test()
"""
from __future__ import division, print_function, absolute_import
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| cba-pipeline-public | cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/_ccallback.py |
from . import _ccallback_c

import ctypes

# Base class shared by all ctypes function-pointer types; used below for
# isinstance() checks on callback arguments.
PyCFuncPtr = ctypes.CFUNCTYPE(ctypes.c_void_p).__bases__[0]

ffi = None


class CData(object):
    # Placeholder; rebound to ffi.CData by _import_cffi() when cffi is
    # importable, so the isinstance() checks below work either way.
    pass


def _import_cffi():
    global ffi, CData

    if ffi is not None:
        return

    try:
        import cffi
        ffi = cffi.FFI()
        CData = ffi.CData
    except ImportError:
        ffi = False


class LowLevelCallable(tuple):
"""
Low-level callback function.
Parameters
----------
function : {PyCapsule, ctypes function pointer, cffi function pointer}
Low-level callback function.
user_data : {PyCapsule, ctypes void pointer, cffi void pointer}
User data to pass on to the callback function.
signature : str, optional
Signature of the function. If omitted, determined from *function*,
if possible.
Attributes
----------
function
Callback function given
user_data
User data given
signature
Signature of the function.
Methods
-------
from_cython
Class method for constructing callables from Cython C-exported
functions.
Notes
-----
The argument ``function`` can be one of:
- PyCapsule, whose name contains the C function signature
- ctypes function pointer
- cffi function pointer
The signature of the low-level callback must match one of those expected
by the routine it is passed to.
If constructing low-level functions from a PyCapsule, the name of the
capsule must be the corresponding signature, in the format::
return_type (arg1_type, arg2_type, ...)
For example::
"void (double)"
"double (double, int *, void *)"
The context of a PyCapsule passed in as ``function`` is used as ``user_data``,
if an explicit value for `user_data` was not given.
"""
    # Make the class immutable
    __slots__ = ()

    def __new__(cls, function, user_data=None, signature=None):
        # We need to hold a reference to the function & user data,
        # to prevent them going out of scope
        item = cls._parse_callback(function, user_data, signature)
        return tuple.__new__(cls, (item, function, user_data))

    def __repr__(self):
        return "LowLevelCallable({!r}, {!r})".format(self.function, self.user_data)

    @property
    def function(self):
        return tuple.__getitem__(self, 1)

    @property
    def user_data(self):
        return tuple.__getitem__(self, 2)

    @property
    def signature(self):
        return _ccallback_c.get_capsule_signature(tuple.__getitem__(self, 0))

    def __getitem__(self, idx):
        # Plain tuple indexing is deliberately disabled; use the named
        # properties above instead.
        raise ValueError()

@classmethod
def from_cython(cls, module, name, user_data=None, signature=None):
"""
Create a low-level callback function from an exported Cython function.
Parameters
----------
module : module
Cython module where the exported function resides
name : str
Name of the exported function
user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional
User data to pass on to the callback function.
signature : str, optional
Signature of the function. If omitted, determined from *function*.
"""
try:
function = module.__pyx_capi__[name]
except AttributeError:
raise ValueError("Given module is not a Cython module with __pyx_capi__ attribute")
except KeyError:
raise ValueError("No function {!r} found in __pyx_capi__ of the module".format(name))
return cls(function, user_data, signature)
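
    # Illustrative usage of from_cython (module and function names are
    # hypothetical; not part of the original module):
    # >>> import my_cython_module
    # >>> cb = LowLevelCallable.from_cython(my_cython_module, "my_func")
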
@classmethod
def _parse_callback(cls, obj, user_data=None, signature=None):
_import_cffi()
if isinstance(obj, LowLevelCallable):
func = tuple.__getitem__(obj, 0)
elif isinstance(obj, PyCFuncPtr):
func, signature = _get_ctypes_func(obj, signature)
elif isinstance(obj, CData):
func, signature = _get_cffi_func(obj, signature)
elif _ccallback_c.check_capsule(obj):
func = obj
else:
raise ValueError("Given input is not a callable or a low-level callable (pycapsule/ctypes/cffi)")
if isinstance(user_data, ctypes.c_void_p):
context = _get_ctypes_data(user_data)
elif isinstance(user_data, CData):
context = _get_cffi_data(user_data)
elif user_data is None:
context = 0
elif _ccallback_c.check_capsule(user_data):
context = user_data
else:
raise ValueError("Given user data is not a valid low-level void* pointer (pycapsule/ctypes/cffi)")
return _ccallback_c.get_raw_capsule(func, signature, context)
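
# Illustrative usage sketch (library and symbol names below are hypothetical;
# not part of the original module): wrapping a ctypes function pointer infers
# the signature from its restype/argtypes.
# >>> import ctypes
# >>> lib = ctypes.CDLL('libexample.so')          # hypothetical library
# >>> lib.transform.restype = ctypes.c_double     # hypothetical symbol
# >>> lib.transform.argtypes = (ctypes.c_double,)
# >>> callback = LowLevelCallable(lib.transform)
# >>> callback.signature
# 'double (double)'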

#
# ctypes helpers
#

def _get_ctypes_func(func, signature=None):
# Get function pointer
func_ptr = ctypes.cast(func, ctypes.c_void_p).value
# Construct function signature
if signature is None:
signature = _typename_from_ctypes(func.restype) + " ("
for j, arg in enumerate(func.argtypes):
if j == 0:
signature += _typename_from_ctypes(arg)
else:
signature += ", " + _typename_from_ctypes(arg)
signature += ")"
return func_ptr, signature
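
# Illustrative result (not part of the original module): for a ctypes
# function with restype=ctypes.c_double and
# argtypes=(ctypes.c_double, ctypes.POINTER(ctypes.c_int)), the signature
# constructed above is "double (double, int *)".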
def _typename_from_ctypes(item):
if item is None:
return "void"
elif item is ctypes.c_void_p:
return "void *"
name = item.__name__
pointer_level = 0
while name.startswith("LP_"):
pointer_level += 1
name = name[3:]
if name.startswith('c_'):
name = name[2:]
if pointer_level > 0:
name += " " + "*"*pointer_level
return name
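
# Illustrative mappings produced by _typename_from_ctypes (not part of the
# original module):
#   ctypes.c_double               -> "double"
#   ctypes.c_void_p               -> "void *"
#   ctypes.POINTER(ctypes.c_int)  -> "int *"   (type name "LP_c_int")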
def _get_ctypes_data(data):
# Get voidp pointer
return ctypes.cast(data, ctypes.c_void_p).value

#
# CFFI helpers
#

def _get_cffi_func(func, signature=None):
# Get function pointer
func_ptr = ffi.cast('uintptr_t', func)
# Get signature
if signature is None:
signature = ffi.getctype(ffi.typeof(func)).replace('(*)', ' ')
return func_ptr, signature
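
# Illustrative result (assumes cffi is importable; not part of the original
# module): for a cffi function pointer of C type "double(*)(double, int *)",
# ffi.getctype() returns "double(*)(double, int *)" and the replacement above
# yields "double (double, int *)", matching the capsule signature format.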
def _get_cffi_data(data):
# Get pointer
return ffi.cast('uintptr_t', data)