Dataset columns (each record below lists these fields in this order):

  repo             string   (length 2 to 152)
  file             string   (length 15 to 239)
  code             string   (length 0 to 58.4M)
  file_length      int64    (0 to 58.4M)
  avg_line_length  float64  (0 to 1.81M)
  max_line_length  int64    (0 to 12.7M)
  extension_type   string   (364 distinct values)
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/_tmpdirs.py
''' Contexts for *with* statement providing temporary directories '''
from __future__ import division, print_function, absolute_import
import os
from contextlib import contextmanager
from shutil import rmtree
from tempfile import mkdtemp


@contextmanager
def tempdir():
    """Create and return a temporary directory. This has the same
    behavior as mkdtemp but can be used as a context manager.

    Upon exiting the context, the directory and everything contained
    in it are removed.

    Examples
    --------
    >>> import os
    >>> with tempdir() as tmpdir:
    ...     fname = os.path.join(tmpdir, 'example_file.txt')
    ...     with open(fname, 'wt') as fobj:
    ...         _ = fobj.write('a string\\n')
    >>> os.path.exists(tmpdir)
    False
    """
    d = mkdtemp()
    yield d
    rmtree(d)


@contextmanager
def in_tempdir():
    ''' Create, return, and change directory to a temporary directory

    Examples
    --------
    >>> import os
    >>> my_cwd = os.getcwd()
    >>> with in_tempdir() as tmpdir:
    ...     _ = open('test.txt', 'wt').write('some text')
    ...     assert os.path.isfile('test.txt')
    ...     assert os.path.isfile(os.path.join(tmpdir, 'test.txt'))
    >>> os.path.exists(tmpdir)
    False
    >>> os.getcwd() == my_cwd
    True
    '''
    pwd = os.getcwd()
    d = mkdtemp()
    os.chdir(d)
    yield d
    os.chdir(pwd)
    rmtree(d)


@contextmanager
def in_dir(dir=None):
    """ Change directory to given directory for duration of ``with`` block

    Useful when you want to use `in_tempdir` for the final test, but
    you are still debugging.  For example, you may want to do this in the
    end:

    >>> with in_tempdir() as tmpdir:
    ...     # do something complicated which might break
    ...     pass

    But indeed the complicated thing does break, and meanwhile the
    ``in_tempdir`` context manager wiped out the directory with the
    temporary files that you wanted for debugging.  So, while debugging,
    you replace with something like:

    >>> with in_dir() as tmpdir: # Use working directory by default
    ...     # do something complicated which might break
    ...     pass

    You can then look at the temporary file outputs to debug what is
    happening, fix, and finally replace ``in_dir`` with ``in_tempdir``
    again.
    """
    cwd = os.getcwd()
    if dir is None:
        yield cwd
        return
    os.chdir(dir)
    yield dir
    os.chdir(cwd)
2,439
26.727273
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test_tmpdirs.py
""" Test tmpdirs module """ from __future__ import division, print_function, absolute_import from os import getcwd from os.path import realpath, abspath, dirname, isfile, join as pjoin, exists from scipy._lib._tmpdirs import tempdir, in_tempdir, in_dir from numpy.testing import assert_, assert_equal MY_PATH = abspath(__file__) MY_DIR = dirname(MY_PATH) def test_tempdir(): with tempdir() as tmpdir: fname = pjoin(tmpdir, 'example_file.txt') with open(fname, 'wt') as fobj: fobj.write('a string\\n') assert_(not exists(tmpdir)) def test_in_tempdir(): my_cwd = getcwd() with in_tempdir() as tmpdir: with open('test.txt', 'wt') as f: f.write('some text') assert_(isfile('test.txt')) assert_(isfile(pjoin(tmpdir, 'test.txt'))) assert_(not exists(tmpdir)) assert_equal(getcwd(), my_cwd) def test_given_directory(): # Test InGivenDirectory cwd = getcwd() with in_dir() as tmpdir: assert_equal(tmpdir, abspath(cwd)) assert_equal(tmpdir, abspath(getcwd())) with in_dir(MY_DIR) as tmpdir: assert_equal(tmpdir, MY_DIR) assert_equal(realpath(MY_DIR), realpath(abspath(getcwd()))) # We were deleting the given directory! Check not so now. assert_(isfile(MY_PATH))
1,310
27.5
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test__testutils.py
from __future__ import division, print_function, absolute_import

import sys

from scipy._lib._testutils import _parse_size, _get_mem_available
import pytest


def test__parse_size():
    expected = {
        '12': 12e6,
        '12 b': 12,
        '12k': 12e3,
        ' 12 M ': 12e6,
        ' 12 G ': 12e9,
        ' 12Tb ': 12e12,
        '12 Mib ': 12 * 1024.0**2,
        '12Tib': 12 * 1024.0**4,
    }

    for inp, outp in sorted(expected.items()):
        if outp is None:
            with pytest.raises(ValueError):
                _parse_size(inp)
        else:
            assert _parse_size(inp) == outp


def test__mem_available():
    # May return None on non-Linux platforms
    available = _get_mem_available()
    if sys.platform.startswith('linux'):
        assert available >= 0
    else:
        assert available is None or available >= 0
866
23.771429
65
py
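The expected-value table in the test above pins down the parsing rule for memory-size strings: plain SI suffixes (k, M, G, T) are decimal, the "ib" forms are binary, and a bare number is read as megabytes. Below is a minimal illustrative re-implementation of that rule; it is a sketch, not SciPy's private `_parse_size` itself, and the helper name is made up here.

import re

def parse_size(size_str):
    # Suffix -> multiplier in bytes; SI suffixes are decimal, the "ib"
    # forms are binary, and a bare number is taken to mean megabytes.
    suffixes = {'': 1e6, 'b': 1.0,
                'k': 1e3, 'kb': 1e3, 'kib': 1024.0,
                'm': 1e6, 'mb': 1e6, 'mib': 1024.0**2,
                'g': 1e9, 'gb': 1e9, 'gib': 1024.0**3,
                't': 1e12, 'tb': 1e12, 'tib': 1024.0**4}
    m = re.match(r'^\s*(\d+)\s*([a-z]*)\s*$', size_str, re.I)
    if m is None or m.group(2).lower() not in suffixes:
        raise ValueError("invalid size string: %r" % (size_str,))
    return float(m.group(1)) * suffixes[m.group(2).lower()]

assert parse_size('12k') == 12e3
assert parse_size(' 12Tb ') == 12e12
assert parse_size('12 Mib ') == 12 * 1024.0**2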
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test_warnings.py
""" Tests which scan for certain occurrences in the code, they may not find all of these occurrences but should catch almost all. This file was adapted from numpy. """ from __future__ import division, absolute_import, print_function import sys import scipy import pytest if sys.version_info >= (3, 4): from pathlib import Path import ast import tokenize class ParseCall(ast.NodeVisitor): def __init__(self): self.ls = [] def visit_Attribute(self, node): ast.NodeVisitor.generic_visit(self, node) self.ls.append(node.attr) def visit_Name(self, node): self.ls.append(node.id) class FindFuncs(ast.NodeVisitor): def __init__(self, filename): super().__init__() self.__filename = filename self.bad_filters = [] self.bad_stacklevels = [] def visit_Call(self, node): p = ParseCall() p.visit(node.func) ast.NodeVisitor.generic_visit(self, node) if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': if node.args[0].s == "ignore": self.bad_filters.append( "{}:{}".format(self.__filename, node.lineno)) if p.ls[-1] == 'warn' and ( len(p.ls) == 1 or p.ls[-2] == 'warnings'): if self.__filename == "_lib/tests/test_warnings.py": # This file return # See if stacklevel exists: if len(node.args) == 3: return args = {kw.arg for kw in node.keywords} if "stacklevel" not in args: self.bad_stacklevels.append( "{}:{}".format(self.__filename, node.lineno)) @pytest.fixture(scope="session") def warning_calls(): # combined "ignore" and stacklevel error base = Path(scipy.__file__).parent bad_filters = [] bad_stacklevels = [] for path in base.rglob("*.py"): # use tokenize to auto-detect encoding on systems where no # default encoding is defined (e.g. LANG='C') with tokenize.open(str(path)) as file: tree = ast.parse(file.read(), filename=str(path)) finder = FindFuncs(path.relative_to(base)) finder.visit(tree) bad_filters.extend(finder.bad_filters) bad_stacklevels.extend(finder.bad_stacklevels) return bad_filters, bad_stacklevels @pytest.mark.slow @pytest.mark.skipif(sys.version_info < (3, 4), reason="needs Python >= 3.4") def test_warning_calls_filters(warning_calls): bad_filters, bad_stacklevels = warning_calls # There is still one missing occurrence in optimize.py, # this is one that should be fixed and this removed then. bad_filters = [item for item in bad_filters if 'optimize.py' not in item] if bad_filters: raise AssertionError( "warning ignore filter should not be used, instead, use\n" "scipy._lib._numpy_compat.suppress_warnings (in tests only);\n" "found in:\n {}".format( "\n ".join(bad_filters))) @pytest.mark.slow @pytest.mark.skipif(sys.version_info < (3, 4), reason="needs Python >= 3.4") @pytest.mark.xfail(reason="stacklevels currently missing") def test_warning_calls_stacklevels(warning_calls): bad_filters, bad_stacklevels = warning_calls msg = "" if bad_filters: msg += ("warning ignore filter should not be used, instead, use\n" "scipy._lib._numpy_compat.suppress_warnings (in tests only);\n" "found in:\n {}".format("\n ".join(bad_filters))) msg += "\n\n" if bad_stacklevels: msg += "warnings should have an appropriate stacklevel:\n {}".format( "\n ".join(bad_stacklevels)) if msg: raise AssertionError(msg)
3,975
31.064516
80
py
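For context, the scan in test_warnings.py above parses every SciPy source file into an AST and flags two call patterns: `filterwarnings`/`simplefilter` invoked with "ignore", and `warnings.warn` without a `stacklevel`. A stripped-down sketch of the same idea applied to a single source string follows (hypothetical helper, assumes Python 3.8+ for `ast.Constant`):

import ast

def find_warning_issues(source, filename="<string>"):
    # Flag warnings.filterwarnings/simplefilter("ignore", ...) calls and
    # warnings.warn(...) calls that do not pass a stacklevel argument.
    issues = []
    for node in ast.walk(ast.parse(source, filename=filename)):
        if not isinstance(node, ast.Call):
            continue
        func = node.func
        name = func.attr if isinstance(func, ast.Attribute) else getattr(func, "id", "")
        if name in ("simplefilter", "filterwarnings"):
            if node.args and isinstance(node.args[0], ast.Constant) \
                    and node.args[0].value == "ignore":
                issues.append(("ignore filter", node.lineno))
        elif name == "warn":
            if len(node.args) < 3 and \
                    not any(kw.arg == "stacklevel" for kw in node.keywords):
                issues.append(("missing stacklevel", node.lineno))
    return issues

src = "import warnings\nwarnings.filterwarnings('ignore')\nwarnings.warn('x')\n"
print(find_warning_issues(src))  # [('ignore filter', 2), ('missing stacklevel', 3)]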
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test__version.py
from __future__ import division, absolute_import, print_function

from numpy.testing import assert_
from pytest import raises as assert_raises

from scipy._lib._version import NumpyVersion


def test_main_versions():
    assert_(NumpyVersion('1.8.0') == '1.8.0')
    for ver in ['1.9.0', '2.0.0', '1.8.1']:
        assert_(NumpyVersion('1.8.0') < ver)

    for ver in ['1.7.0', '1.7.1', '0.9.9']:
        assert_(NumpyVersion('1.8.0') > ver)


def test_version_1_point_10():
    # regression test for gh-2998.
    assert_(NumpyVersion('1.9.0') < '1.10.0')
    assert_(NumpyVersion('1.11.0') < '1.11.1')
    assert_(NumpyVersion('1.11.0') == '1.11.0')
    assert_(NumpyVersion('1.99.11') < '1.99.12')


def test_alpha_beta_rc():
    assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')

    for ver in ['1.8.0', '1.8.0rc2']:
        assert_(NumpyVersion('1.8.0rc1') < ver)

    for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:
        assert_(NumpyVersion('1.8.0rc1') > ver)

    assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')


def test_dev_version():
    assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')

    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:
        assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)

    assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')


def test_dev_a_b_rc_mixed():
    assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')
    assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')


def test_dev0_version():
    assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')

    for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
        assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)

    assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')


def test_dev0_a_b_rc_mixed():
    assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
    assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')


def test_raises():
    for ver in ['1.9', '1,9.0', '1.7.x']:
        assert_raises(ValueError, NumpyVersion, ver)
2,052
30.106061
77
py
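The comparisons exercised above are how `NumpyVersion` is used in practice: an installed version string is compared directly against plain strings, with alpha/beta/rc and dev builds ordering below the corresponding release. A short usage sketch (note that `scipy._lib._version` is a private module; the import simply mirrors the test above):

import numpy as np
from scipy._lib._version import NumpyVersion  # private module, imported as in the test

if NumpyVersion(np.__version__) >= '1.14.0':
    print("can rely on NumPy >= 1.14 behaviour")
else:
    print("fall back to the legacy code path")

# Pre-releases and dev builds sort below the corresponding final release:
assert NumpyVersion('1.9.0rc1') < '1.9.0'
assert NumpyVersion('1.9.0.dev0+f16acvda') < '1.9.0a1'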
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test__util.py
from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.testing import assert_equal, assert_
from pytest import raises as assert_raises

from scipy._lib._util import _aligned_zeros, check_random_state


def test__aligned_zeros():
    niter = 10

    def check(shape, dtype, order, align):
        err_msg = repr((shape, dtype, order, align))
        x = _aligned_zeros(shape, dtype, order, align=align)
        if align is None:
            align = np.dtype(dtype).alignment
        assert_equal(x.__array_interface__['data'][0] % align, 0)
        if hasattr(shape, '__len__'):
            assert_equal(x.shape, shape, err_msg)
        else:
            assert_equal(x.shape, (shape,), err_msg)
        assert_equal(x.dtype, dtype)
        if order == "C":
            assert_(x.flags.c_contiguous, err_msg)
        elif order == "F":
            if x.size > 0:
                # Size-0 arrays get invalid flags on Numpy 1.5
                assert_(x.flags.f_contiguous, err_msg)
        elif order is None:
            assert_(x.flags.c_contiguous, err_msg)
        else:
            raise ValueError()

    # try various alignments
    for align in [1, 2, 3, 4, 8, 16, 32, 64, None]:
        for n in [0, 1, 3, 11]:
            for order in ["C", "F", None]:
                for dtype in [np.uint8, np.float64]:
                    for shape in [n, (1, 2, 3, n)]:
                        for j in range(niter):
                            check(shape, dtype, order, align)


def test_check_random_state():
    # If seed is None, return the RandomState singleton used by np.random.
    # If seed is an int, return a new RandomState instance seeded with seed.
    # If seed is already a RandomState instance, return it.
    # Otherwise raise ValueError.
    rsi = check_random_state(1)
    assert_equal(type(rsi), np.random.RandomState)
    rsi = check_random_state(rsi)
    assert_equal(type(rsi), np.random.RandomState)
    rsi = check_random_state(None)
    assert_equal(type(rsi), np.random.RandomState)
    assert_raises(ValueError, check_random_state, 'a')
2,104
35.929825
76
py
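`check_random_state`, tested above, is the usual seed-normalisation idiom: `None`, an integer seed, or an existing `RandomState` all come back as a `RandomState` instance, so downstream code can draw numbers the same way regardless of what the caller passed. A small sketch of that pattern (the `jittered` function is invented for illustration):

import numpy as np
from scipy._lib._util import check_random_state  # private helper, as in the test above

def jittered(x, scale=0.1, seed=None):
    # None, an int, or a RandomState all yield a usable RandomState here.
    rng = check_random_state(seed)
    x = np.asarray(x, dtype=float)
    return x + scale * rng.standard_normal(x.shape)

print(jittered([1.0, 2.0, 3.0], seed=42))
print(jittered([1.0, 2.0, 3.0], seed=np.random.RandomState(42)))  # same values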
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test_import_cycles.py
from __future__ import division, print_function, absolute_import

import sys
import subprocess


MODULES = [
    "scipy.cluster",
    "scipy.cluster.vq",
    "scipy.cluster.hierarchy",
    "scipy.constants",
    "scipy.fftpack",
    "scipy.integrate",
    "scipy.interpolate",
    "scipy.io",
    "scipy.io.arff",
    "scipy.io.harwell_boeing",
    "scipy.io.idl",
    "scipy.io.matlab",
    "scipy.io.netcdf",
    "scipy.io.wavfile",
    "scipy.linalg",
    "scipy.linalg.blas",
    "scipy.linalg.cython_blas",
    "scipy.linalg.lapack",
    "scipy.linalg.cython_lapack",
    "scipy.linalg.interpolative",
    "scipy.misc",
    "scipy.ndimage",
    "scipy.odr",
    "scipy.optimize",
    "scipy.signal",
    "scipy.signal.windows",
    "scipy.sparse",
    "scipy.sparse.linalg",
    "scipy.sparse.csgraph",
    "scipy.spatial",
    "scipy.spatial.distance",
    "scipy.special",
    "scipy.stats",
    "scipy.stats.distributions",
    "scipy.stats.mstats",
]


def test_modules_importable():
    # Check that all modules are importable in a new Python
    # process. This is not necessarily true (esp on Python 2) if there
    # are import cycles present.
    for module in MODULES:
        cmd = 'import {}'.format(module)
        subprocess.check_call([sys.executable, '-c', cmd])
1,284
23.245283
70
py
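The subprocess trick above generalises to any package: importing each module in a fresh interpreter surfaces import-time failures (including cycle-related ones) that an already-populated `sys.modules` in the current process would hide. A generic sketch (the helper name and module list are arbitrary):

import subprocess
import sys

def check_importable(modules):
    # Import each module in a fresh Python process; fail fast on the first error.
    for module in modules:
        subprocess.check_call([sys.executable, "-c", "import {}".format(module)])

check_importable(["json", "xml.etree.ElementTree"])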
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test_ccallback.py
from __future__ import division, print_function, absolute_import from numpy.testing import assert_equal, assert_ from pytest import raises as assert_raises import time import pytest import ctypes import threading from scipy._lib import _ccallback_c as _test_ccallback_cython from scipy._lib import _test_ccallback from scipy._lib._ccallback import LowLevelCallable try: import cffi HAVE_CFFI = True except ImportError: HAVE_CFFI = False ERROR_VALUE = 2.0 def callback_python(a, user_data=None): if a == ERROR_VALUE: raise ValueError("bad value") if user_data is None: return a + 1 else: return a + user_data def _get_cffi_func(base, signature): if not HAVE_CFFI: pytest.skip("cffi not installed") # Get function address voidp = ctypes.cast(base, ctypes.c_void_p) address = voidp.value # Create corresponding cffi handle ffi = cffi.FFI() func = ffi.cast(signature, address) return func def _get_ctypes_data(): value = ctypes.c_double(2.0) return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp) def _get_cffi_data(): if not HAVE_CFFI: pytest.skip("cffi not installed") ffi = cffi.FFI() return ffi.new('double *', 2.0) CALLERS = { 'simple': _test_ccallback.test_call_simple, 'nodata': _test_ccallback.test_call_nodata, 'nonlocal': _test_ccallback.test_call_nonlocal, 'cython': _test_ccallback_cython.test_call_cython, } # These functions have signatures known to the callers FUNCS = { 'python': lambda: callback_python, 'capsule': lambda: _test_ccallback.test_get_plus1_capsule(), 'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1_cython"), 'ctypes': lambda: _test_ccallback_cython.plus1_ctypes, 'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes, 'double (*)(double, int *, void *)'), 'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(), 'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1b_cython"), 'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes, 'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes, 'double (*)(double, double, int *, void *)'), } # These functions have signatures the callers don't know BAD_FUNCS = { 'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(), 'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1bc_cython"), 'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes, 'cffi_bc': lambda: _get_cffi_func(_test_ccallback_cython.plus1bc_ctypes, 'double (*)(double, double, double, int *, void *)'), } USER_DATAS = { 'ctypes': _get_ctypes_data, 'cffi': _get_cffi_data, 'capsule': _test_ccallback.test_get_data_capsule, } def test_callbacks(): def check(caller, func, user_data): caller = CALLERS[caller] func = FUNCS[func]() user_data = USER_DATAS[user_data]() if func is callback_python: func2 = lambda x: func(x, 2.0) else: func2 = LowLevelCallable(func, user_data) func = LowLevelCallable(func) # Test basic call assert_equal(caller(func, 1.0), 2.0) # Test 'bad' value resulting to an error assert_raises(ValueError, caller, func, ERROR_VALUE) # Test passing in user_data assert_equal(caller(func2, 1.0), 3.0) for caller in sorted(CALLERS.keys()): for func in sorted(FUNCS.keys()): for user_data in sorted(USER_DATAS.keys()): check(caller, func, user_data) def test_bad_callbacks(): def check(caller, func, user_data): caller = CALLERS[caller] user_data = USER_DATAS[user_data]() func = BAD_FUNCS[func]() if func is callback_python: func2 = lambda x: func(x, 2.0) else: func2 = LowLevelCallable(func, user_data) func = LowLevelCallable(func) # Test that basic call fails 
assert_raises(ValueError, caller, LowLevelCallable(func), 1.0) # Test that passing in user_data also fails assert_raises(ValueError, caller, func2, 1.0) # Test error message llfunc = LowLevelCallable(func) try: caller(llfunc, 1.0) except ValueError as err: msg = str(err) assert_(llfunc.signature in msg, msg) assert_('double (double, double, int *, void *)' in msg, msg) for caller in sorted(CALLERS.keys()): for func in sorted(BAD_FUNCS.keys()): for user_data in sorted(USER_DATAS.keys()): check(caller, func, user_data) def test_signature_override(): caller = _test_ccallback.test_call_simple func = _test_ccallback.test_get_plus1_capsule() llcallable = LowLevelCallable(func, signature="bad signature") assert_equal(llcallable.signature, "bad signature") assert_raises(ValueError, caller, llcallable, 3) llcallable = LowLevelCallable(func, signature="double (double, int *, void *)") assert_equal(llcallable.signature, "double (double, int *, void *)") assert_equal(caller(llcallable, 3), 4) def test_threadsafety(): def callback(a, caller): if a <= 0: return 1 else: res = caller(lambda x: callback(x, caller), a - 1) return 2*res def check(caller): caller = CALLERS[caller] results = [] count = 10 def run(): time.sleep(0.01) r = caller(lambda x: callback(x, caller), count) results.append(r) threads = [threading.Thread(target=run) for j in range(20)] for thread in threads: thread.start() for thread in threads: thread.join() assert_equal(results, [2.0**count]*len(threads)) for caller in CALLERS.keys(): check(caller)
6,061
29.31
96
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test__threadsafety.py
from __future__ import division, print_function, absolute_import

import threading
import time
import traceback

from numpy.testing import assert_
from pytest import raises as assert_raises

from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError


def test_parallel_threads():
    # Check that ReentrancyLock serializes work in parallel threads.
    #
    # The test is not fully deterministic, and may succeed falsely if
    # the timings go wrong.

    lock = ReentrancyLock("failure")

    failflag = [False]
    exceptions_raised = []

    def worker(k):
        try:
            with lock:
                assert_(not failflag[0])
                failflag[0] = True
                time.sleep(0.1 * k)
                assert_(failflag[0])
                failflag[0] = False
        except:
            exceptions_raised.append(traceback.format_exc(2))

    threads = [threading.Thread(target=lambda k=k: worker(k))
               for k in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    exceptions_raised = "\n".join(exceptions_raised)
    assert_(not exceptions_raised, exceptions_raised)


def test_reentering():
    # Check that ReentrancyLock prevents re-entering from the same thread.

    @non_reentrant()
    def func(x):
        return func(x)

    assert_raises(ReentrancyError, func, 0)
1,378
24.537037
83
py
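As the two tests above show, `ReentrancyLock` behaves like an ordinary lock across threads but raises `ReentrancyError` instead of deadlocking when the same thread re-enters, and `non_reentrant()` wraps a function in such a lock. A brief usage sketch (the lock message and function names are arbitrary; the module is private to SciPy):

from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError

lock = ReentrancyLock("this section is not re-entrant")

def guarded():
    with lock:        # serialises access across threads; raises ReentrancyError
        pass          # if the same thread tries to take the lock again

@non_reentrant()
def recursive():
    return recursive()   # the nested call re-enters and raises ReentrancyError

guarded()
try:
    recursive()
except ReentrancyError:
    print("re-entry detected")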
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/test__gcutils.py
""" Test for assert_deallocated context manager and gc utilities """ from __future__ import division, print_function, absolute_import import gc from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated, ReferenceError, IS_PYPY) from numpy.testing import assert_equal import pytest def test_set_gc_state(): gc_status = gc.isenabled() try: for state in (True, False): gc.enable() set_gc_state(state) assert_equal(gc.isenabled(), state) gc.disable() set_gc_state(state) assert_equal(gc.isenabled(), state) finally: if gc_status: gc.enable() def test_gc_state(): # Test gc_state context manager gc_status = gc.isenabled() try: for pre_state in (True, False): set_gc_state(pre_state) for with_state in (True, False): # Check the gc state is with_state in with block with gc_state(with_state): assert_equal(gc.isenabled(), with_state) # And returns to previous state outside block assert_equal(gc.isenabled(), pre_state) # Even if the gc state is set explicitly within the block with gc_state(with_state): assert_equal(gc.isenabled(), with_state) set_gc_state(not with_state) assert_equal(gc.isenabled(), pre_state) finally: if gc_status: gc.enable() @pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") def test_assert_deallocated(): # Ordinary use class C(object): def __init__(self, arg0, arg1, name='myname'): self.name = name for gc_current in (True, False): with gc_state(gc_current): # We are deleting from with-block context, so that's OK with assert_deallocated(C, 0, 2, 'another name') as c: assert_equal(c.name, 'another name') del c # Or not using the thing in with-block context, also OK with assert_deallocated(C, 0, 2, name='third name'): pass assert_equal(gc.isenabled(), gc_current) @pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") def test_assert_deallocated_nodel(): class C(object): pass with pytest.raises(ReferenceError): # Need to delete after using if in with-block context with assert_deallocated(C) as c: pass @pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") def test_assert_deallocated_circular(): class C(object): def __init__(self): self._circular = self with pytest.raises(ReferenceError): # Circular reference, no automatic garbage collection with assert_deallocated(C) as c: del c @pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") def test_assert_deallocated_circular2(): class C(object): def __init__(self): self._circular = self with pytest.raises(ReferenceError): # Still circular reference, no automatic garbage collection with assert_deallocated(C): pass
3,276
31.77
76
py
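`assert_deallocated`, exercised above, constructs an object inside the `with` block and raises `ReferenceError` if the object is still referenced when the block exits; as the circular-reference tests note, automatic garbage collection is kept out of the picture, so only reference counting can free it. A compact usage sketch (class names are invented):

from scipy._lib._gcutils import assert_deallocated, ReferenceError, IS_PYPY

class Plain(object):
    pass

class SelfRef(object):
    def __init__(self):
        self._circular = self   # a cycle only the garbage collector could break

if not IS_PYPY:   # the check relies on CPython-style reference counting
    with assert_deallocated(Plain) as obj:
        del obj                 # no references remain, so the block exits cleanly

    try:
        with assert_deallocated(SelfRef) as obj:
            del obj             # still alive through the circular reference
    except ReferenceError:
        print("leak detected")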
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/_lib/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/cluster/hierarchy.py
""" ======================================================== Hierarchical clustering (:mod:`scipy.cluster.hierarchy`) ======================================================== .. currentmodule:: scipy.cluster.hierarchy These functions cut hierarchical clusterings into flat clusterings or find the roots of the forest formed by a cut by providing the flat cluster ids of each observation. .. autosummary:: :toctree: generated/ fcluster fclusterdata leaders These are routines for agglomerative clustering. .. autosummary:: :toctree: generated/ linkage single complete average weighted centroid median ward These routines compute statistics on hierarchies. .. autosummary:: :toctree: generated/ cophenet from_mlab_linkage inconsistent maxinconsts maxdists maxRstat to_mlab_linkage Routines for visualizing flat clusters. .. autosummary:: :toctree: generated/ dendrogram These are data structures and routines for representing hierarchies as tree objects. .. autosummary:: :toctree: generated/ ClusterNode leaves_list to_tree cut_tree optimal_leaf_ordering These are predicates for checking the validity of linkage and inconsistency matrices as well as for checking isomorphism of two flat cluster assignments. .. autosummary:: :toctree: generated/ is_valid_im is_valid_linkage is_isomorphic is_monotonic correspond num_obs_linkage Utility routines for plotting: .. autosummary:: :toctree: generated/ set_link_color_palette References ---------- .. [1] "Statistics toolbox." API Reference Documentation. The MathWorks. http://www.mathworks.com/access/helpdesk/help/toolbox/stats/. Accessed October 1, 2007. .. [2] "Hierarchical clustering." API Reference Documentation. The Wolfram Research, Inc. https://reference.wolfram.com/language/HierarchicalClustering/tutorial/HierarchicalClustering.html. Accessed October 1, 2007. .. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969. .. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective function." Journal of the American Statistical Association. 58(301): pp. 236--44. 1963. .. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika. 32(2): pp. 241--54. 1966. .. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp. 855--60. 1962. .. [7] Batagelj, V. "Comparing resemblance measures." Journal of Classification. 12: pp. 73--90. 1995. .. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating systematic relationships." Scientific Bulletins. 38(22): pp. 1409--38. 1958. .. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering algorithms: the problem of classifying everybody." Multivariate Behavioral Research. 14: pp. 367--84. 1979. .. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data." Prentice-Hall. Englewood Cliffs, NJ. 1988. .. [11] Fisher, RA "The use of multiple measurements in taxonomic problems." Annals of Eugenics, 7(2): 179-188. 1936 * MATLAB and MathWorks are registered trademarks of The MathWorks, Inc. * Mathematica is a registered trademark of The Wolfram Research, Inc. """ from __future__ import division, print_function, absolute_import # Copyright (C) Damian Eads, 2007-2008. New BSD License. # hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com) # # Author: Damian Eads # Date: September 22, 2007 # # Copyright (c) 2007, 2008, Damian Eads # # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # - Redistributions of source code must retain the above # copyright notice, this list of conditions and the # following disclaimer. # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # - Neither the name of the author nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import warnings import bisect from collections import deque import numpy as np from . import _hierarchy, _optimal_leaf_ordering import scipy.spatial.distance as distance from scipy._lib.six import string_types from scipy._lib.six import xrange _LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3, 'median': 4, 'ward': 5, 'weighted': 6} _EUCLIDEAN_METHODS = ('centroid', 'median', 'ward') __all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster', 'fclusterdata', 'from_mlab_linkage', 'inconsistent', 'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering', 'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance'] class ClusterWarning(UserWarning): pass def _warning(s): warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3) def _copy_array_if_base_present(a): """ Copy the array if its base points to a parent array. """ if a.base is not None: return a.copy() elif np.issubsctype(a, np.float32): return np.array(a, dtype=np.double) else: return a def _copy_arrays_if_base_present(T): """ Accept a tuple of arrays T. Copies the array T[i] if its base array points to an actual array. Otherwise, the reference is just copied. This is useful if the arrays are being passed to a C function that does not do proper striding. """ l = [_copy_array_if_base_present(a) for a in T] return l def _randdm(pnts): """ Generate a random distance matrix stored in condensed form. Parameters ---------- pnts : int The number of points in the distance matrix. Has to be at least 2. Returns ------- D : ndarray A ``pnts * (pnts - 1) / 2`` sized vector is returned. """ if pnts >= 2: D = np.random.rand(pnts * (pnts - 1) / 2) else: raise ValueError("The number of points in the distance matrix " "must be at least 2.") return D def single(y): """ Perform single/min/nearest linkage on the condensed distance matrix ``y``. 
Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. Returns ------- Z : ndarray The linkage matrix. See Also -------- linkage: for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics """ return linkage(y, method='single', metric='euclidean') def complete(y): """ Perform complete/max/farthest point linkage on a condensed distance matrix. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. Returns ------- Z : ndarray A linkage matrix containing the hierarchical clustering. See the `linkage` function documentation for more information on its structure. See Also -------- linkage: for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics """ return linkage(y, method='complete', metric='euclidean') def average(y): """ Perform average/UPGMA linkage on a condensed distance matrix. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. Returns ------- Z : ndarray A linkage matrix containing the hierarchical clustering. See `linkage` for more information on its structure. See Also -------- linkage: for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics """ return linkage(y, method='average', metric='euclidean') def weighted(y): """ Perform weighted/WPGMA linkage on the condensed distance matrix. See `linkage` for more information on the return structure and algorithm. Parameters ---------- y : ndarray The upper triangular of the distance matrix. The result of ``pdist`` is returned in this form. Returns ------- Z : ndarray A linkage matrix containing the hierarchical clustering. See `linkage` for more information on its structure. See Also -------- linkage : for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics """ return linkage(y, method='weighted', metric='euclidean') def centroid(y): """ Perform centroid/UPGMC linkage. See `linkage` for more information on the input matrix, return structure, and algorithm. The following are common calling conventions: 1. ``Z = centroid(y)`` Performs centroid/UPGMC linkage on the condensed distance matrix ``y``. 2. ``Z = centroid(X)`` Performs centroid/UPGMC linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. Parameters ---------- y : ndarray A condensed distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as a m by n array. Returns ------- Z : ndarray A linkage matrix containing the hierarchical clustering. See the `linkage` function documentation for more information on its structure. See Also -------- linkage: for advanced creation of hierarchical clusterings. """ return linkage(y, method='centroid', metric='euclidean') def median(y): """ Perform median/WPGMC linkage. See `linkage` for more information on the return structure and algorithm. The following are common calling conventions: 1. ``Z = median(y)`` Performs median/WPGMC linkage on the condensed distance matrix ``y``. See ``linkage`` for more information on the return structure and algorithm. 2. 
``Z = median(X)`` Performs median/WPGMC linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. See `linkage` for more information on the return structure and algorithm. Parameters ---------- y : ndarray A condensed distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as a m by n array. Returns ------- Z : ndarray The hierarchical clustering encoded as a linkage matrix. See Also -------- linkage: for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics """ return linkage(y, method='median', metric='euclidean') def ward(y): """ Perform Ward's linkage on a condensed distance matrix. See `linkage` for more information on the return structure and algorithm. The following are common calling conventions: 1. ``Z = ward(y)`` Performs Ward's linkage on the condensed distance matrix ``y``. 2. ``Z = ward(X)`` Performs Ward's linkage on the observation matrix ``X`` using Euclidean distance as the distance metric. Parameters ---------- y : ndarray A condensed distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of m observation vectors in n dimensions may be passed as a m by n array. Returns ------- Z : ndarray The hierarchical clustering encoded as a linkage matrix. See `linkage` for more information on the return structure and algorithm. See Also -------- linkage: for advanced creation of hierarchical clusterings. scipy.spatial.distance.pdist : pairwise distance metrics """ return linkage(y, method='ward', metric='euclidean') def linkage(y, method='single', metric='euclidean', optimal_ordering=False): """ Perform hierarchical/agglomerative clustering. The input y may be either a 1d condensed distance matrix or a 2d array of observation vectors. If y is a 1d condensed distance matrix, then y must be a :math:`\\binom{n}{2}` sized vector where n is the number of original observations paired in the distance matrix. The behavior of this function is very similar to the MATLAB linkage function. A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A cluster with an index less than :math:`n` corresponds to one of the :math:`n` original observations. The distance between clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The fourth value ``Z[i, 3]`` represents the number of original observations in the newly formed cluster. The following linkage methods are used to compute the distance :math:`d(s, t)` between two clusters :math:`s` and :math:`t`. The algorithm begins with a forest of clusters that have yet to be used in the hierarchy being formed. When two clusters :math:`s` and :math:`t` from this forest are combined into a single cluster :math:`u`, :math:`s` and :math:`t` are removed from the forest, and :math:`u` is added to the forest. When only one cluster remains in the forest, the algorithm stops, and this cluster becomes the root. A distance matrix is maintained at each iteration. The ``d[i,j]`` entry corresponds to the distance between cluster :math:`i` and :math:`j` in the original forest. 
At each iteration, the algorithm must update the distance matrix to reflect the distance of the newly formed cluster u with the remaining clusters in the forest. Suppose there are :math:`|u|` original observations :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in cluster :math:`v`. Recall :math:`s` and :math:`t` are combined to form cluster :math:`u`. Let :math:`v` be any remaining cluster in the forest that is not :math:`u`. The following are methods for calculating the distance between the newly formed cluster :math:`u` and each :math:`v`. * method='single' assigns .. math:: d(u,v) = \\min(dist(u[i],v[j])) for all points :math:`i` in cluster :math:`u` and :math:`j` in cluster :math:`v`. This is also known as the Nearest Point Algorithm. * method='complete' assigns .. math:: d(u, v) = \\max(dist(u[i],v[j])) for all points :math:`i` in cluster u and :math:`j` in cluster :math:`v`. This is also known by the Farthest Point Algorithm or Voor Hees Algorithm. * method='average' assigns .. math:: d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])} {(|u|*|v|)} for all points :math:`i` and :math:`j` where :math:`|u|` and :math:`|v|` are the cardinalities of clusters :math:`u` and :math:`v`, respectively. This is also called the UPGMA algorithm. * method='weighted' assigns .. math:: d(u,v) = (dist(s,v) + dist(t,v))/2 where cluster u was formed with cluster s and t and v is a remaining cluster in the forest. (also called WPGMA) * method='centroid' assigns .. math:: dist(s,t) = ||c_s-c_t||_2 where :math:`c_s` and :math:`c_t` are the centroids of clusters :math:`s` and :math:`t`, respectively. When two clusters :math:`s` and :math:`t` are combined into a new cluster :math:`u`, the new centroid is computed over all the original objects in clusters :math:`s` and :math:`t`. The distance then becomes the Euclidean distance between the centroid of :math:`u` and the centroid of a remaining cluster :math:`v` in the forest. This is also known as the UPGMC algorithm. * method='median' assigns :math:`d(s,t)` like the ``centroid`` method. When two clusters :math:`s` and :math:`t` are combined into a new cluster :math:`u`, the average of centroids s and t give the new centroid :math:`u`. This is also known as the WPGMC algorithm. * method='ward' uses the Ward variance minimization algorithm. The new entry :math:`d(u,v)` is computed as follows, .. math:: d(u,v) = \\sqrt{\\frac{|v|+|s|} {T}d(v,s)^2 + \\frac{|v|+|t|} {T}d(v,t)^2 - \\frac{|v|} {T}d(s,t)^2} where :math:`u` is the newly joined cluster consisting of clusters :math:`s` and :math:`t`, :math:`v` is an unused cluster in the forest, :math:`T=|v|+|s|+|t|`, and :math:`|*|` is the cardinality of its argument. This is also known as the incremental algorithm. Warning: When the minimum distance pair in the forest is chosen, there may be two or more pairs with the same minimum distance. This implementation may choose a different minimum than the MATLAB version. Parameters ---------- y : ndarray A condensed distance matrix. A condensed distance matrix is a flat array containing the upper triangular of the distance matrix. This is the form that ``pdist`` returns. Alternatively, a collection of :math:`m` observation vectors in :math:`n` dimensions may be passed as an :math:`m` by :math:`n` array. All elements of the condensed distance matrix must be finite, i.e. no NaNs or infs. method : str, optional The linkage algorithm to use. See the ``Linkage Methods`` section below for full descriptions. 
metric : str or function, optional The distance metric to use in the case that y is a collection of observation vectors; ignored otherwise. See the ``pdist`` function for a list of valid distance metrics. A custom distance function can also be used. optimal_ordering : bool, optional If True, the linkage matrix will be reordered so that the distance between successive leaves is minimal. This results in a more intuitive tree structure when the data are visualized. defaults to False, because this algorithm can be slow, particularly on large datasets [2]_. See also the `optimal_leaf_ordering` function. .. versionadded:: 1.0.0 Returns ------- Z : ndarray The hierarchical clustering encoded as a linkage matrix. Notes ----- 1. For method 'single' an optimized algorithm based on minimum spanning tree is implemented. It has time complexity :math:`O(n^2)`. For methods 'complete', 'average', 'weighted' and 'ward' an algorithm called nearest-neighbors chain is implemented. It also has time complexity :math:`O(n^2)`. For other methods a naive algorithm is implemented with :math:`O(n^3)` time complexity. All algorithms use :math:`O(n^2)` memory. Refer to [1]_ for details about the algorithms. 2. Methods 'centroid', 'median' and 'ward' are correctly defined only if Euclidean pairwise metric is used. If `y` is passed as precomputed pairwise distances, then it is a user responsibility to assure that these distances are in fact Euclidean, otherwise the produced result will be incorrect. See Also -------- scipy.spatial.distance.pdist : pairwise distance metrics References ---------- .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering algorithms", :arXiv:`1109.2378v1`. .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, "Fast optimal leaf ordering for hierarchical clustering", 2001. 
Bioinformatics https://doi.org/10.1093/bioinformatics/17.suppl_1.S22 Examples -------- >>> from scipy.cluster.hierarchy import dendrogram, linkage >>> from matplotlib import pyplot as plt >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] >>> Z = linkage(X, 'ward') >>> fig = plt.figure(figsize=(25, 10)) >>> dn = dendrogram(Z) >>> Z = linkage(X, 'single') >>> fig = plt.figure(figsize=(25, 10)) >>> dn = dendrogram(Z) >>> plt.show() """ if method not in _LINKAGE_METHODS: raise ValueError("Invalid method: {0}".format(method)) y = _convert_to_double(np.asarray(y, order='c')) if y.ndim == 1: distance.is_valid_y(y, throw=True, name='y') [y] = _copy_arrays_if_base_present([y]) elif y.ndim == 2: if method in _EUCLIDEAN_METHODS and metric != 'euclidean': raise ValueError("Method '{0}' requires the distance metric " "to be Euclidean".format(method)) if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0): if np.all(y >= 0) and np.allclose(y, y.T): _warning('The symmetric non-negative hollow observation ' 'matrix looks suspiciously like an uncondensed ' 'distance matrix') y = distance.pdist(y, metric) else: raise ValueError("`y` must be 1 or 2 dimensional.") if not np.all(np.isfinite(y)): raise ValueError("The condensed distance matrix must contain only " "finite values.") n = int(distance.num_obs_y(y)) method_code = _LINKAGE_METHODS[method] if method == 'single': result = _hierarchy.mst_single_linkage(y, n) elif method in ['complete', 'average', 'weighted', 'ward']: result = _hierarchy.nn_chain(y, n, method_code) else: result = _hierarchy.fast_linkage(y, n, method_code) if optimal_ordering: return optimal_leaf_ordering(result, y) else: return result class ClusterNode: """ A tree node class for representing a cluster. Leaf nodes correspond to original observations, while non-leaf nodes correspond to non-singleton clusters. The `to_tree` function converts a matrix returned by the linkage function into an easy-to-use tree representation. All parameter names are also attributes. Parameters ---------- id : int The node id. left : ClusterNode instance, optional The left child tree node. right : ClusterNode instance, optional The right child tree node. dist : float, optional Distance for this cluster in the linkage matrix. count : int, optional The number of samples in this cluster. See Also -------- to_tree : for converting a linkage matrix ``Z`` into a tree object. """ def __init__(self, id, left=None, right=None, dist=0, count=1): if id < 0: raise ValueError('The id must be non-negative.') if dist < 0: raise ValueError('The distance must be non-negative.') if (left is None and right is not None) or \ (left is not None and right is None): raise ValueError('Only full or proper binary trees are permitted.' 
' This node has one child.') if count < 1: raise ValueError('A cluster must contain at least one original ' 'observation.') self.id = id self.left = left self.right = right self.dist = dist if self.left is None: self.count = count else: self.count = left.count + right.count def __lt__(self, node): if not isinstance(node, ClusterNode): raise ValueError("Can't compare ClusterNode " "to type {}".format(type(node))) return self.dist < node.dist def __gt__(self, node): if not isinstance(node, ClusterNode): raise ValueError("Can't compare ClusterNode " "to type {}".format(type(node))) return self.dist > node.dist def __eq__(self, node): if not isinstance(node, ClusterNode): raise ValueError("Can't compare ClusterNode " "to type {}".format(type(node))) return self.dist == node.dist def get_id(self): """ The identifier of the target node. For ``0 <= i < n``, `i` corresponds to original observation i. For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed at iteration ``i-n``. Returns ------- id : int The identifier of the target node. """ return self.id def get_count(self): """ The number of leaf nodes (original observations) belonging to the cluster node nd. If the target node is a leaf, 1 is returned. Returns ------- get_count : int The number of leaf nodes below the target node. """ return self.count def get_left(self): """ Return a reference to the left child tree object. Returns ------- left : ClusterNode The left child of the target node. If the node is a leaf, None is returned. """ return self.left def get_right(self): """ Return a reference to the right child tree object. Returns ------- right : ClusterNode The left child of the target node. If the node is a leaf, None is returned. """ return self.right def is_leaf(self): """ Return True if the target node is a leaf. Returns ------- leafness : bool True if the target node is a leaf node. """ return self.left is None def pre_order(self, func=(lambda x: x.id)): """ Perform pre-order traversal without recursive function calls. When a leaf node is first encountered, ``func`` is called with the leaf node as its argument, and its result is appended to the list. For example, the statement:: ids = root.pre_order(lambda x: x.id) returns a list of the node ids corresponding to the leaf nodes of the tree as they appear from left to right. Parameters ---------- func : function Applied to each leaf ClusterNode object in the pre-order traversal. Given the ``i``-th leaf node in the pre-order traversal ``n[i]``, the result of ``func(n[i])`` is stored in ``L[i]``. If not provided, the index of the original observation to which the node corresponds is used. Returns ------- L : list The pre-order traversal. """ # Do a preorder traversal, caching the result. To avoid having to do # recursion, we'll store the previous index we've visited in a vector. n = self.count curNode = [None] * (2 * n) lvisited = set() rvisited = set() curNode[0] = self k = 0 preorder = [] while k >= 0: nd = curNode[k] ndid = nd.id if nd.is_leaf(): preorder.append(func(nd)) k = k - 1 else: if ndid not in lvisited: curNode[k + 1] = nd.left lvisited.add(ndid) k = k + 1 elif ndid not in rvisited: curNode[k + 1] = nd.right rvisited.add(ndid) k = k + 1 # If we've visited the left and right of this non-leaf # node already, go up in the tree. else: k = k - 1 return preorder _cnode_bare = ClusterNode(0) _cnode_type = type(ClusterNode) def _order_cluster_tree(Z): """ Return clustering nodes in bottom-up order by distance. 
Parameters ---------- Z : scipy.cluster.linkage array The linkage matrix. Returns ------- nodes : list A list of ClusterNode objects. """ q = deque() tree = to_tree(Z) q.append(tree) nodes = [] while q: node = q.popleft() if not node.is_leaf(): bisect.insort_left(nodes, node) q.append(node.get_right()) q.append(node.get_left()) return nodes def cut_tree(Z, n_clusters=None, height=None): """ Given a linkage matrix Z, return the cut tree. Parameters ---------- Z : scipy.cluster.linkage array The linkage matrix. n_clusters : array_like, optional Number of clusters in the tree at the cut point. height : array_like, optional The height at which to cut the tree. Only possible for ultrametric trees. Returns ------- cutree : array An array indicating group membership at each agglomeration step. I.e., for a full cut tree, in the first column each data point is in its own cluster. At the next step, two nodes are merged. Finally all singleton and non-singleton clusters are in one group. If `n_clusters` or `height` is given, the columns correspond to the columns of `n_clusters` or `height`. Examples -------- >>> from scipy import cluster >>> np.random.seed(23) >>> X = np.random.randn(50, 4) >>> Z = cluster.hierarchy.ward(X) >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10]) >>> cutree[:10] array([[0, 0], [1, 1], [2, 2], [3, 3], [3, 4], [2, 2], [0, 0], [1, 5], [3, 6], [4, 7]]) """ nobs = num_obs_linkage(Z) nodes = _order_cluster_tree(Z) if height is not None and n_clusters is not None: raise ValueError("At least one of either height or n_clusters " "must be None") elif height is None and n_clusters is None: # return the full cut tree cols_idx = np.arange(nobs) elif height is not None: heights = np.array([x.dist for x in nodes]) cols_idx = np.searchsorted(heights, height) else: cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters) try: n_cols = len(cols_idx) except TypeError: # scalar n_cols = 1 cols_idx = np.array([cols_idx]) groups = np.zeros((n_cols, nobs), dtype=int) last_group = np.arange(nobs) if 0 in cols_idx: groups[0] = last_group for i, node in enumerate(nodes): idx = node.pre_order() this_group = last_group.copy() this_group[idx] = last_group[idx].min() this_group[this_group > last_group[idx].max()] -= 1 if i + 1 in cols_idx: groups[np.where(i + 1 == cols_idx)[0]] = this_group last_group = this_group return groups.T def to_tree(Z, rd=False): """ Convert a linkage matrix into an easy-to-use tree object. The reference to the root `ClusterNode` object is returned (by default). Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``, and ``count`` attribute. The left and right attributes point to ClusterNode objects that were combined to generate the cluster. If both are None then the `ClusterNode` object is a leaf node, its count must be 1, and its distance is meaningless but set to 0. *Note: This function is provided for the convenience of the library user. ClusterNodes are not used as input to any of the functions in this library.* Parameters ---------- Z : ndarray The linkage matrix in proper form (see the `linkage` function documentation). rd : bool, optional When False (default), a reference to the root `ClusterNode` object is returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a reference to the root node while ``d`` is a list of `ClusterNode` objects - one per original entry in the linkage matrix plus entries for all clustering steps. 
If a cluster id is less than the number of samples ``n`` in the data that the linkage matrix describes, then it corresponds to a singleton cluster (leaf node). See `linkage` for more information on the assignment of cluster ids to clusters. Returns ------- tree : ClusterNode or tuple (ClusterNode, list of ClusterNode) If ``rd`` is False, a `ClusterNode`. If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number of samples. See the description of `rd` above for more details. See Also -------- linkage, is_valid_linkage, ClusterNode Examples -------- >>> from scipy.cluster import hierarchy >>> x = np.random.rand(10).reshape(5, 2) >>> Z = hierarchy.linkage(x) >>> hierarchy.to_tree(Z) <scipy.cluster.hierarchy.ClusterNode object at ... >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True) >>> rootnode <scipy.cluster.hierarchy.ClusterNode object at ... >>> len(nodelist) 9 """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') # Number of original objects is equal to the number of rows minus 1. n = Z.shape[0] + 1 # Create a list full of None's to store the node objects d = [None] * (n * 2 - 1) # Create the nodes corresponding to the n original objects. for i in xrange(0, n): d[i] = ClusterNode(i) nd = None for i in xrange(0, n - 1): fi = int(Z[i, 0]) fj = int(Z[i, 1]) if fi > i + n: raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' 'is used before it is formed. See row %d, ' 'column 0') % fi) if fj > i + n: raise ValueError(('Corrupt matrix Z. Index to derivative cluster ' 'is used before it is formed. See row %d, ' 'column 1') % fj) nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2]) # ^ id ^ left ^ right ^ dist if Z[i, 3] != nd.count: raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is ' 'incorrect.') % i) d[n + i] = nd if rd: return (nd, d) else: return nd def optimal_leaf_ordering(Z, y, metric='euclidean'): """ Given a linkage matrix Z and distance, reorder the cut tree. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a linkage matrix. See `linkage` for more information on the return structure and algorithm. y : ndarray The condensed distance matrix from which Z was generated. Alternatively, a collection of m observation vectors in n dimensions may be passed as a m by n array. metric : str or function, optional The distance metric to use in the case that y is a collection of observation vectors; ignored otherwise. See the ``pdist`` function for a list of valid distance metrics. A custom distance function can also be used. Returns ------- Z_ordered : ndarray A copy of the linkage matrix Z, reordered to minimize the distance between adjacent leaves. 
Examples -------- >>> from scipy.cluster import hierarchy >>> np.random.seed(23) >>> X = np.random.randn(10,10) >>> Z = hierarchy.ward(X) >>> hierarchy.leaves_list(Z) array([0, 5, 3, 9, 6, 8, 1, 4, 2, 7], dtype=int32) >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X)) array([3, 9, 0, 5, 8, 2, 7, 4, 1, 6], dtype=int32) """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') y = _convert_to_double(np.asarray(y, order='c')) if y.ndim == 1: distance.is_valid_y(y, throw=True, name='y') [y] = _copy_arrays_if_base_present([y]) elif y.ndim == 2: if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0): if np.all(y >= 0) and np.allclose(y, y.T): _warning('The symmetric non-negative hollow observation ' 'matrix looks suspiciously like an uncondensed ' 'distance matrix') y = distance.pdist(y, metric) else: raise ValueError("`y` must be 1 or 2 dimensional.") if not np.all(np.isfinite(y)): raise ValueError("The condensed distance matrix must contain only " "finite values.") return _optimal_leaf_ordering.optimal_leaf_ordering(Z, y) def _convert_to_bool(X): if X.dtype != bool: X = X.astype(bool) if not X.flags.contiguous: X = X.copy() return X def _convert_to_double(X): if X.dtype != np.double: X = X.astype(np.double) if not X.flags.contiguous: X = X.copy() return X def cophenet(Z, Y=None): """ Calculate the cophenetic distances between each observation in the hierarchical clustering defined by the linkage ``Z``. Suppose ``p`` and ``q`` are original observations in disjoint clusters ``s`` and ``t``, respectively and ``s`` and ``t`` are joined by a direct parent cluster ``u``. The cophenetic distance between observations ``i`` and ``j`` is simply the distance between clusters ``s`` and ``t``. Parameters ---------- Z : ndarray The hierarchical clustering encoded as an array (see `linkage` function). Y : ndarray (optional) Calculates the cophenetic correlation coefficient ``c`` of a hierarchical clustering defined by the linkage matrix `Z` of a set of :math:`n` observations in :math:`m` dimensions. `Y` is the condensed distance matrix from which `Z` was generated. Returns ------- c : ndarray The cophentic correlation distance (if ``Y`` is passed). d : ndarray The cophenetic distance matrix in condensed form. The :math:`ij` th entry is the cophenetic distance between original observations :math:`i` and :math:`j`. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') Zs = Z.shape n = Zs[0] + 1 zz = np.zeros((n * (n-1)) // 2, dtype=np.double) # Since the C code does not support striding using strides. # The dimensions are used instead. Z = _convert_to_double(Z) _hierarchy.cophenetic_distances(Z, zz, int(n)) if Y is None: return zz Y = np.asarray(Y, order='c') distance.is_valid_y(Y, throw=True, name='Y') z = zz.mean() y = Y.mean() Yy = Y - y Zz = zz - z numerator = (Yy * Zz) denomA = Yy**2 denomB = Zz**2 c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum())) return (c, zz) def inconsistent(Z, d=2): r""" Calculate inconsistency statistics on a linkage matrix. Parameters ---------- Z : ndarray The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical clustering). See `linkage` documentation for more information on its form. d : int, optional The number of links up to `d` levels below each non-singleton cluster. Returns ------- R : ndarray A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link statistics for the non-singleton cluster ``i``. 
The link statistics are computed over the link heights for links :math:`d` levels below the cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard deviation of the link heights, respectively; ``R[i,2]`` is the number of links included in the calculation; and ``R[i,3]`` is the inconsistency coefficient, .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}} {R[i,1]} Notes ----- This function behaves similarly to the MATLAB(TM) ``inconsistent`` function. Examples -------- >>> from scipy.cluster.hierarchy import inconsistent, linkage >>> from matplotlib import pyplot as plt >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] >>> Z = linkage(X, 'ward') >>> print(Z) [[ 5. 6. 0. 2. ] [ 2. 7. 0. 2. ] [ 0. 4. 1. 2. ] [ 1. 8. 1.15470054 3. ] [ 9. 10. 2.12132034 4. ] [ 3. 12. 4.11096096 5. ] [11. 13. 14.07183949 8. ]] >>> inconsistent(Z) array([[ 0. , 0. , 1. , 0. ], [ 0. , 0. , 1. , 0. ], [ 1. , 0. , 1. , 0. ], [ 0.57735027, 0.81649658, 2. , 0.70710678], [ 1.04044011, 1.06123822, 3. , 1.01850858], [ 3.11614065, 1.40688837, 2. , 0.70710678], [ 6.44583366, 6.76770586, 3. , 1.12682288]]) """ Z = np.asarray(Z, order='c') Zs = Z.shape is_valid_linkage(Z, throw=True, name='Z') if (not d == np.floor(d)) or d < 0: raise ValueError('The second argument d must be a nonnegative ' 'integer value.') # Since the C code does not support striding using strides. # The dimensions are used instead. [Z] = _copy_arrays_if_base_present([Z]) n = Zs[0] + 1 R = np.zeros((n - 1, 4), dtype=np.double) _hierarchy.inconsistent(Z, R, int(n), int(d)) return R def from_mlab_linkage(Z): """ Convert a linkage matrix generated by MATLAB(TM) to a new linkage matrix compatible with this module. The conversion does two things: * the indices are converted from ``1..N`` to ``0..(N-1)`` form, and * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the number of original observations (leaves) in the non-singleton cluster ``i``. This function is useful when loading in linkages from legacy data files generated by MATLAB. Parameters ---------- Z : ndarray A linkage matrix generated by MATLAB(TM). Returns ------- ZS : ndarray A linkage matrix compatible with ``scipy.cluster.hierarchy``. """ Z = np.asarray(Z, dtype=np.double, order='c') Zs = Z.shape # If it's empty, return it. if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): return Z.copy() if len(Zs) != 2: raise ValueError("The linkage array must be rectangular.") # If it contains no rows, return it. if Zs[0] == 0: return Z.copy() Zpart = Z.copy() if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]: raise ValueError('The format of the indices is not 1..N') Zpart[:, 0:2] -= 1.0 CS = np.zeros((Zs[0],), dtype=np.double) _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1) return np.hstack([Zpart, CS.reshape(Zs[0], 1)]) def to_mlab_linkage(Z): """ Convert a linkage matrix to a MATLAB(TM) compatible one. Converts a linkage matrix ``Z`` generated by the linkage function of this module to a MATLAB(TM) compatible one. The return linkage matrix has the last column removed and the cluster indices are converted to ``1..N`` indexing. Parameters ---------- Z : ndarray A linkage matrix generated by ``scipy.cluster.hierarchy``. Returns ------- to_mlab_linkage : ndarray A linkage matrix compatible with MATLAB(TM)'s hierarchical clustering functions. The return linkage matrix has the last column removed and the cluster indices are converted to ``1..N`` indexing. 
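    Examples
    --------
    A minimal round-trip sketch (illustrative only; the tiny condensed
    distance matrix and the default ``single`` linkage method are arbitrary
    choices):

    >>> import numpy as np
    >>> from scipy.cluster.hierarchy import linkage, to_mlab_linkage, from_mlab_linkage
    >>> Z = linkage(np.array([1., 3., 7.]))  # condensed distances for 3 points
    >>> ZM = to_mlab_linkage(Z)              # 1-based indices, count column dropped
    >>> ZM.shape
    (2, 3)
    >>> np.allclose(from_mlab_linkage(ZM), Z)
    True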
""" Z = np.asarray(Z, order='c', dtype=np.double) Zs = Z.shape if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0): return Z.copy() is_valid_linkage(Z, throw=True, name='Z') ZP = Z[:, 0:3].copy() ZP[:, 0:2] += 1.0 return ZP def is_monotonic(Z): """ Return True if the linkage passed is monotonic. The linkage is monotonic if for every cluster :math:`s` and :math:`t` joined, the distance between them is no less than the distance between any previously joined clusters. Parameters ---------- Z : ndarray The linkage matrix to check for monotonicity. Returns ------- b : bool A boolean indicating whether the linkage is monotonic. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') # We expect the i'th value to be greater than its successor. return (Z[1:, 2] >= Z[:-1, 2]).all() def is_valid_im(R, warning=False, throw=False, name=None): """Return True if the inconsistency matrix passed is valid. It must be a :math:`n` by 4 array of doubles. The standard deviations ``R[:,1]`` must be nonnegative. The link counts ``R[:,2]`` must be positive and no greater than :math:`n-1`. Parameters ---------- R : ndarray The inconsistency matrix to check for validity. warning : bool, optional When True, issues a Python warning if the linkage matrix passed is invalid. throw : bool, optional When True, throws a Python exception if the linkage matrix passed is invalid. name : str, optional This string refers to the variable name of the invalid linkage matrix. Returns ------- b : bool True if the inconsistency matrix is valid. """ R = np.asarray(R, order='c') valid = True name_str = "%r " % name if name else '' try: if type(R) != np.ndarray: raise TypeError('Variable %spassed as inconsistency matrix is not ' 'a numpy array.' % name_str) if R.dtype != np.double: raise TypeError('Inconsistency matrix %smust contain doubles ' '(double).' % name_str) if len(R.shape) != 2: raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. ' 'be two-dimensional).' % name_str) if R.shape[1] != 4: raise ValueError('Inconsistency matrix %smust have 4 columns.' % name_str) if R.shape[0] < 1: raise ValueError('Inconsistency matrix %smust have at least one ' 'row.' % name_str) if (R[:, 0] < 0).any(): raise ValueError('Inconsistency matrix %scontains negative link ' 'height means.' % name_str) if (R[:, 1] < 0).any(): raise ValueError('Inconsistency matrix %scontains negative link ' 'height standard deviations.' % name_str) if (R[:, 2] < 0).any(): raise ValueError('Inconsistency matrix %scontains negative link ' 'counts.' % name_str) except Exception as e: if throw: raise if warning: _warning(str(e)) valid = False return valid def is_valid_linkage(Z, warning=False, throw=False, name=None): """ Check the validity of a linkage matrix. A linkage matrix is valid if it is a two dimensional array (type double) with :math:`n` rows and 4 columns. The first two columns must contain indices between 0 and :math:`2n-1`. For a given row ``i``, the following two expressions have to hold: .. math:: 0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1 0 \\leq Z[i,1] \\leq i+n-1 I.e. a cluster cannot join another cluster unless the cluster being joined has been generated. Parameters ---------- Z : array_like Linkage matrix. warning : bool, optional When True, issues a Python warning if the linkage matrix passed is invalid. throw : bool, optional When True, throws a Python exception if the linkage matrix passed is invalid. name : str, optional This string refers to the variable name of the invalid linkage matrix. 
Returns ------- b : bool True if the inconsistency matrix is valid. """ Z = np.asarray(Z, order='c') valid = True name_str = "%r " % name if name else '' try: if type(Z) != np.ndarray: raise TypeError('Passed linkage argument %sis not a valid array.' % name_str) if Z.dtype != np.double: raise TypeError('Linkage matrix %smust contain doubles.' % name_str) if len(Z.shape) != 2: raise ValueError('Linkage matrix %smust have shape=2 (i.e. be ' 'two-dimensional).' % name_str) if Z.shape[1] != 4: raise ValueError('Linkage matrix %smust have 4 columns.' % name_str) if Z.shape[0] == 0: raise ValueError('Linkage must be computed on at least two ' 'observations.') n = Z.shape[0] if n > 1: if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()): raise ValueError('Linkage %scontains negative indices.' % name_str) if (Z[:, 2] < 0).any(): raise ValueError('Linkage %scontains negative distances.' % name_str) if (Z[:, 3] < 0).any(): raise ValueError('Linkage %scontains negative counts.' % name_str) if _check_hierarchy_uses_cluster_before_formed(Z): raise ValueError('Linkage %suses non-singleton cluster before ' 'it is formed.' % name_str) if _check_hierarchy_uses_cluster_more_than_once(Z): raise ValueError('Linkage %suses the same cluster more than once.' % name_str) except Exception as e: if throw: raise if warning: _warning(str(e)) valid = False return valid def _check_hierarchy_uses_cluster_before_formed(Z): n = Z.shape[0] + 1 for i in xrange(0, n - 1): if Z[i, 0] >= n + i or Z[i, 1] >= n + i: return True return False def _check_hierarchy_uses_cluster_more_than_once(Z): n = Z.shape[0] + 1 chosen = set([]) for i in xrange(0, n - 1): if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]: return True chosen.add(Z[i, 0]) chosen.add(Z[i, 1]) return False def _check_hierarchy_not_all_clusters_used(Z): n = Z.shape[0] + 1 chosen = set([]) for i in xrange(0, n - 1): chosen.add(int(Z[i, 0])) chosen.add(int(Z[i, 1])) must_chosen = set(range(0, 2 * n - 2)) return len(must_chosen.difference(chosen)) > 0 def num_obs_linkage(Z): """ Return the number of original observations of the linkage matrix passed. Parameters ---------- Z : ndarray The linkage matrix on which to perform the operation. Returns ------- n : int The number of original observations in the linkage. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') return (Z.shape[0] + 1) def correspond(Z, Y): """ Check for correspondence between linkage and condensed distance matrices. They must have the same number of original observations for the check to succeed. This function is useful as a sanity check in algorithms that make extensive use of linkage and distance matrices that must correspond to the same set of original observations. Parameters ---------- Z : array_like The linkage matrix to check for correspondence. Y : array_like The condensed distance matrix to check for correspondence. Returns ------- b : bool A boolean indicating whether the linkage matrix and distance matrix could possibly correspond to one another. """ is_valid_linkage(Z, throw=True) distance.is_valid_y(Y, throw=True) Z = np.asarray(Z, order='c') Y = np.asarray(Y, order='c') return distance.num_obs_y(Y) == num_obs_linkage(Z) def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): """ Form flat clusters from the hierarchical clustering defined by the given linkage matrix. Parameters ---------- Z : ndarray The hierarchical clustering encoded with the matrix returned by the `linkage` function. 
t : float The threshold to apply when forming flat clusters. criterion : str, optional The criterion to use in forming flat clusters. This can be any of the following values: ``inconsistent`` : If a cluster node and all its descendants have an inconsistent value less than or equal to `t` then all its leaf descendants belong to the same flat cluster. When no non-singleton cluster meets this criterion, every node is assigned to its own cluster. (Default) ``distance`` : Forms flat clusters so that the original observations in each flat cluster have no greater a cophenetic distance than `t`. ``maxclust`` : Finds a minimum threshold ``r`` so that the cophenetic distance between any two original observations in the same flat cluster is no more than ``r`` and no more than `t` flat clusters are formed. ``monocrit`` : Forms a flat cluster from a cluster node c with index i when ``monocrit[j] <= t``. For example, to threshold on the maximum mean distance as computed in the inconsistency matrix R with a threshold of 0.8 do:: MR = maxRstat(Z, R, 3) cluster(Z, t=0.8, criterion='monocrit', monocrit=MR) ``maxclust_monocrit`` : Forms a flat cluster from a non-singleton cluster node ``c`` when ``monocrit[i] <= r`` for all cluster indices ``i`` below and including ``c``. ``r`` is minimized such that no more than ``t`` flat clusters are formed. monocrit must be monotonic. For example, to minimize the threshold t on maximum inconsistency values so that no more than 3 flat clusters are formed, do:: MI = maxinconsts(Z, R) cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI) depth : int, optional The maximum depth to perform the inconsistency calculation. It has no meaning for the other criteria. Default is 2. R : ndarray, optional The inconsistency matrix to use for the 'inconsistent' criterion. This matrix is computed if not provided. monocrit : ndarray, optional An array of length n-1. `monocrit[i]` is the statistics upon which non-singleton i is thresholded. The monocrit vector must be monotonic, i.e. given a node c with index i, for all node indices j corresponding to nodes below c, ``monocrit[i] >= monocrit[j]``. Returns ------- fcluster : ndarray An array of length ``n``. ``T[i]`` is the flat cluster number to which original observation ``i`` belongs. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 T = np.zeros((n,), dtype='i') # Since the C code does not support striding using strides. # The dimensions are used instead. [Z] = _copy_arrays_if_base_present([Z]) if criterion == 'inconsistent': if R is None: R = inconsistent(Z, depth) else: R = np.asarray(R, order='c') is_valid_im(R, throw=True, name='R') # Since the C code does not support striding using strides. # The dimensions are used instead. 
[R] = _copy_arrays_if_base_present([R]) _hierarchy.cluster_in(Z, R, T, float(t), int(n)) elif criterion == 'distance': _hierarchy.cluster_dist(Z, T, float(t), int(n)) elif criterion == 'maxclust': _hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t)) elif criterion == 'monocrit': [monocrit] = _copy_arrays_if_base_present([monocrit]) _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n)) elif criterion == 'maxclust_monocrit': [monocrit] = _copy_arrays_if_base_present([monocrit]) _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t)) else: raise ValueError('Invalid cluster formation criterion: %s' % str(criterion)) return T def fclusterdata(X, t, criterion='inconsistent', metric='euclidean', depth=2, method='single', R=None): """ Cluster observation data using a given metric. Clusters the original observations in the n-by-m data matrix X (n observations in m dimensions), using the euclidean distance metric to calculate distances between original observations, performs hierarchical clustering using the single linkage algorithm, and forms flat clusters using the inconsistency method with `t` as the cut-off threshold. A one-dimensional array ``T`` of length ``n`` is returned. ``T[i]`` is the index of the flat cluster to which the original observation ``i`` belongs. Parameters ---------- X : (N, M) ndarray N by M data matrix with N observations in M dimensions. t : float The threshold to apply when forming flat clusters. criterion : str, optional Specifies the criterion for forming flat clusters. Valid values are 'inconsistent' (default), 'distance', or 'maxclust' cluster formation algorithms. See `fcluster` for descriptions. metric : str, optional The distance metric for calculating pairwise distances. See ``distance.pdist`` for descriptions and linkage to verify compatibility with the linkage method. depth : int, optional The maximum depth for the inconsistency calculation. See `inconsistent` for more information. method : str, optional The linkage method to use (single, complete, average, weighted, median centroid, ward). See `linkage` for more information. Default is "single". R : ndarray, optional The inconsistency matrix. It will be computed if necessary if it is not passed. Returns ------- fclusterdata : ndarray A vector of length n. T[i] is the flat cluster number to which original observation i belongs. See Also -------- scipy.spatial.distance.pdist : pairwise distance metrics Notes ----- This function is similar to the MATLAB function ``clusterdata``. """ X = np.asarray(X, order='c', dtype=np.double) if type(X) != np.ndarray or len(X.shape) != 2: raise TypeError('The observation matrix X must be an n by m numpy ' 'array.') Y = distance.pdist(X, metric=metric) Z = linkage(Y, method=method) if R is None: R = inconsistent(Z, d=depth) else: R = np.asarray(R, order='c') T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t) return T def leaves_list(Z): """ Return a list of leaf node ids. The return corresponds to the observation vector index as it appears in the tree from left to right. Z is a linkage matrix. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a matrix. `Z` is a linkage matrix. See `linkage` for more information. Returns ------- leaves_list : ndarray The list of leaf node ids. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 ML = np.zeros((n,), dtype='i') [Z] = _copy_arrays_if_base_present([Z]) _hierarchy.prelist(Z, ML, int(n)) return ML # Maps number of leaves to text size. 
# # p <= 20, size="12" # 20 < p <= 30, size="10" # 30 < p <= 50, size="8" # 50 < p <= np.inf, size="6" _dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5} _drotation = {20: 0, 40: 45, np.inf: 90} _dtextsortedkeys = list(_dtextsizes.keys()) _dtextsortedkeys.sort() _drotationsortedkeys = list(_drotation.keys()) _drotationsortedkeys.sort() def _remove_dups(L): """ Remove duplicates AND preserve the original order of the elements. The set class is not guaranteed to do this. """ seen_before = set([]) L2 = [] for i in L: if i not in seen_before: seen_before.add(i) L2.append(i) return L2 def _get_tick_text_size(p): for k in _dtextsortedkeys: if p <= k: return _dtextsizes[k] def _get_tick_rotation(p): for k in _drotationsortedkeys: if p <= k: return _drotation[k] def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, no_labels, color_list, leaf_font_size=None, leaf_rotation=None, contraction_marks=None, ax=None, above_threshold_color='b'): # Import matplotlib here so that it's not imported unless dendrograms # are plotted. Raise an informative error if importing fails. try: # if an axis is provided, don't use pylab at all if ax is None: import matplotlib.pylab import matplotlib.patches import matplotlib.collections except ImportError: raise ImportError("You must install the matplotlib library to plot " "the dendrogram. Use no_plot=True to calculate the " "dendrogram without plotting.") if ax is None: ax = matplotlib.pylab.gca() # if we're using pylab, we want to trigger a draw at the end trigger_redraw = True else: trigger_redraw = False # Independent variable plot width ivw = len(ivl) * 10 # Dependent variable plot height dvw = mh + mh * 0.05 iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10) if orientation in ('top', 'bottom'): if orientation == 'top': ax.set_ylim([0, dvw]) ax.set_xlim([0, ivw]) else: ax.set_ylim([dvw, 0]) ax.set_xlim([0, ivw]) xlines = icoords ylines = dcoords if no_labels: ax.set_xticks([]) ax.set_xticklabels([]) else: ax.set_xticks(iv_ticks) if orientation == 'top': ax.xaxis.set_ticks_position('bottom') else: ax.xaxis.set_ticks_position('top') # Make the tick marks invisible because they cover up the links for line in ax.get_xticklines(): line.set_visible(False) leaf_rot = (float(_get_tick_rotation(len(ivl))) if (leaf_rotation is None) else leaf_rotation) leaf_font = (float(_get_tick_text_size(len(ivl))) if (leaf_font_size is None) else leaf_font_size) ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font) elif orientation in ('left', 'right'): if orientation == 'left': ax.set_xlim([dvw, 0]) ax.set_ylim([0, ivw]) else: ax.set_xlim([0, dvw]) ax.set_ylim([0, ivw]) xlines = dcoords ylines = icoords if no_labels: ax.set_yticks([]) ax.set_yticklabels([]) else: ax.set_yticks(iv_ticks) if orientation == 'left': ax.yaxis.set_ticks_position('right') else: ax.yaxis.set_ticks_position('left') # Make the tick marks invisible because they cover up the links for line in ax.get_yticklines(): line.set_visible(False) leaf_font = (float(_get_tick_text_size(len(ivl))) if (leaf_font_size is None) else leaf_font_size) if leaf_rotation is not None: ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font) else: ax.set_yticklabels(ivl, size=leaf_font) # Let's use collections instead. This way there is a separate legend item # for each tree grouping, rather than stupidly one for each line segment. 
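    # Group the link segments by color so that each color can be drawn as a
    # single LineCollection (and hence a single legend entry) below.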
colors_used = _remove_dups(color_list) color_to_lines = {} for color in colors_used: color_to_lines[color] = [] for (xline, yline, color) in zip(xlines, ylines, color_list): color_to_lines[color].append(list(zip(xline, yline))) colors_to_collections = {} # Construct the collections. for color in colors_used: coll = matplotlib.collections.LineCollection(color_to_lines[color], colors=(color,)) colors_to_collections[color] = coll # Add all the groupings below the color threshold. for color in colors_used: if color != above_threshold_color: ax.add_collection(colors_to_collections[color]) # If there's a grouping of links above the color threshold, it goes last. if above_threshold_color in colors_to_collections: ax.add_collection(colors_to_collections[above_threshold_color]) if contraction_marks is not None: Ellipse = matplotlib.patches.Ellipse for (x, y) in contraction_marks: if orientation in ('left', 'right'): e = Ellipse((y, x), width=dvw / 100, height=1.0) else: e = Ellipse((x, y), width=1.0, height=dvw / 100) ax.add_artist(e) e.set_clip_box(ax.bbox) e.set_alpha(0.5) e.set_facecolor('k') if trigger_redraw: matplotlib.pylab.draw_if_interactive() _link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k'] def set_link_color_palette(palette): """ Set list of matplotlib color codes for use by dendrogram. Note that this palette is global (i.e. setting it once changes the colors for all subsequent calls to `dendrogram`) and that it affects only the the colors below ``color_threshold``. Note that `dendrogram` also accepts a custom coloring function through its ``link_color_func`` keyword, which is more flexible and non-global. Parameters ---------- palette : list of str or None A list of matplotlib color codes. The order of the color codes is the order in which the colors are cycled through when color thresholding in the dendrogram. If ``None``, resets the palette to its default (which is ``['g', 'r', 'c', 'm', 'y', 'k']``). Returns ------- None See Also -------- dendrogram Notes ----- Ability to reset the palette with ``None`` added in Scipy 0.17.0. Examples -------- >>> from scipy.cluster import hierarchy >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., ... 400., 754., 564., 138., 219., 869., 669.]) >>> Z = hierarchy.linkage(ytdist, 'single') >>> dn = hierarchy.dendrogram(Z, no_plot=True) >>> dn['color_list'] ['g', 'b', 'b', 'b', 'b'] >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k']) >>> dn = hierarchy.dendrogram(Z, no_plot=True) >>> dn['color_list'] ['c', 'b', 'b', 'b', 'b'] >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267, ... 
above_threshold_color='k') >>> dn['color_list'] ['c', 'm', 'm', 'k', 'k'] Now reset the color palette to its default: >>> hierarchy.set_link_color_palette(None) """ if palette is None: # reset to its default palette = ['g', 'r', 'c', 'm', 'y', 'k'] elif type(palette) not in (list, tuple): raise TypeError("palette must be a list or tuple") _ptypes = [isinstance(p, string_types) for p in palette] if False in _ptypes: raise TypeError("all palette list elements must be color strings") for i in list(_link_line_colors): _link_line_colors.remove(i) _link_line_colors.extend(list(palette)) def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None, get_leaves=True, orientation='top', labels=None, count_sort=False, distance_sort=False, show_leaf_counts=True, no_plot=False, no_labels=False, leaf_font_size=None, leaf_rotation=None, leaf_label_func=None, show_contracted=False, link_color_func=None, ax=None, above_threshold_color='b'): """ Plot the hierarchical clustering as a dendrogram. The dendrogram illustrates how each cluster is composed by drawing a U-shaped link between a non-singleton cluster and its children. The top of the U-link indicates a cluster merge. The two legs of the U-link indicate which clusters were merged. The length of the two legs of the U-link represents the distance between the child clusters. It is also the cophenetic distance between original observations in the two children clusters. Parameters ---------- Z : ndarray The linkage matrix encoding the hierarchical clustering to render as a dendrogram. See the ``linkage`` function for more information on the format of ``Z``. p : int, optional The ``p`` parameter for ``truncate_mode``. truncate_mode : str, optional The dendrogram can be hard to read when the original observation matrix from which the linkage is derived is large. Truncation is used to condense the dendrogram. There are several modes: ``None`` No truncation is performed (default). Note: ``'none'`` is an alias for ``None`` that's kept for backward compatibility. ``'lastp'`` The last ``p`` non-singleton clusters formed in the linkage are the only non-leaf nodes in the linkage; they correspond to rows ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are contracted into leaf nodes. ``'level'`` No more than ``p`` levels of the dendrogram tree are displayed. A "level" includes all nodes with ``p`` merges from the last merge. Note: ``'mtica'`` is an alias for ``'level'`` that's kept for backward compatibility. color_threshold : double, optional For brevity, let :math:`t` be the ``color_threshold``. Colors all the descendent links below a cluster node :math:`k` the same color if :math:`k` is the first node below the cut threshold :math:`t`. All links connecting nodes with distances greater than or equal to the threshold are colored blue. If :math:`t` is less than or equal to zero, all nodes are colored blue. If ``color_threshold`` is None or 'default', corresponding with MATLAB(TM) behavior, the threshold is set to ``0.7*max(Z[:,2])``. get_leaves : bool, optional Includes a list ``R['leaves']=H`` in the result dictionary. For each :math:`i`, ``H[i] == j``, cluster node ``j`` appears in position ``i`` in the left-to-right traversal of the leaves, where :math:`j < 2n-1` and :math:`i < n`. orientation : str, optional The direction to plot the dendrogram, which can be any of the following strings: ``'top'`` Plots the root at the top, and plot descendent links going downwards. (default). 
``'bottom'`` Plots the root at the bottom, and plot descendent links going upwards. ``'left'`` Plots the root at the left, and plot descendent links going right. ``'right'`` Plots the root at the right, and plot descendent links going left. labels : ndarray, optional By default ``labels`` is None so the index of the original observation is used to label the leaf nodes. Otherwise, this is an :math:`n` -sized list (or tuple). The ``labels[i]`` value is the text to put under the :math:`i` th leaf node only if it corresponds to an original observation and not a non-singleton cluster. count_sort : str or bool, optional For each node n, the order (visually, from left-to-right) n's two descendent links are plotted is determined by this parameter, which can be any of the following values: ``False`` Nothing is done. ``'ascending'`` or ``True`` The child with the minimum number of original objects in its cluster is plotted first. ``'descendent'`` The child with the maximum number of original objects in its cluster is plotted first. Note ``distance_sort`` and ``count_sort`` cannot both be True. distance_sort : str or bool, optional For each node n, the order (visually, from left-to-right) n's two descendent links are plotted is determined by this parameter, which can be any of the following values: ``False`` Nothing is done. ``'ascending'`` or ``True`` The child with the minimum distance between its direct descendents is plotted first. ``'descending'`` The child with the maximum distance between its direct descendents is plotted first. Note ``distance_sort`` and ``count_sort`` cannot both be True. show_leaf_counts : bool, optional When True, leaf nodes representing :math:`k>1` original observation are labeled with the number of observations they contain in parentheses. no_plot : bool, optional When True, the final rendering is not performed. This is useful if only the data structures computed for the rendering are needed or if matplotlib is not available. no_labels : bool, optional When True, no labels appear next to the leaf nodes in the rendering of the dendrogram. leaf_rotation : double, optional Specifies the angle (in degrees) to rotate the leaf labels. When unspecified, the rotation is based on the number of nodes in the dendrogram (default is 0). leaf_font_size : int, optional Specifies the font size (in points) of the leaf labels. When unspecified, the size based on the number of nodes in the dendrogram. leaf_label_func : lambda or function, optional When leaf_label_func is a callable function, for each leaf with cluster index :math:`k < 2n-1`. The function is expected to return a string with the label for the leaf. Indices :math:`k < n` correspond to original observations while indices :math:`k \\geq n` correspond to non-singleton clusters. For example, to label singletons with their node id and non-singletons with their id, count, and inconsistency coefficient, simply do:: # First define the leaf label function. def llf(id): if id < n: return str(id) else: return '[%d %d %1.2f]' % (id, count, R[n-id,3]) # The text for the leaf nodes is going to be big so force # a rotation of 90 degrees. dendrogram(Z, leaf_label_func=llf, leaf_rotation=90) show_contracted : bool, optional When True the heights of non-singleton nodes contracted into a leaf node are plotted as crosses along the link connecting that leaf node. This really is only useful when truncation is used (see ``truncate_mode`` parameter). 
link_color_func : callable, optional If given, `link_color_function` is called with each non-singleton id corresponding to each U-shaped link it will paint. The function is expected to return the color to paint the link, encoded as a matplotlib color string code. For example:: dendrogram(Z, link_color_func=lambda k: colors[k]) colors the direct links below each untruncated non-singleton node ``k`` using ``colors[k]``. ax : matplotlib Axes instance, optional If None and `no_plot` is not True, the dendrogram will be plotted on the current axes. Otherwise if `no_plot` is not True the dendrogram will be plotted on the given ``Axes`` instance. This can be useful if the dendrogram is part of a more complex figure. above_threshold_color : str, optional This matplotlib color string sets the color of the links above the color_threshold. The default is 'b'. Returns ------- R : dict A dictionary of data structures computed to render the dendrogram. Its has the following keys: ``'color_list'`` A list of color names. The k'th element represents the color of the k'th link. ``'icoord'`` and ``'dcoord'`` Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]`` where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]`` where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``. ``'ivl'`` A list of labels corresponding to the leaf nodes. ``'leaves'`` For each i, ``H[i] == j``, cluster node ``j`` appears in position ``i`` in the left-to-right traversal of the leaves, where :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the ``i``-th leaf node corresponds to an original observation. Otherwise, it corresponds to a non-singleton cluster. See Also -------- linkage, set_link_color_palette Notes ----- It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise crossings appear in the dendrogram. Examples -------- >>> from scipy.cluster import hierarchy >>> import matplotlib.pyplot as plt A very basic example: >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., ... 400., 754., 564., 138., 219., 869., 669.]) >>> Z = hierarchy.linkage(ytdist, 'single') >>> plt.figure() >>> dn = hierarchy.dendrogram(Z) Now plot in given axes, improve the color scheme and use both vertical and horizontal orientations: >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k']) >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3)) >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y', ... orientation='top') >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], ... above_threshold_color='#bcbddc', ... orientation='right') >>> hierarchy.set_link_color_palette(None) # reset to default after use >>> plt.show() """ # This feature was thought about but never implemented (still useful?): # # ... = dendrogram(..., leaves_order=None) # # Plots the leaves in the order specified by a vector of # original observation indices. If the vector contains duplicates # or results in a crossing, an exception will be thrown. Passing # None orders leaf nodes based on the order they appear in the # pre-order traversal. 
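    # Overall flow: validate Z, resolve p and truncate_mode, let
    # _dendrogram_calculate_info recursively fill the coordinate/label/color
    # lists starting from the root node (id 2*n - 2), and finally hand those
    # lists to _plot_dendrogram unless no_plot is set.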
Z = np.asarray(Z, order='c') if orientation not in ["top", "left", "bottom", "right"]: raise ValueError("orientation must be one of 'top', 'left', " "'bottom', or 'right'") is_valid_linkage(Z, throw=True, name='Z') Zs = Z.shape n = Zs[0] + 1 if type(p) in (int, float): p = int(p) else: raise TypeError('The second argument must be a number') if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None): # 'mlab' and 'mtica' are kept working for backwards compat. raise ValueError('Invalid truncation mode.') if truncate_mode == 'lastp' or truncate_mode == 'mlab': if p > n or p == 0: p = n if truncate_mode == 'mtica': # 'mtica' is an alias truncate_mode = 'level' if truncate_mode == 'level': if p <= 0: p = np.inf if get_leaves: lvs = [] else: lvs = None icoord_list = [] dcoord_list = [] color_list = [] current_color = [0] currently_below_threshold = [False] ivl = [] # list of leaves if color_threshold is None or (isinstance(color_threshold, string_types) and color_threshold == 'default'): color_threshold = max(Z[:, 2]) * 0.7 R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, 'leaves': lvs, 'color_list': color_list} # Empty list will be filled in _dendrogram_calculate_info contraction_marks = [] if show_contracted else None _dendrogram_calculate_info( Z=Z, p=p, truncate_mode=truncate_mode, color_threshold=color_threshold, get_leaves=get_leaves, orientation=orientation, labels=labels, count_sort=count_sort, distance_sort=distance_sort, show_leaf_counts=show_leaf_counts, i=2*n - 2, iv=0.0, ivl=ivl, n=n, icoord_list=icoord_list, dcoord_list=dcoord_list, lvs=lvs, current_color=current_color, color_list=color_list, currently_below_threshold=currently_below_threshold, leaf_label_func=leaf_label_func, contraction_marks=contraction_marks, link_color_func=link_color_func, above_threshold_color=above_threshold_color) if not no_plot: mh = max(Z[:, 2]) _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, no_labels, color_list, leaf_font_size=leaf_font_size, leaf_rotation=leaf_rotation, contraction_marks=contraction_marks, ax=ax, above_threshold_color=above_threshold_color) return R def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels): # If the leaf id structure is not None and is a list then the caller # to dendrogram has indicated that cluster id's corresponding to the # leaf nodes should be recorded. if lvs is not None: lvs.append(int(i)) # If leaf node labels are to be displayed... if ivl is not None: # If a leaf_label_func has been provided, the label comes from the # string returned from the leaf_label_func, which is a function # passed to dendrogram. if leaf_label_func: ivl.append(leaf_label_func(int(i))) else: # Otherwise, if the dendrogram caller has passed a labels list # for the leaf nodes, use it. if labels is not None: ivl.append(labels[int(i - n)]) else: # Otherwise, use the id as the label for the leaf.x ivl.append(str(int(i))) def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels, show_leaf_counts): # If the leaf id structure is not None and is a list then the caller # to dendrogram has indicated that cluster id's corresponding to the # leaf nodes should be recorded. 
if lvs is not None: lvs.append(int(i)) if ivl is not None: if leaf_label_func: ivl.append(leaf_label_func(int(i))) else: if show_leaf_counts: ivl.append("(" + str(int(Z[i - n, 3])) + ")") else: ivl.append("") def _append_contraction_marks(Z, iv, i, n, contraction_marks): _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks) _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks) def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks): if i >= n: contraction_marks.append((iv, Z[i - n, 2])) _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks) _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks) def _dendrogram_calculate_info(Z, p, truncate_mode, color_threshold=np.inf, get_leaves=True, orientation='top', labels=None, count_sort=False, distance_sort=False, show_leaf_counts=False, i=-1, iv=0.0, ivl=[], n=0, icoord_list=[], dcoord_list=[], lvs=None, mhr=False, current_color=[], color_list=[], currently_below_threshold=[], leaf_label_func=None, level=0, contraction_marks=None, link_color_func=None, above_threshold_color='b'): """ Calculate the endpoints of the links as well as the labels for the the dendrogram rooted at the node with index i. iv is the independent variable value to plot the left-most leaf node below the root node i (if orientation='top', this would be the left-most x value where the plotting of this root node i and its descendents should begin). ivl is a list to store the labels of the leaf nodes. The leaf_label_func is called whenever ivl != None, labels == None, and leaf_label_func != None. When ivl != None and labels != None, the labels list is used only for labeling the leaf nodes. When ivl == None, no labels are generated for leaf nodes. When get_leaves==True, a list of leaves is built as they are visited in the dendrogram. Returns a tuple with l being the independent variable coordinate that corresponds to the midpoint of cluster to the left of cluster i if i is non-singleton, otherwise the independent coordinate of the leaf node if i is a leaf node. Returns ------- A tuple (left, w, h, md), where: * left is the independent variable coordinate of the center of the the U of the subtree * w is the amount of space used for the subtree (in independent variable units) * h is the height of the subtree in dependent variable units * md is the ``max(Z[*,2]``) for all nodes ``*`` below and including the target node. """ if n == 0: raise ValueError("Invalid singleton cluster count n.") if i == -1: raise ValueError("Invalid root cluster index i.") if truncate_mode == 'lastp': # If the node is a leaf node but corresponds to a non-singleton # cluster, its label is either the empty string or the number of # original observations belonging to cluster i. 
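        # Under 'lastp' truncation, non-singleton ids in [n, 2*n - p) are
        # collapsed into a single contracted leaf; only ids >= 2*n - p remain
        # as expanded internal nodes.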
if 2*n - p > i >= n: d = Z[i - n, 2] _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels, show_leaf_counts) if contraction_marks is not None: _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks) return (iv + 5.0, 10.0, 0.0, d) elif i < n: _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels) return (iv + 5.0, 10.0, 0.0, 0.0) elif truncate_mode == 'level': if i > n and level > p: d = Z[i - n, 2] _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels, show_leaf_counts) if contraction_marks is not None: _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks) return (iv + 5.0, 10.0, 0.0, d) elif i < n: _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels) return (iv + 5.0, 10.0, 0.0, 0.0) elif truncate_mode in ('mlab',): msg = "Mode 'mlab' is deprecated in scipy 0.19.0 (it never worked)." warnings.warn(msg, DeprecationWarning) # Otherwise, only truncate if we have a leaf node. # # Only place leaves if they correspond to original observations. if i < n: _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels) return (iv + 5.0, 10.0, 0.0, 0.0) # !!! Otherwise, we don't have a leaf node, so work on plotting a # non-leaf node. # Actual indices of a and b aa = int(Z[i - n, 0]) ab = int(Z[i - n, 1]) if aa > n: # The number of singletons below cluster a na = Z[aa - n, 3] # The distance between a's two direct children. da = Z[aa - n, 2] else: na = 1 da = 0.0 if ab > n: nb = Z[ab - n, 3] db = Z[ab - n, 2] else: nb = 1 db = 0.0 if count_sort == 'ascending' or count_sort: # If a has a count greater than b, it and its descendents should # be drawn to the right. Otherwise, to the left. if na > nb: # The cluster index to draw to the left (ua) will be ab # and the one to draw to the right (ub) will be aa ua = ab ub = aa else: ua = aa ub = ab elif count_sort == 'descending': # If a has a count less than or equal to b, it and its # descendents should be drawn to the left. Otherwise, to # the right. if na > nb: ua = aa ub = ab else: ua = ab ub = aa elif distance_sort == 'ascending' or distance_sort: # If a has a distance greater than b, it and its descendents should # be drawn to the right. Otherwise, to the left. if da > db: ua = ab ub = aa else: ua = aa ub = ab elif distance_sort == 'descending': # If a has a distance less than or equal to b, it and its # descendents should be drawn to the left. Otherwise, to # the right. if da > db: ua = aa ub = ab else: ua = ab ub = aa else: ua = aa ub = ab # Updated iv variable and the amount of space used. 
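    # Recurse into the left-hand child (ua) first; its returned width uwa
    # offsets the starting independent-variable coordinate (iv + uwa) used for
    # the right-hand child further below.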
(uiva, uwa, uah, uamd) = \ _dendrogram_calculate_info( Z=Z, p=p, truncate_mode=truncate_mode, color_threshold=color_threshold, get_leaves=get_leaves, orientation=orientation, labels=labels, count_sort=count_sort, distance_sort=distance_sort, show_leaf_counts=show_leaf_counts, i=ua, iv=iv, ivl=ivl, n=n, icoord_list=icoord_list, dcoord_list=dcoord_list, lvs=lvs, current_color=current_color, color_list=color_list, currently_below_threshold=currently_below_threshold, leaf_label_func=leaf_label_func, level=level + 1, contraction_marks=contraction_marks, link_color_func=link_color_func, above_threshold_color=above_threshold_color) h = Z[i - n, 2] if h >= color_threshold or color_threshold <= 0: c = above_threshold_color if currently_below_threshold[0]: current_color[0] = (current_color[0] + 1) % len(_link_line_colors) currently_below_threshold[0] = False else: currently_below_threshold[0] = True c = _link_line_colors[current_color[0]] (uivb, uwb, ubh, ubmd) = \ _dendrogram_calculate_info( Z=Z, p=p, truncate_mode=truncate_mode, color_threshold=color_threshold, get_leaves=get_leaves, orientation=orientation, labels=labels, count_sort=count_sort, distance_sort=distance_sort, show_leaf_counts=show_leaf_counts, i=ub, iv=iv + uwa, ivl=ivl, n=n, icoord_list=icoord_list, dcoord_list=dcoord_list, lvs=lvs, current_color=current_color, color_list=color_list, currently_below_threshold=currently_below_threshold, leaf_label_func=leaf_label_func, level=level + 1, contraction_marks=contraction_marks, link_color_func=link_color_func, above_threshold_color=above_threshold_color) max_dist = max(uamd, ubmd, h) icoord_list.append([uiva, uiva, uivb, uivb]) dcoord_list.append([uah, h, h, ubh]) if link_color_func is not None: v = link_color_func(int(i)) if not isinstance(v, string_types): raise TypeError("link_color_func must return a matplotlib " "color string!") color_list.append(v) else: color_list.append(c) return (((uiva + uivb) / 2), uwa + uwb, h, max_dist) def is_isomorphic(T1, T2): """ Determine if two different cluster assignments are equivalent. Parameters ---------- T1 : array_like An assignment of singleton cluster ids to flat cluster ids. T2 : array_like An assignment of singleton cluster ids to flat cluster ids. Returns ------- b : bool Whether the flat cluster assignments `T1` and `T2` are equivalent. """ T1 = np.asarray(T1, order='c') T2 = np.asarray(T2, order='c') if type(T1) != np.ndarray: raise TypeError('T1 must be a numpy array.') if type(T2) != np.ndarray: raise TypeError('T2 must be a numpy array.') T1S = T1.shape T2S = T2.shape if len(T1S) != 1: raise ValueError('T1 must be one-dimensional.') if len(T2S) != 1: raise ValueError('T2 must be one-dimensional.') if T1S[0] != T2S[0]: raise ValueError('T1 and T2 must have the same number of elements.') n = T1S[0] d1 = {} d2 = {} for i in xrange(0, n): if T1[i] in d1: if not T2[i] in d2: return False if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]: return False elif T2[i] in d2: return False else: d1[T1[i]] = T2[i] d2[T2[i]] = T1[i] return True def maxdists(Z): """ Return the maximum distance between any non-singleton cluster. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a matrix. See ``linkage`` for more information. Returns ------- maxdists : ndarray A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents the maximum distance between any cluster (including singletons) below and including the node with index i. 
More specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the set of all node indices below and including node i. """ Z = np.asarray(Z, order='c', dtype=np.double) is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 MD = np.zeros((n - 1,)) [Z] = _copy_arrays_if_base_present([Z]) _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n)) return MD def maxinconsts(Z, R): """ Return the maximum inconsistency coefficient for each non-singleton cluster and its descendents. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a matrix. See `linkage` for more information. R : ndarray The inconsistency matrix. Returns ------- MI : ndarray A monotonic ``(n-1)``-sized numpy array of doubles. """ Z = np.asarray(Z, order='c') R = np.asarray(R, order='c') is_valid_linkage(Z, throw=True, name='Z') is_valid_im(R, throw=True, name='R') n = Z.shape[0] + 1 if Z.shape[0] != R.shape[0]: raise ValueError("The inconsistency matrix and linkage matrix each " "have a different number of rows.") MI = np.zeros((n - 1,)) [Z, R] = _copy_arrays_if_base_present([Z, R]) _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3) return MI def maxRstat(Z, R, i): """ Return the maximum statistic for each non-singleton cluster and its descendents. Parameters ---------- Z : array_like The hierarchical clustering encoded as a matrix. See `linkage` for more information. R : array_like The inconsistency matrix. i : int The column of `R` to use as the statistic. Returns ------- MR : ndarray Calculates the maximum statistic for the i'th column of the inconsistency matrix `R` for each non-singleton cluster node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where ``Q(j)`` the set of all node ids corresponding to nodes below and including ``j``. """ Z = np.asarray(Z, order='c') R = np.asarray(R, order='c') is_valid_linkage(Z, throw=True, name='Z') is_valid_im(R, throw=True, name='R') if type(i) is not int: raise TypeError('The third argument must be an integer.') if i < 0 or i > 3: raise ValueError('i must be an integer between 0 and 3 inclusive.') if Z.shape[0] != R.shape[0]: raise ValueError("The inconsistency matrix and linkage matrix each " "have a different number of rows.") n = Z.shape[0] + 1 MR = np.zeros((n - 1,)) [Z, R] = _copy_arrays_if_base_present([Z, R]) _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i) return MR def leaders(Z, T): """ Return the root nodes in a hierarchical clustering. Returns the root nodes in a hierarchical clustering corresponding to a cut defined by a flat cluster assignment vector ``T``. See the ``fcluster`` function for more information on the format of ``T``. For each flat cluster :math:`j` of the :math:`k` flat clusters represented in the n-sized flat cluster assignment vector ``T``, this function finds the lowest cluster node :math:`i` in the linkage tree Z such that: * leaf descendents belong only to flat cluster j (i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where :math:`S(i)` is the set of leaf ids of leaf nodes descendent with cluster node :math:`i`) * there does not exist a leaf that is not descendent with :math:`i` that also belongs to cluster :math:`j` (i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If this condition is violated, ``T`` is not a valid cluster assignment vector, and an exception will be thrown. Parameters ---------- Z : ndarray The hierarchical clustering encoded as a matrix. See `linkage` for more information. T : ndarray The flat cluster assignment vector. 
Returns ------- L : ndarray The leader linkage node id's stored as a k-element 1-D array where ``k`` is the number of flat clusters found in ``T``. ``L[j]=i`` is the linkage cluster node id that is the leader of flat cluster with id M[j]. If ``i < n``, ``i`` corresponds to an original observation, otherwise it corresponds to a non-singleton cluster. For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with id 8's leader is linkage node 2. M : ndarray The leader linkage node id's stored as a k-element 1-D array where ``k`` is the number of flat clusters found in ``T``. This allows the set of flat cluster ids to be any arbitrary set of ``k`` integers. """ Z = np.asarray(Z, order='c') T = np.asarray(T, order='c') if type(T) != np.ndarray or T.dtype != 'i': raise TypeError('T must be a one-dimensional numpy array of integers.') is_valid_linkage(Z, throw=True, name='Z') if len(T) != Z.shape[0] + 1: raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.') Cl = np.unique(T) kk = len(Cl) L = np.zeros((kk,), dtype='i') M = np.zeros((kk,), dtype='i') n = Z.shape[0] + 1 [Z, T] = _copy_arrays_if_base_present([Z, T]) s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n)) if s >= 0: raise ValueError(('T is not a valid assignment vector. Error found ' 'when examining linkage node %d (< 2n-1).') % s) return (L, M)
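To make the relationship between ``fcluster`` assignments and the ``leaders`` output concrete, here is a small illustrative sketch; the data set, the cluster count and the variable names are arbitrary choices for demonstration and are not part of the module itself.

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster, leaders

# Two tight groups of two points each.
X = np.array([[0., 0.], [0.1, 0.], [5., 5.], [5.1, 5.]])
Z = linkage(X, method='single')

# Cut the tree into two flat clusters; T[i] is the flat cluster id of leaf i.
T = fcluster(Z, t=2, criterion='maxclust')

# L[j] is the linkage node id that leads the flat cluster with id M[j]:
# every leaf below node L[j] carries label M[j], and no other leaf does.
L, M = leaders(Z, T)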
file_length: 103637
avg_line_length: 33.136364
max_line_length: 102
extension_type: py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/cluster/vq.py
""" ==================================================================== K-means clustering and vector quantization (:mod:`scipy.cluster.vq`) ==================================================================== Provides routines for k-means clustering, generating code books from k-means models, and quantizing vectors by comparing them with centroids in a code book. .. autosummary:: :toctree: generated/ whiten -- Normalize a group of observations so each feature has unit variance vq -- Calculate code book membership of a set of observation vectors kmeans -- Performs k-means on a set of observation vectors forming k clusters kmeans2 -- A different implementation of k-means with more methods -- for initializing centroids Background information ====================== The k-means algorithm takes as input the number of clusters to generate, k, and a set of observation vectors to cluster. It returns a set of centroids, one for each of the k clusters. An observation vector is classified with the cluster number or centroid index of the centroid closest to it. A vector v belongs to cluster i if it is closer to centroid i than any other centroids. If v belongs to i, we say centroid i is the dominating centroid of v. The k-means algorithm tries to minimize distortion, which is defined as the sum of the squared distances between each observation vector and its dominating centroid. Each step of the k-means algorithm refines the choices of centroids to reduce distortion. The change in distortion is used as a stopping criterion: when the change is lower than a threshold, the k-means algorithm is not making sufficient progress and terminates. One can also define a maximum number of iterations. Since vector quantization is a natural application for k-means, information theory terminology is often used. The centroid index or cluster index is also referred to as a "code" and the table mapping codes to centroids and vice versa is often referred as a "code book". The result of k-means, a set of centroids, can be used to quantize vectors. Quantization aims to find an encoding of vectors that reduces the expected distortion. All routines expect obs to be a M by N array where the rows are the observation vectors. The codebook is a k by N array where the i'th row is the centroid of code word i. The observation vectors and centroids have the same feature dimension. As an example, suppose we wish to compress a 24-bit color image (each pixel is represented by one byte for red, one for blue, and one for green) before sending it over the web. By using a smaller 8-bit encoding, we can reduce the amount of data by two thirds. Ideally, the colors for each of the 256 possible 8-bit encoding values should be chosen to minimize distortion of the color. Running k-means with k=256 generates a code book of 256 codes, which fills up all possible 8-bit sequences. Instead of sending a 3-byte value for each pixel, the 8-bit centroid index (or code word) of the dominating centroid is transmitted. The code book is also sent over the wire so each 8-bit code can be translated back to a 24-bit pixel value representation. If the image of interest was of an ocean, we would expect many 24-bit blues to be represented by 8-bit codes. If it was an image of a human face, more flesh tone colors would be represented in the code book. 
""" from __future__ import division, print_function, absolute_import import warnings import numpy as np from collections import deque from scipy._lib._util import _asarray_validated from scipy._lib.six import xrange from scipy.spatial.distance import cdist from . import _vq __docformat__ = 'restructuredtext' __all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] class ClusterError(Exception): pass def whiten(obs, check_finite=True): """ Normalize a group of observations on a per feature basis. Before running k-means, it is beneficial to rescale each feature dimension of the observation set with whitening. Each feature is divided by its standard deviation across all observations to give it unit variance. Parameters ---------- obs : ndarray Each row of the array is an observation. The columns are the features seen during each observation. >>> # f0 f1 f2 >>> obs = [[ 1., 1., 1.], #o0 ... [ 2., 2., 2.], #o1 ... [ 3., 3., 3.], #o2 ... [ 4., 4., 4.]] #o3 check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- result : ndarray Contains the values in `obs` scaled by the standard deviation of each column. Examples -------- >>> from scipy.cluster.vq import whiten >>> features = np.array([[1.9, 2.3, 1.7], ... [1.5, 2.5, 2.2], ... [0.8, 0.6, 1.7,]]) >>> whiten(features) array([[ 4.17944278, 2.69811351, 7.21248917], [ 3.29956009, 2.93273208, 9.33380951], [ 1.75976538, 0.7038557 , 7.21248917]]) """ obs = _asarray_validated(obs, check_finite=check_finite) std_dev = obs.std(axis=0) zero_std_mask = std_dev == 0 if zero_std_mask.any(): std_dev[zero_std_mask] = 1.0 warnings.warn("Some columns have standard deviation zero. " "The values of these columns will not change.", RuntimeWarning) return obs / std_dev def vq(obs, code_book, check_finite=True): """ Assign codes from a code book to observations. Assigns a code from a code book to each observation. Each observation vector in the 'M' by 'N' `obs` array is compared with the centroids in the code book and assigned the code of the closest centroid. The features in `obs` should have unit variance, which can be achieved by passing them through the whiten function. The code book can be created with the k-means algorithm or a different encoding algorithm. Parameters ---------- obs : ndarray Each row of the 'M' x 'N' array is an observation. The columns are the "features" seen during each observation. The features must be whitened first using the whiten function or something equivalent. code_book : ndarray The code book is usually generated using the k-means algorithm. Each row of the array holds a different code, and the columns are the features of the code. >>> # f0 f1 f2 f3 >>> code_book = [ ... [ 1., 2., 3., 4.], #c0 ... [ 1., 2., 3., 4.], #c1 ... [ 1., 2., 3., 4.]] #c2 check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- code : ndarray A length M array holding the code book index for each observation. dist : ndarray The distortion (distance) between the observation and its nearest code. Examples -------- >>> from numpy import array >>> from scipy.cluster.vq import vq >>> code_book = array([[1.,1.,1.], ... 
[2.,2.,2.]]) >>> features = array([[ 1.9,2.3,1.7], ... [ 1.5,2.5,2.2], ... [ 0.8,0.6,1.7]]) >>> vq(features,code_book) (array([1, 1, 0],'i'), array([ 0.43588989, 0.73484692, 0.83066239])) """ obs = _asarray_validated(obs, check_finite=check_finite) code_book = _asarray_validated(code_book, check_finite=check_finite) ct = np.common_type(obs, code_book) c_obs = obs.astype(ct, copy=False) c_code_book = code_book.astype(ct, copy=False) if np.issubdtype(ct, np.float64) or np.issubdtype(ct, np.float32): return _vq.vq(c_obs, c_code_book) return py_vq(obs, code_book, check_finite=False) def py_vq(obs, code_book, check_finite=True): """ Python version of vq algorithm. The algorithm computes the euclidian distance between each observation and every frame in the code_book. Parameters ---------- obs : ndarray Expects a rank 2 array. Each row is one observation. code_book : ndarray Code book to use. Same format than obs. Should have same number of features (eg columns) than obs. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- code : ndarray code[i] gives the label of the ith obversation, that its code is code_book[code[i]]. mind_dist : ndarray min_dist[i] gives the distance between the ith observation and its corresponding code. Notes ----- This function is slower than the C version but works for all input types. If the inputs have the wrong types for the C versions of the function, this one is called as a last resort. It is about 20 times slower than the C version. """ obs = _asarray_validated(obs, check_finite=check_finite) code_book = _asarray_validated(code_book, check_finite=check_finite) if obs.ndim != code_book.ndim: raise ValueError("Observation and code_book should have the same rank") if obs.ndim == 1: obs = obs[:, np.newaxis] code_book = code_book[:, np.newaxis] dist = cdist(obs, code_book) code = dist.argmin(axis=1) min_dist = dist[np.arange(len(code)), code] return code, min_dist # py_vq2 was equivalent to py_vq py_vq2 = np.deprecate(py_vq, old_name='py_vq2', new_name='py_vq') def _kmeans(obs, guess, thresh=1e-5): """ "raw" version of k-means. Returns ------- code_book the lowest distortion codebook found. avg_dist the average distance a observation is from a code in the book. Lower means the code_book matches the data better. See Also -------- kmeans : wrapper around k-means Examples -------- Note: not whitened in this example. >>> from numpy import array >>> from scipy.cluster.vq import _kmeans >>> features = array([[ 1.9,2.3], ... [ 1.5,2.5], ... [ 0.8,0.6], ... [ 0.4,1.8], ... 
[ 1.0,1.0]]) >>> book = array((features[0],features[2])) >>> _kmeans(features,book) (array([[ 1.7 , 2.4 ], [ 0.73333333, 1.13333333]]), 0.40563916697728591) """ code_book = np.asarray(guess) diff = np.inf prev_avg_dists = deque([diff], maxlen=2) while diff > thresh: # compute membership and distances between obs and code_book obs_code, distort = vq(obs, code_book, check_finite=False) prev_avg_dists.append(distort.mean(axis=-1)) # recalc code_book as centroids of associated obs code_book, has_members = _vq.update_cluster_means(obs, obs_code, code_book.shape[0]) code_book = code_book[has_members] diff = prev_avg_dists[0] - prev_avg_dists[1] return code_book, prev_avg_dists[1] def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True): """ Performs k-means on a set of observation vectors forming k clusters. The k-means algorithm adjusts the centroids until sufficient progress cannot be made, i.e. the change in distortion since the last iteration is less than some threshold. This yields a code book mapping centroids to codes and vice versa. Distortion is defined as the sum of the squared differences between the observations and the corresponding centroid. Parameters ---------- obs : ndarray Each row of the M by N array is an observation vector. The columns are the features seen during each observation. The features must be whitened first with the `whiten` function. k_or_guess : int or ndarray The number of centroids to generate. A code is assigned to each centroid, which is also the row index of the centroid in the code_book matrix generated. The initial k centroids are chosen by randomly selecting observations from the observation matrix. Alternatively, passing a k by N array specifies the initial k centroids. iter : int, optional The number of times to run k-means, returning the codebook with the lowest distortion. This argument is ignored if initial centroids are specified with an array for the ``k_or_guess`` parameter. This parameter does not represent the number of iterations of the k-means algorithm. thresh : float, optional Terminates the k-means algorithm if the change in distortion since the last k-means iteration is less than or equal to thresh. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- codebook : ndarray A k by N array of k centroids. The i'th centroid codebook[i] is represented with the code i. The centroids and codes generated represent the lowest distortion seen, not necessarily the globally minimal distortion. distortion : float The distortion between the observations passed and the centroids generated. See Also -------- kmeans2 : a different implementation of k-means clustering with more methods for generating initial centroids but without using a distortion change threshold as a stopping criterion. whiten : must be called prior to passing an observation matrix to kmeans. Examples -------- >>> from numpy import array >>> from scipy.cluster.vq import vq, kmeans, whiten >>> import matplotlib.pyplot as plt >>> features = array([[ 1.9,2.3], ... [ 1.5,2.5], ... [ 0.8,0.6], ... [ 0.4,1.8], ... [ 0.1,0.1], ... [ 0.2,1.8], ... [ 2.0,0.5], ... [ 0.3,1.5], ... 
[ 1.0,1.0]]) >>> whitened = whiten(features) >>> book = np.array((whitened[0],whitened[2])) >>> kmeans(whitened,book) (array([[ 2.3110306 , 2.86287398], # random [ 0.93218041, 1.24398691]]), 0.85684700941625547) >>> from numpy import random >>> random.seed((1000,2000)) >>> codes = 3 >>> kmeans(whitened,codes) (array([[ 2.3110306 , 2.86287398], # random [ 1.32544402, 0.65607529], [ 0.40782893, 2.02786907]]), 0.5196582527686241) >>> # Create 50 datapoints in two clusters a and b >>> pts = 50 >>> a = np.random.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts) >>> b = np.random.multivariate_normal([30, 10], ... [[10, 2], [2, 1]], ... size=pts) >>> features = np.concatenate((a, b)) >>> # Whiten data >>> whitened = whiten(features) >>> # Find 2 clusters in the data >>> codebook, distortion = kmeans(whitened, 2) >>> # Plot whitened data and cluster centers in red >>> plt.scatter(whitened[:, 0], whitened[:, 1]) >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r') >>> plt.show() """ obs = _asarray_validated(obs, check_finite=check_finite) if iter < 1: raise ValueError("iter must be at least 1, got %s" % iter) # Determine whether a count (scalar) or an initial guess (array) was passed. if not np.isscalar(k_or_guess): guess = _asarray_validated(k_or_guess, check_finite=check_finite) if guess.size < 1: raise ValueError("Asked for 0 clusters. Initial book was %s" % guess) return _kmeans(obs, guess, thresh=thresh) # k_or_guess is a scalar, now verify that it's an integer k = int(k_or_guess) if k != k_or_guess: raise ValueError("If k_or_guess is a scalar, it must be an integer.") if k < 1: raise ValueError("Asked for %d clusters." % k) # initialize best distance value to a large value best_dist = np.inf for i in xrange(iter): # the initial code book is randomly selected from observations guess = _kpoints(obs, k) book, dist = _kmeans(obs, guess, thresh=thresh) if dist < best_dist: best_book = book best_dist = dist return best_book, best_dist def _kpoints(data, k): """Pick k points at random in data (one row = one observation). Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 are assumed to describe one dimensional data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. """ idx = np.random.choice(data.shape[0], size=k, replace=False) return data[idx] def _krandinit(data, k): """Returns k samples of a random variable which parameters depend on data. More precisely, it returns k observations sampled from a Gaussian random variable which mean and covariances are the one estimated from data. Parameters ---------- data : ndarray Expect a rank 1 or 2 array. Rank 1 are assumed to describe one dimensional data, rank 2 multidimensional data, in which case one row is one observation. k : int Number of samples to generate. 
""" mu = data.mean(axis=0) if data.ndim == 1: cov = np.cov(data) x = np.random.randn(k) x *= np.sqrt(cov) elif data.shape[1] > data.shape[0]: # initialize when the covariance matrix is rank deficient _, s, vh = np.linalg.svd(data - mu, full_matrices=False) x = np.random.randn(k, s.size) sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1) x = x.dot(sVh) else: cov = np.atleast_2d(np.cov(data, rowvar=False)) # k rows, d cols (one row = one obs) # Generate k sample of a random variable ~ Gaussian(mu, cov) x = np.random.randn(k, mu.size) x = x.dot(np.linalg.cholesky(cov).T) x += mu return x _valid_init_meth = {'random': _krandinit, 'points': _kpoints} def _missing_warn(): """Print a warning when called.""" warnings.warn("One of the clusters is empty. " "Re-run kmeans with a different initialization.") def _missing_raise(): """raise a ClusterError when called.""" raise ClusterError("One of the clusters is empty. " "Re-run kmeans with a different initialization.") _valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise} def kmeans2(data, k, iter=10, thresh=1e-5, minit='random', missing='warn', check_finite=True): """ Classify a set of observations into k clusters using the k-means algorithm. The algorithm attempts to minimize the Euclidian distance between observations and centroids. Several initialization methods are included. Parameters ---------- data : ndarray A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length 'M' array of 'M' one-dimensional observations. k : int or ndarray The number of clusters to form as well as the number of centroids to generate. If `minit` initialization string is 'matrix', or if a ndarray is given instead, it is interpreted as initial cluster to use instead. iter : int, optional Number of iterations of the k-means algorithm to run. Note that this differs in meaning from the iters parameter to the kmeans function. thresh : float, optional (not used yet) minit : str, optional Method for initialization. Available methods are 'random', 'points', and 'matrix': 'random': generate k centroids from a Gaussian with mean and variance estimated from the data. 'points': choose k observations (rows) at random from data for the initial centroids. 'matrix': interpret the k parameter as a k by M (or length k array for one-dimensional data) array of initial centroids. missing : str, optional Method to deal with empty clusters. Available methods are 'warn' and 'raise': 'warn': give a warning and continue. 'raise': raise an ClusterError and terminate the algorithm. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Default: True Returns ------- centroid : ndarray A 'k' by 'N' array of centroids found at the last iteration of k-means. label : ndarray label[i] is the code or index of the centroid the i'th observation is closest to. """ if int(iter) < 1: raise ValueError("Invalid iter (%s), " "must be a positive integer." 
% iter) try: miss_meth = _valid_miss_meth[missing] except KeyError: raise ValueError("Unknown missing method %r" % (missing,)) data = _asarray_validated(data, check_finite=check_finite) if data.ndim == 1: d = 1 elif data.ndim == 2: d = data.shape[1] else: raise ValueError("Input of rank > 2 is not supported.") if data.size < 1: raise ValueError("Empty input is not supported.") # If k is not a single value it should be compatible with data's shape if minit == 'matrix' or not np.isscalar(k): code_book = np.array(k, copy=True) if data.ndim != code_book.ndim: raise ValueError("k array doesn't match data rank") nc = len(code_book) if data.ndim > 1 and code_book.shape[1] != d: raise ValueError("k array doesn't match data dimension") else: nc = int(k) if nc < 1: raise ValueError("Cannot ask kmeans2 for %d clusters" " (k was %s)" % (nc, k)) elif nc != k: warnings.warn("k was not an integer, was converted.") try: init_meth = _valid_init_meth[minit] except KeyError: raise ValueError("Unknown init method %r" % (minit,)) else: code_book = init_meth(data, k) for i in xrange(iter): # Compute the nearest neighbor for each obs using the current code book label = vq(data, code_book)[0] # Update the code book by computing centroids new_code_book, has_members = _vq.update_cluster_means(data, label, nc) if not has_members.all(): miss_meth() # Set the empty clusters to their previous positions new_code_book[~has_members] = code_book[~has_members] code_book = new_code_book return code_book, label
23,929
35.646248
80
py
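Editor's note: the vq.py record above defines whiten, vq, kmeans and kmeans2. The short sketch below is an editor's addition, not part of the dataset record; it shows how that API fits together on made-up synthetic data and assumes only numpy and scipy are installed.

import numpy as np
from scipy.cluster.vq import whiten, kmeans, vq

np.random.seed(0)
# two synthetic 2-D clusters of 100 points each (illustrative data only)
pts = np.vstack((np.random.randn(100, 2), np.random.randn(100, 2) + 5.0))
wh = whiten(pts)                       # give every feature unit variance first
codebook, distortion = kmeans(wh, 2)   # fit two centroids on the whitened data
labels, dists = vq(wh, codebook)       # assign each observation to its nearest centroid

The same grouping could also be obtained in a single call with kmeans2(wh, 2, minit='points'); the three-step form above mirrors the whiten -> kmeans -> vq pipeline described in the docstrings.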
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/cluster/setup.py
from __future__ import division, print_function, absolute_import

import sys

if sys.version_info[0] >= 3:
    DEFINE_MACROS = [("SCIPY_PY3K", None)]
else:
    DEFINE_MACROS = []


def configuration(parent_package='', top_path=None):
    from numpy.distutils.system_info import get_info
    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
    config = Configuration('cluster', parent_package, top_path)

    blas_opt = get_info('lapack_opt')

    config.add_data_dir('tests')

    config.add_extension('_vq',
                         sources=[('_vq.c')],
                         include_dirs=[get_numpy_include_dirs()],
                         extra_info=blas_opt)

    config.add_extension('_hierarchy',
                         sources=[('_hierarchy.c')],
                         include_dirs=[get_numpy_include_dirs()])

    config.add_extension('_optimal_leaf_ordering',
                         sources=[('_optimal_leaf_ordering.c')],
                         include_dirs=[get_numpy_include_dirs()])

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
1,058
26.153846
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/cluster/__init__.py
""" ========================================= Clustering package (:mod:`scipy.cluster`) ========================================= .. currentmodule:: scipy.cluster :mod:`scipy.cluster.vq` Clustering algorithms are useful in information theory, target detection, communications, compression, and other areas. The `vq` module only supports vector quantization and the k-means algorithms. :mod:`scipy.cluster.hierarchy` The `hierarchy` module provides functions for hierarchical and agglomerative clustering. Its features include generating hierarchical clusters from distance matrices, calculating statistics on clusters, cutting linkages to generate flat clusters, and visualizing clusters with dendrograms. """ from __future__ import division, print_function, absolute_import __all__ = ['vq', 'hierarchy'] from . import vq, hierarchy from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
938
28.34375
73
py
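Editor's note: a minimal illustration (not part of the record) of the two subpackages this __init__.py re-exports; it assumes an installed scipy.

from scipy.cluster import vq, hierarchy   # re-exported by the __init__.py above
# flat, k-means style clustering lives in vq (kmeans, kmeans2);
# agglomerative clustering lives in hierarchy (linkage, fcluster, dendrogram)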
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/cluster/tests/test_hierarchy.py
# # Author: Damian Eads # Date: April 17, 2008 # # Copyright (C) 2008 Damian Eads # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. The name of the author may not be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns import pytest from pytest import raises as assert_raises from scipy._lib.six import xrange, u import scipy.cluster.hierarchy from scipy.cluster.hierarchy import ( ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders, complete, weighted, centroid, correspond, is_monotonic, maxdists, maxinconsts, maxRstat, is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram, set_link_color_palette, cut_tree, optimal_leaf_ordering, _order_cluster_tree, _hierarchy, _LINKAGE_METHODS) from scipy.spatial.distance import pdist from scipy.cluster._hierarchy import Heap from . import hierarchy_test_data # Matplotlib is not a scipy dependency but is optionally used in dendrogram, so # check if it's available try: import matplotlib # and set the backend to be Agg (no gui) matplotlib.use('Agg') # before importing pyplot import matplotlib.pyplot as plt have_matplotlib = True except: have_matplotlib = False class TestLinkage(object): def test_linkage_non_finite_elements_in_distance_matrix(self): # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf). # Exception expected. y = np.zeros((6,)) y[0] = np.nan assert_raises(ValueError, linkage, y) def test_linkage_empty_distance_matrix(self): # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected. y = np.zeros((0,)) assert_raises(ValueError, linkage, y) def test_linkage_tdist(self): for method in ['single', 'complete', 'average', 'weighted', u('single')]: self.check_linkage_tdist(method) def check_linkage_tdist(self, method): # Tests linkage(Y, method) on the tdist data set. 
Z = linkage(hierarchy_test_data.ytdist, method) expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method) assert_allclose(Z, expectedZ, atol=1e-10) def test_linkage_X(self): for method in ['centroid', 'median', 'ward']: self.check_linkage_q(method) def check_linkage_q(self, method): # Tests linkage(Y, method) on the Q data set. Z = linkage(hierarchy_test_data.X, method) expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method) assert_allclose(Z, expectedZ, atol=1e-06) y = scipy.spatial.distance.pdist(hierarchy_test_data.X, metric="euclidean") Z = linkage(y, method) assert_allclose(Z, expectedZ, atol=1e-06) def test_compare_with_trivial(self): rng = np.random.RandomState(0) n = 20 X = rng.rand(n, 2) d = pdist(X) for method, code in _LINKAGE_METHODS.items(): Z_trivial = _hierarchy.linkage(d, n, code) Z = linkage(d, method) assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15) def test_optimal_leaf_ordering(self): Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True) expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo') assert_allclose(Z, expectedZ, atol=1e-10) class TestLinkageTies(object): _expectations = { 'single': np.array([[0, 1, 1.41421356, 2], [2, 3, 1.41421356, 3]]), 'complete': np.array([[0, 1, 1.41421356, 2], [2, 3, 2.82842712, 3]]), 'average': np.array([[0, 1, 1.41421356, 2], [2, 3, 2.12132034, 3]]), 'weighted': np.array([[0, 1, 1.41421356, 2], [2, 3, 2.12132034, 3]]), 'centroid': np.array([[0, 1, 1.41421356, 2], [2, 3, 2.12132034, 3]]), 'median': np.array([[0, 1, 1.41421356, 2], [2, 3, 2.12132034, 3]]), 'ward': np.array([[0, 1, 1.41421356, 2], [2, 3, 2.44948974, 3]]), } def test_linkage_ties(self): for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']: self.check_linkage_ties(method) def check_linkage_ties(self, method): X = np.array([[-1, -1], [0, 0], [1, 1]]) Z = linkage(X, method=method) expectedZ = self._expectations[method] assert_allclose(Z, expectedZ, atol=1e-06) class TestInconsistent(object): def test_inconsistent_tdist(self): for depth in hierarchy_test_data.inconsistent_ytdist: self.check_inconsistent_tdist(depth) def check_inconsistent_tdist(self, depth): Z = hierarchy_test_data.linkage_ytdist_single assert_allclose(inconsistent(Z, depth), hierarchy_test_data.inconsistent_ytdist[depth]) class TestCopheneticDistance(object): def test_linkage_cophenet_tdist_Z(self): # Tests cophenet(Z) on tdist data set. expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, 295, 138, 219, 295, 295]) Z = hierarchy_test_data.linkage_ytdist_single M = cophenet(Z) assert_allclose(M, expectedM, atol=1e-10) def test_linkage_cophenet_tdist_Z_Y(self): # Tests cophenet(Z, Y) on tdist data set. Z = hierarchy_test_data.linkage_ytdist_single (c, M) = cophenet(Z, hierarchy_test_data.ytdist) expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, 295, 138, 219, 295, 295]) expectedc = 0.639931296433393415057366837573 assert_allclose(c, expectedc, atol=1e-10) assert_allclose(M, expectedM, atol=1e-10) class TestMLabLinkageConversion(object): def test_mlab_linkage_conversion_empty(self): # Tests from/to_mlab_linkage on empty linkage array. X = np.asarray([]) assert_equal(from_mlab_linkage([]), X) assert_equal(to_mlab_linkage([]), X) def test_mlab_linkage_conversion_single_row(self): # Tests from/to_mlab_linkage on linkage array with single row. 
Z = np.asarray([[0., 1., 3., 2.]]) Zm = [[1, 2, 3]] assert_equal(from_mlab_linkage(Zm), Z) assert_equal(to_mlab_linkage(Z), Zm) def test_mlab_linkage_conversion_multiple_rows(self): # Tests from/to_mlab_linkage on linkage array with multiple rows. Zm = np.asarray([[3, 6, 138], [4, 5, 219], [1, 8, 255], [2, 9, 268], [7, 10, 295]]) Z = np.array([[2., 5., 138., 2.], [3., 4., 219., 2.], [0., 7., 255., 3.], [1., 8., 268., 4.], [6., 9., 295., 6.]], dtype=np.double) assert_equal(from_mlab_linkage(Zm), Z) assert_equal(to_mlab_linkage(Z), Zm) class TestFcluster(object): def test_fclusterdata(self): for t in hierarchy_test_data.fcluster_inconsistent: self.check_fclusterdata(t, 'inconsistent') for t in hierarchy_test_data.fcluster_distance: self.check_fclusterdata(t, 'distance') for t in hierarchy_test_data.fcluster_maxclust: self.check_fclusterdata(t, 'maxclust') def check_fclusterdata(self, t, criterion): # Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set. expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t] X = hierarchy_test_data.Q_X T = fclusterdata(X, criterion=criterion, t=t) assert_(is_isomorphic(T, expectedT)) def test_fcluster(self): for t in hierarchy_test_data.fcluster_inconsistent: self.check_fcluster(t, 'inconsistent') for t in hierarchy_test_data.fcluster_distance: self.check_fcluster(t, 'distance') for t in hierarchy_test_data.fcluster_maxclust: self.check_fcluster(t, 'maxclust') def check_fcluster(self, t, criterion): # Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set. expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t] Z = single(hierarchy_test_data.Q_X) T = fcluster(Z, criterion=criterion, t=t) assert_(is_isomorphic(T, expectedT)) def test_fcluster_monocrit(self): for t in hierarchy_test_data.fcluster_distance: self.check_fcluster_monocrit(t) for t in hierarchy_test_data.fcluster_maxclust: self.check_fcluster_maxclust_monocrit(t) def check_fcluster_monocrit(self, t): expectedT = hierarchy_test_data.fcluster_distance[t] Z = single(hierarchy_test_data.Q_X) T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z)) assert_(is_isomorphic(T, expectedT)) def check_fcluster_maxclust_monocrit(self, t): expectedT = hierarchy_test_data.fcluster_maxclust[t] Z = single(hierarchy_test_data.Q_X) T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z)) assert_(is_isomorphic(T, expectedT)) class TestLeaders(object): def test_leaders_single(self): # Tests leaders using a flat clustering generated by single linkage. 
X = hierarchy_test_data.Q_X Y = pdist(X) Z = linkage(Y) T = fcluster(Z, criterion='maxclust', t=3) Lright = (np.array([53, 55, 56]), np.array([2, 3, 1])) L = leaders(Z, T) assert_equal(L, Lright) class TestIsIsomorphic(object): def test_is_isomorphic_1(self): # Tests is_isomorphic on test case #1 (one flat cluster, different labellings) a = [1, 1, 1] b = [2, 2, 2] assert_(is_isomorphic(a, b)) assert_(is_isomorphic(b, a)) def test_is_isomorphic_2(self): # Tests is_isomorphic on test case #2 (two flat clusters, different labelings) a = [1, 7, 1] b = [2, 3, 2] assert_(is_isomorphic(a, b)) assert_(is_isomorphic(b, a)) def test_is_isomorphic_3(self): # Tests is_isomorphic on test case #3 (no flat clusters) a = [] b = [] assert_(is_isomorphic(a, b)) def test_is_isomorphic_4A(self): # Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic) a = [1, 2, 3] b = [1, 3, 2] assert_(is_isomorphic(a, b)) assert_(is_isomorphic(b, a)) def test_is_isomorphic_4B(self): # Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic) a = [1, 2, 3, 3] b = [1, 3, 2, 3] assert_(is_isomorphic(a, b) == False) assert_(is_isomorphic(b, a) == False) def test_is_isomorphic_4C(self): # Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic) a = [7, 2, 3] b = [6, 3, 2] assert_(is_isomorphic(a, b)) assert_(is_isomorphic(b, a)) def test_is_isomorphic_5(self): # Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random # clusters, random permutation of the labeling). for nc in [2, 3, 5]: self.help_is_isomorphic_randperm(1000, nc) def test_is_isomorphic_6(self): # Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random # clusters, random permutation of the labeling, slightly # nonisomorphic.) for nc in [2, 3, 5]: self.help_is_isomorphic_randperm(1000, nc, True, 5) def test_is_isomorphic_7(self): # Regression test for gh-6271 assert_(not is_isomorphic([1, 2, 3], [1, 1, 1])) def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0): for k in range(3): a = np.int_(np.random.rand(nobs) * nclusters) b = np.zeros(a.size, dtype=np.int_) P = np.random.permutation(nclusters) for i in xrange(0, a.shape[0]): b[i] = P[a[i]] if noniso: Q = np.random.permutation(nobs) b[Q[0:nerrors]] += 1 b[Q[0:nerrors]] %= nclusters assert_(is_isomorphic(a, b) == (not noniso)) assert_(is_isomorphic(b, a) == (not noniso)) class TestIsValidLinkage(object): def test_is_valid_linkage_various_size(self): for nrow, ncol, valid in [(2, 5, False), (2, 3, False), (1, 4, True), (2, 4, True)]: self.check_is_valid_linkage_various_size(nrow, ncol, valid) def check_is_valid_linkage_various_size(self, nrow, ncol, valid): # Tests is_valid_linkage(Z) with linkage matrics of various sizes Z = np.asarray([[0, 1, 3.0, 2, 5], [3, 2, 4.0, 3, 3]], dtype=np.double) Z = Z[:nrow, :ncol] assert_(is_valid_linkage(Z) == valid) if not valid: assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_int_type(self): # Tests is_valid_linkage(Z) with integer type. Z = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=int) assert_(is_valid_linkage(Z) == False) assert_raises(TypeError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_empty(self): # Tests is_valid_linkage(Z) with empty linkage. 
Z = np.zeros((0, 4), dtype=np.double) assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_4_and_up(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) assert_(is_valid_linkage(Z) == True) def test_is_valid_linkage_4_and_up_neg_index_left(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3) with negative indices (left). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) Z[i//2,0] = -2 assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_4_and_up_neg_index_right(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3) with negative indices (right). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) Z[i//2,1] = -2 assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_4_and_up_neg_dist(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3) with negative distances. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) Z[i//2,2] = -0.5 assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) def test_is_valid_linkage_4_and_up_neg_counts(self): # Tests is_valid_linkage(Z) on linkage on observation sets between # sizes 4 and 15 (step size 3) with negative counts. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) Z[i//2,3] = -2 assert_(is_valid_linkage(Z) == False) assert_raises(ValueError, is_valid_linkage, Z, throw=True) class TestIsValidInconsistent(object): def test_is_valid_im_int_type(self): # Tests is_valid_im(R) with integer type. R = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=int) assert_(is_valid_im(R) == False) assert_raises(TypeError, is_valid_im, R, throw=True) def test_is_valid_im_various_size(self): for nrow, ncol, valid in [(2, 5, False), (2, 3, False), (1, 4, True), (2, 4, True)]: self.check_is_valid_im_various_size(nrow, ncol, valid) def check_is_valid_im_various_size(self, nrow, ncol, valid): # Tests is_valid_im(R) with linkage matrics of various sizes R = np.asarray([[0, 1, 3.0, 2, 5], [3, 2, 4.0, 3, 3]], dtype=np.double) R = R[:nrow, :ncol] assert_(is_valid_im(R) == valid) if not valid: assert_raises(ValueError, is_valid_im, R, throw=True) def test_is_valid_im_empty(self): # Tests is_valid_im(R) with empty inconsistency matrix. R = np.zeros((0, 4), dtype=np.double) assert_(is_valid_im(R) == False) assert_raises(ValueError, is_valid_im, R, throw=True) def test_is_valid_im_4_and_up(self): # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 # (step size 3). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) R = inconsistent(Z) assert_(is_valid_im(R) == True) def test_is_valid_im_4_and_up_neg_index_left(self): # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 # (step size 3) with negative link height means. 
for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) R = inconsistent(Z) R[i//2,0] = -2.0 assert_(is_valid_im(R) == False) assert_raises(ValueError, is_valid_im, R, throw=True) def test_is_valid_im_4_and_up_neg_index_right(self): # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 # (step size 3) with negative link height standard deviations. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) R = inconsistent(Z) R[i//2,1] = -2.0 assert_(is_valid_im(R) == False) assert_raises(ValueError, is_valid_im, R, throw=True) def test_is_valid_im_4_and_up_neg_dist(self): # Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 # (step size 3) with negative link counts. for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) R = inconsistent(Z) R[i//2,2] = -0.5 assert_(is_valid_im(R) == False) assert_raises(ValueError, is_valid_im, R, throw=True) class TestNumObsLinkage(object): def test_num_obs_linkage_empty(self): # Tests num_obs_linkage(Z) with empty linkage. Z = np.zeros((0, 4), dtype=np.double) assert_raises(ValueError, num_obs_linkage, Z) def test_num_obs_linkage_1x4(self): # Tests num_obs_linkage(Z) on linkage over 2 observations. Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) assert_equal(num_obs_linkage(Z), 2) def test_num_obs_linkage_2x4(self): # Tests num_obs_linkage(Z) on linkage over 3 observations. Z = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=np.double) assert_equal(num_obs_linkage(Z), 3) def test_num_obs_linkage_4_and_up(self): # Tests num_obs_linkage(Z) on linkage on observation sets between sizes # 4 and 15 (step size 3). for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) assert_equal(num_obs_linkage(Z), i) class TestLeavesList(object): def test_leaves_list_1x4(self): # Tests leaves_list(Z) on a 1x4 linkage. Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double) to_tree(Z) assert_equal(leaves_list(Z), [0, 1]) def test_leaves_list_2x4(self): # Tests leaves_list(Z) on a 2x4 linkage. Z = np.asarray([[0, 1, 3.0, 2], [3, 2, 4.0, 3]], dtype=np.double) to_tree(Z) assert_equal(leaves_list(Z), [0, 1, 2]) def test_leaves_list_Q(self): for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']: self.check_leaves_list_Q(method) def check_leaves_list_Q(self, method): # Tests leaves_list(Z) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) node = to_tree(Z) assert_equal(node.pre_order(), leaves_list(Z)) def test_Q_subtree_pre_order(self): # Tests that pre_order() works when called on sub-trees. X = hierarchy_test_data.Q_X Z = linkage(X, 'single') node = to_tree(Z) assert_equal(node.pre_order(), (node.get_left().pre_order() + node.get_right().pre_order())) class TestCorrespond(object): def test_correspond_empty(self): # Tests correspond(Z, y) with empty linkage and condensed distance matrix. y = np.zeros((0,)) Z = np.zeros((0,4)) assert_raises(ValueError, correspond, Z, y) def test_correspond_2_and_up(self): # Tests correspond(Z, y) on linkage and CDMs over observation sets of # different sizes. for i in xrange(2, 4): y = np.random.rand(i*(i-1)//2) Z = linkage(y) assert_(correspond(Z, y)) for i in xrange(4, 15, 3): y = np.random.rand(i*(i-1)//2) Z = linkage(y) assert_(correspond(Z, y)) def test_correspond_4_and_up(self): # Tests correspond(Z, y) on linkage and CDMs over observation sets of # different sizes. Correspondence should be false. 
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) + list(zip(list(range(3, 5)), list(range(2, 4))))): y = np.random.rand(i*(i-1)//2) y2 = np.random.rand(j*(j-1)//2) Z = linkage(y) Z2 = linkage(y2) assert_equal(correspond(Z, y2), False) assert_equal(correspond(Z2, y), False) def test_correspond_4_and_up_2(self): # Tests correspond(Z, y) on linkage and CDMs over observation sets of # different sizes. Correspondence should be false. for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) + list(zip(list(range(2, 7)), list(range(16, 21))))): y = np.random.rand(i*(i-1)//2) y2 = np.random.rand(j*(j-1)//2) Z = linkage(y) Z2 = linkage(y2) assert_equal(correspond(Z, y2), False) assert_equal(correspond(Z2, y), False) def test_num_obs_linkage_multi_matrix(self): # Tests num_obs_linkage with observation matrices of multiple sizes. for n in xrange(2, 10): X = np.random.rand(n, 4) Y = pdist(X) Z = linkage(Y) assert_equal(num_obs_linkage(Z), n) class TestIsMonotonic(object): def test_is_monotonic_empty(self): # Tests is_monotonic(Z) on an empty linkage. Z = np.zeros((0, 4)) assert_raises(ValueError, is_monotonic, Z) def test_is_monotonic_1x4(self): # Tests is_monotonic(Z) on 1x4 linkage. Expecting True. Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double) assert_equal(is_monotonic(Z), True) def test_is_monotonic_2x4_T(self): # Tests is_monotonic(Z) on 2x4 linkage. Expecting True. Z = np.asarray([[0, 1, 0.3, 2], [2, 3, 0.4, 3]], dtype=np.double) assert_equal(is_monotonic(Z), True) def test_is_monotonic_2x4_F(self): # Tests is_monotonic(Z) on 2x4 linkage. Expecting False. Z = np.asarray([[0, 1, 0.4, 2], [2, 3, 0.3, 3]], dtype=np.double) assert_equal(is_monotonic(Z), False) def test_is_monotonic_3x4_T(self): # Tests is_monotonic(Z) on 3x4 linkage. Expecting True. Z = np.asarray([[0, 1, 0.3, 2], [2, 3, 0.4, 2], [4, 5, 0.6, 4]], dtype=np.double) assert_equal(is_monotonic(Z), True) def test_is_monotonic_3x4_F1(self): # Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False. Z = np.asarray([[0, 1, 0.3, 2], [2, 3, 0.2, 2], [4, 5, 0.6, 4]], dtype=np.double) assert_equal(is_monotonic(Z), False) def test_is_monotonic_3x4_F2(self): # Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False. Z = np.asarray([[0, 1, 0.8, 2], [2, 3, 0.4, 2], [4, 5, 0.6, 4]], dtype=np.double) assert_equal(is_monotonic(Z), False) def test_is_monotonic_3x4_F3(self): # Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False Z = np.asarray([[0, 1, 0.3, 2], [2, 3, 0.4, 2], [4, 5, 0.2, 4]], dtype=np.double) assert_equal(is_monotonic(Z), False) def test_is_monotonic_tdist_linkage1(self): # Tests is_monotonic(Z) on clustering generated by single linkage on # tdist data set. Expecting True. Z = linkage(hierarchy_test_data.ytdist, 'single') assert_equal(is_monotonic(Z), True) def test_is_monotonic_tdist_linkage2(self): # Tests is_monotonic(Z) on clustering generated by single linkage on # tdist data set. Perturbing. Expecting False. Z = linkage(hierarchy_test_data.ytdist, 'single') Z[2,2] = 0.0 assert_equal(is_monotonic(Z), False) def test_is_monotonic_Q_linkage(self): # Tests is_monotonic(Z) on clustering generated by single linkage on # Q data set. Expecting True. X = hierarchy_test_data.Q_X Z = linkage(X, 'single') assert_equal(is_monotonic(Z), True) class TestMaxDists(object): def test_maxdists_empty_linkage(self): # Tests maxdists(Z) on empty linkage. Expecting exception. 
Z = np.zeros((0, 4), dtype=np.double) assert_raises(ValueError, maxdists, Z) def test_maxdists_one_cluster_linkage(self): # Tests maxdists(Z) on linkage with one cluster. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) MD = maxdists(Z) expectedMD = calculate_maximum_distances(Z) assert_allclose(MD, expectedMD, atol=1e-15) def test_maxdists_Q_linkage(self): for method in ['single', 'complete', 'ward', 'centroid', 'median']: self.check_maxdists_Q_linkage(method) def check_maxdists_Q_linkage(self, method): # Tests maxdists(Z) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) MD = maxdists(Z) expectedMD = calculate_maximum_distances(Z) assert_allclose(MD, expectedMD, atol=1e-15) class TestMaxInconsts(object): def test_maxinconsts_empty_linkage(self): # Tests maxinconsts(Z, R) on empty linkage. Expecting exception. Z = np.zeros((0, 4), dtype=np.double) R = np.zeros((0, 4), dtype=np.double) assert_raises(ValueError, maxinconsts, Z, R) def test_maxinconsts_difrow_linkage(self): # Tests maxinconsts(Z, R) on linkage and inconsistency matrices with # different numbers of clusters. Expecting exception. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.random.rand(2, 4) assert_raises(ValueError, maxinconsts, Z, R) def test_maxinconsts_one_cluster_linkage(self): # Tests maxinconsts(Z, R) on linkage with one cluster. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) MD = maxinconsts(Z, R) expectedMD = calculate_maximum_inconsistencies(Z, R) assert_allclose(MD, expectedMD, atol=1e-15) def test_maxinconsts_Q_linkage(self): for method in ['single', 'complete', 'ward', 'centroid', 'median']: self.check_maxinconsts_Q_linkage(method) def check_maxinconsts_Q_linkage(self, method): # Tests maxinconsts(Z, R) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) R = inconsistent(Z) MD = maxinconsts(Z, R) expectedMD = calculate_maximum_inconsistencies(Z, R) assert_allclose(MD, expectedMD, atol=1e-15) class TestMaxRStat(object): def test_maxRstat_invalid_index(self): for i in [3.3, -1, 4]: self.check_maxRstat_invalid_index(i) def check_maxRstat_invalid_index(self, i): # Tests maxRstat(Z, R, i). Expecting exception. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) if isinstance(i, int): assert_raises(ValueError, maxRstat, Z, R, i) else: assert_raises(TypeError, maxRstat, Z, R, i) def test_maxRstat_empty_linkage(self): for i in range(4): self.check_maxRstat_empty_linkage(i) def check_maxRstat_empty_linkage(self, i): # Tests maxRstat(Z, R, i) on empty linkage. Expecting exception. Z = np.zeros((0, 4), dtype=np.double) R = np.zeros((0, 4), dtype=np.double) assert_raises(ValueError, maxRstat, Z, R, i) def test_maxRstat_difrow_linkage(self): for i in range(4): self.check_maxRstat_difrow_linkage(i) def check_maxRstat_difrow_linkage(self, i): # Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with # different numbers of clusters. Expecting exception. Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.random.rand(2, 4) assert_raises(ValueError, maxRstat, Z, R, i) def test_maxRstat_one_cluster_linkage(self): for i in range(4): self.check_maxRstat_one_cluster_linkage(i) def check_maxRstat_one_cluster_linkage(self, i): # Tests maxRstat(Z, R, i) on linkage with one cluster. 
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double) R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double) MD = maxRstat(Z, R, 1) expectedMD = calculate_maximum_inconsistencies(Z, R, 1) assert_allclose(MD, expectedMD, atol=1e-15) def test_maxRstat_Q_linkage(self): for method in ['single', 'complete', 'ward', 'centroid', 'median']: for i in range(4): self.check_maxRstat_Q_linkage(method, i) def check_maxRstat_Q_linkage(self, method, i): # Tests maxRstat(Z, R, i) on the Q data set X = hierarchy_test_data.Q_X Z = linkage(X, method) R = inconsistent(Z) MD = maxRstat(Z, R, 1) expectedMD = calculate_maximum_inconsistencies(Z, R, 1) assert_allclose(MD, expectedMD, atol=1e-15) class TestDendrogram(object): def test_dendrogram_single_linkage_tdist(self): # Tests dendrogram calculation on single linkage of the tdist data set. Z = linkage(hierarchy_test_data.ytdist, 'single') R = dendrogram(Z, no_plot=True) leaves = R["leaves"] assert_equal(leaves, [2, 5, 1, 0, 3, 4]) def test_valid_orientation(self): Z = linkage(hierarchy_test_data.ytdist, 'single') assert_raises(ValueError, dendrogram, Z, orientation="foo") @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") def test_dendrogram_plot(self): for orientation in ['top', 'bottom', 'left', 'right']: self.check_dendrogram_plot(orientation) def check_dendrogram_plot(self, orientation): # Tests dendrogram plotting. Z = linkage(hierarchy_test_data.ytdist, 'single') expected = {'color_list': ['g', 'b', 'b', 'b', 'b'], 'dcoord': [[0.0, 138.0, 138.0, 0.0], [0.0, 219.0, 219.0, 0.0], [0.0, 255.0, 255.0, 219.0], [0.0, 268.0, 268.0, 255.0], [138.0, 295.0, 295.0, 268.0]], 'icoord': [[5.0, 5.0, 15.0, 15.0], [45.0, 45.0, 55.0, 55.0], [35.0, 35.0, 50.0, 50.0], [25.0, 25.0, 42.5, 42.5], [10.0, 10.0, 33.75, 33.75]], 'ivl': ['2', '5', '1', '0', '3', '4'], 'leaves': [2, 5, 1, 0, 3, 4]} fig = plt.figure() ax = fig.add_subplot(221) # test that dendrogram accepts ax keyword R1 = dendrogram(Z, ax=ax, orientation=orientation) assert_equal(R1, expected) # test that dendrogram accepts and handle the leaf_font_size and # leaf_rotation keywords R1a = dendrogram(Z, ax=ax, orientation=orientation, leaf_font_size=20, leaf_rotation=90) testlabel = ( ax.get_xticklabels()[0] if orientation in ['top', 'bottom'] else ax.get_yticklabels()[0] ) assert_equal(testlabel.get_rotation(), 90) assert_equal(testlabel.get_size(), 20) R1a = dendrogram(Z, ax=ax, orientation=orientation, leaf_rotation=90) testlabel = ( ax.get_xticklabels()[0] if orientation in ['top', 'bottom'] else ax.get_yticklabels()[0] ) assert_equal(testlabel.get_rotation(), 90) R1a = dendrogram(Z, ax=ax, orientation=orientation, leaf_font_size=20) testlabel = ( ax.get_xticklabels()[0] if orientation in ['top', 'bottom'] else ax.get_yticklabels()[0] ) assert_equal(testlabel.get_size(), 20) plt.close() # test plotting to gca (will import pylab) R2 = dendrogram(Z, orientation=orientation) plt.close() assert_equal(R2, expected) @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") def test_dendrogram_truncate_mode(self): Z = linkage(hierarchy_test_data.ytdist, 'single') R = dendrogram(Z, 2, 'lastp', show_contracted=True) plt.close() assert_equal(R, {'color_list': ['b'], 'dcoord': [[0.0, 295.0, 295.0, 0.0]], 'icoord': [[5.0, 5.0, 15.0, 15.0]], 'ivl': ['(2)', '(4)'], 'leaves': [6, 9]}) R = dendrogram(Z, 2, 'mtica', show_contracted=True) plt.close() assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'], 'dcoord': [[0.0, 138.0, 138.0, 0.0], [0.0, 255.0, 255.0, 0.0], [0.0, 268.0, 268.0, 255.0], [138.0, 295.0, 295.0, 268.0]], 
'icoord': [[5.0, 5.0, 15.0, 15.0], [35.0, 35.0, 45.0, 45.0], [25.0, 25.0, 40.0, 40.0], [10.0, 10.0, 32.5, 32.5]], 'ivl': ['2', '5', '1', '0', '(2)'], 'leaves': [2, 5, 1, 0, 7]}) def test_dendrogram_colors(self): # Tests dendrogram plots with alternate colors Z = linkage(hierarchy_test_data.ytdist, 'single') set_link_color_palette(['c', 'm', 'y', 'k']) R = dendrogram(Z, no_plot=True, above_threshold_color='g', color_threshold=250) set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k']) color_list = R['color_list'] assert_equal(color_list, ['c', 'm', 'g', 'g', 'g']) # reset color palette (global list) set_link_color_palette(None) def calculate_maximum_distances(Z): # Used for testing correctness of maxdists. n = Z.shape[0] + 1 B = np.zeros((n-1,)) q = np.zeros((3,)) for i in xrange(0, n - 1): q[:] = 0.0 left = Z[i, 0] right = Z[i, 1] if left >= n: q[0] = B[int(left) - n] if right >= n: q[1] = B[int(right) - n] q[2] = Z[i, 2] B[i] = q.max() return B def calculate_maximum_inconsistencies(Z, R, k=3): # Used for testing correctness of maxinconsts. n = Z.shape[0] + 1 B = np.zeros((n-1,)) q = np.zeros((3,)) for i in xrange(0, n - 1): q[:] = 0.0 left = Z[i, 0] right = Z[i, 1] if left >= n: q[0] = B[int(left) - n] if right >= n: q[1] = B[int(right) - n] q[2] = R[i, k] B[i] = q.max() return B def within_tol(a, b, tol): return np.abs(a - b).max() < tol def test_unsupported_uncondensed_distance_matrix_linkage_warning(): assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]]) def test_euclidean_linkage_value_error(): for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS: assert_raises(ValueError, linkage, [[1, 1], [1, 1]], method=method, metric='cityblock') def test_2x2_linkage(): Z1 = linkage([1], method='single', metric='euclidean') Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean') assert_allclose(Z1, Z2) def test_node_compare(): np.random.seed(23) nobs = 50 X = np.random.randn(nobs, 4) Z = scipy.cluster.hierarchy.ward(X) tree = to_tree(Z) assert_(tree > tree.get_left()) assert_(tree.get_right() > tree.get_left()) assert_(tree.get_right() == tree.get_right()) assert_(tree.get_right() != tree.get_left()) def test_cut_tree(): np.random.seed(23) nobs = 50 X = np.random.randn(nobs, 4) Z = scipy.cluster.hierarchy.ward(X) cutree = cut_tree(Z) assert_equal(cutree[:, 0], np.arange(nobs)) assert_equal(cutree[:, -1], np.zeros(nobs)) assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1)) assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5)) assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10])) assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5])) nodes = _order_cluster_tree(Z) heights = np.array([node.dist for node in nodes]) assert_equal(cutree[:, np.searchsorted(heights, [5])], cut_tree(Z, height=5)) assert_equal(cutree[:, np.searchsorted(heights, [5, 10])], cut_tree(Z, height=[5, 10])) assert_equal(cutree[:, np.searchsorted(heights, [10, 5])], cut_tree(Z, height=[10, 5])) def test_optimal_leaf_ordering(): # test with the distance vector y Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist), hierarchy_test_data.ytdist) expectedZ = hierarchy_test_data.linkage_ytdist_single_olo assert_allclose(Z, expectedZ, atol=1e-10) # test with the observation matrix X Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'), hierarchy_test_data.X) expectedZ = hierarchy_test_data.linkage_X_ward_olo assert_allclose(Z, expectedZ, atol=1e-06) def test_Heap(): values = np.array([2, -1, 0, -1.5, 3]) heap = Heap(values) pair = heap.get_min() 
assert_equal(pair['key'], 3) assert_equal(pair['value'], -1.5) heap.remove_min() pair = heap.get_min() assert_equal(pair['key'], 1) assert_equal(pair['value'], -1) heap.change_value(1, 2.5) pair = heap.get_min() assert_equal(pair['key'], 2) assert_equal(pair['value'], 0) heap.remove_min() heap.remove_min() heap.change_value(1, 10) pair = heap.get_min() assert_equal(pair['key'], 4) assert_equal(pair['value'], 3) heap.remove_min() pair = heap.get_min() assert_equal(pair['key'], 1) assert_equal(pair['value'], 10)
41,551
38.08937
100
py
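Editor's note: the test module above drives the scipy.cluster.hierarchy API. The sketch below is an editor's addition, not taken from the record; it condenses the linkage -> validity check -> flat-cluster workflow those tests exercise, on a tiny hand-made data set.

import numpy as np
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, is_valid_linkage, fcluster

X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
Z = linkage(pdist(X), method='single')   # 4 observations -> (n-1) x 4 merge matrix
assert is_valid_linkage(Z)               # same format the validity tests check
labels = fcluster(Z, t=2, criterion='maxclust')
# the two tight pairs fall into two flat clusters, e.g. array([1, 1, 2, 2])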
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/cluster/tests/test_vq.py
from __future__ import division, print_function, absolute_import import warnings import sys import numpy as np from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_allclose, assert_equal, assert_) from scipy._lib._numpy_compat import suppress_warnings import pytest from pytest import raises as assert_raises from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten, ClusterError, _krandinit) from scipy.cluster import _vq TESTDATA_2D = np.array([ -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68, -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45, 2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28, -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07, -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29, -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25, 2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21, -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67, -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94, -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33, 2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8, -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29, 2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75, -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17, 0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44, -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83, 0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28, 3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62, -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35, 3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84, -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75, -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86, -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83, 0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75, -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03, 3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0, 3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99, -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21, 1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75, 4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37, -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0, -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84, 2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69, -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51, -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71, -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61, 2.11]).reshape((200, 2)) # Global data X = np.array([[3.0, 3], [4, 3], [4, 2], [9, 2], [5, 1], [6, 2], [9, 4], [5, 2], [5, 4], [7, 4], [6, 5]]) CODET1 = np.array([[3.0000, 3.0000], [6.2000, 4.0000], [5.8000, 1.8000]]) CODET2 = np.array([[11.0/3, 8.0/3], [6.7500, 4.2500], [6.2500, 1.7500]]) LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1]) class TestWhiten(object): def test_whiten(self): desired = np.array([[5.08738849, 2.97091878], [3.19909255, 0.69660580], [4.51041982, 
0.02640918], [4.38567074, 0.95120889], [2.32191480, 1.63195503]]) for tp in np.array, np.matrix: obs = tp([[0.98744510, 0.82766775], [0.62093317, 0.19406729], [0.87545741, 0.00735733], [0.85124403, 0.26499712], [0.45067590, 0.45464607]]) assert_allclose(whiten(obs), desired, rtol=1e-5) def test_whiten_zero_std(self): desired = np.array([[0., 1.0, 2.86666544], [0., 1.0, 1.32460034], [0., 1.0, 3.74382172]]) for tp in np.array, np.matrix: obs = tp([[0., 1., 0.74109533], [0., 1., 0.34243798], [0., 1., 0.96785929]]) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_allclose(whiten(obs), desired, rtol=1e-5) assert_equal(len(w), 1) assert_(issubclass(w[-1].category, RuntimeWarning)) def test_whiten_not_finite(self): for tp in np.array, np.matrix: for bad_value in np.nan, np.inf, -np.inf: obs = tp([[0.98744510, bad_value], [0.62093317, 0.19406729], [0.87545741, 0.00735733], [0.85124403, 0.26499712], [0.45067590, 0.45464607]]) assert_raises(ValueError, whiten, obs) class TestVq(object): def test_py_vq(self): initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) for tp in np.array, np.matrix: label1 = py_vq(tp(X), tp(initc))[0] assert_array_equal(label1, LABEL1) def test_vq(self): initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) for tp in np.array, np.matrix: label1, dist = _vq.vq(tp(X), tp(initc)) assert_array_equal(label1, LABEL1) tlabel1, tdist = vq(tp(X), tp(initc)) def test_vq_1d(self): # Test special rank 1 vq algo, python implementation. data = X[:, 0] initc = data[:3] a, b = _vq.vq(data, initc) ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis]) assert_array_equal(a, ta) assert_array_equal(b, tb) def test__vq_sametype(self): a = np.array([1.0, 2.0], dtype=np.float64) b = a.astype(np.float32) assert_raises(TypeError, _vq.vq, a, b) def test__vq_invalid_type(self): a = np.array([1, 2], dtype=int) assert_raises(TypeError, _vq.vq, a, a) def test_vq_large_nfeat(self): X = np.random.rand(20, 20) code_book = np.random.rand(3, 20) codes0, dis0 = _vq.vq(X, code_book) codes1, dis1 = py_vq(X, code_book) assert_allclose(dis0, dis1, 1e-5) assert_array_equal(codes0, codes1) X = X.astype(np.float32) code_book = code_book.astype(np.float32) codes0, dis0 = _vq.vq(X, code_book) codes1, dis1 = py_vq(X, code_book) assert_allclose(dis0, dis1, 1e-5) assert_array_equal(codes0, codes1) def test_vq_large_features(self): X = np.random.rand(10, 5) * 1000000 code_book = np.random.rand(2, 5) * 1000000 codes0, dis0 = _vq.vq(X, code_book) codes1, dis1 = py_vq(X, code_book) assert_allclose(dis0, dis1, 1e-5) assert_array_equal(codes0, codes1) class TestKMean(object): def test_large_features(self): # Generate a data set with large values, and run kmeans on it to # (regression for 1077). d = 300 n = 100 m1 = np.random.randn(d) m2 = np.random.randn(d) x = 10000 * np.random.randn(n, d) - 20000 * m1 y = 10000 * np.random.randn(n, d) + 20000 * m2 data = np.empty((x.shape[0] + y.shape[0], d), np.double) data[:x.shape[0]] = x data[x.shape[0]:] = y kmeans(data, 2) def test_kmeans_simple(self): np.random.seed(54321) initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) for tp in np.array, np.matrix: code1 = kmeans(tp(X), tp(initc), iter=1)[0] assert_array_almost_equal(code1, CODET2) def test_kmeans_lost_cluster(self): # This will cause kmeans to have a cluster with no points. 
data = TESTDATA_2D initk = np.array([[-1.8127404, -0.67128041], [2.04621601, 0.07401111], [-2.31149087,-0.05160469]]) kmeans(data, initk) with suppress_warnings() as sup: sup.filter(UserWarning, "One of the clusters is empty. Re-run kmeans with a " "different initialization") kmeans2(data, initk, missing='warn') assert_raises(ClusterError, kmeans2, data, initk, missing='raise') def test_kmeans2_simple(self): np.random.seed(12345678) initc = np.concatenate(([[X[0]], [X[1]], [X[2]]])) for tp in np.array, np.matrix: code1 = kmeans2(tp(X), tp(initc), iter=1)[0] code2 = kmeans2(tp(X), tp(initc), iter=2)[0] assert_array_almost_equal(code1, CODET1) assert_array_almost_equal(code2, CODET2) def test_kmeans2_rank1(self): data = TESTDATA_2D data1 = data[:, 0] initc = data1[:3] code = initc.copy() kmeans2(data1, code, iter=1)[0] kmeans2(data1, code, iter=2)[0] def test_kmeans2_rank1_2(self): data = TESTDATA_2D data1 = data[:, 0] kmeans2(data1, 2, iter=1) def test_kmeans2_high_dim(self): # test kmeans2 when the number of dimensions exceeds the number # of input points data = TESTDATA_2D data = data.reshape((20, 20))[:10] kmeans2(data, 2) def test_kmeans2_init(self): np.random.seed(12345) data = TESTDATA_2D kmeans2(data, 3, minit='points') kmeans2(data[:, :1], 3, minit='points') # special case (1-D) kmeans2(data, 3, minit='random') kmeans2(data[:, :1], 3, minit='random') # special case (1-D) @pytest.mark.skipif(sys.platform == 'win32', reason='Fails with MemoryError in Wine.') def test_krandinit(self): data = TESTDATA_2D datas = [data.reshape((200, 2)), data.reshape((20, 20))[:10]] k = int(1e6) for data in datas: np.random.seed(1234) init = _krandinit(data, k) orig_cov = np.cov(data, rowvar=0) init_cov = np.cov(init, rowvar=0) assert_allclose(orig_cov, init_cov, atol=1e-2) def test_kmeans2_empty(self): # Regression test for gh-1032. assert_raises(ValueError, kmeans2, [], 2) def test_kmeans_0k(self): # Regression test for gh-1073: fail when k arg is 0. assert_raises(ValueError, kmeans, X, 0) assert_raises(ValueError, kmeans2, X, 0) assert_raises(ValueError, kmeans2, X, np.array([])) def test_kmeans_large_thres(self): # Regression test for gh-1774 x = np.array([1,2,3,4,10], dtype=float) res = kmeans(x, 1, thresh=1e16) assert_allclose(res[0], np.array([4.])) assert_allclose(res[1], 2.3999999999999999)
11,265
38.529825
90
py
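Editor's note: a compact illustration (editor's addition, not from the record) of the vq() behaviour the tests above verify; the code book is CODET1 from the fixtures and the observations are three rows taken from the X fixture.

import numpy as np
from scipy.cluster.vq import vq

obs = np.array([[3.0, 3.0], [9.0, 2.0], [6.0, 5.0]])         # rows of X
code_book = np.array([[3.0, 3.0], [6.2, 4.0], [5.8, 1.8]])   # CODET1
codes, dists = vq(obs, code_book)
# codes[i] is the index of the code_book row nearest to obs[i];
# dists[i] is that distance
print(codes)   # [0 2 1]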
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/cluster/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/cluster/tests/hierarchy_test_data.py
from numpy import array Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02], [7.50205180e-01, 4.60299830e-01, 8.98696460e-01], [6.65461230e-01, 6.94011420e-01, 9.10465700e-01], [9.64047590e-01, 1.43082200e-03, 7.39874220e-01], [1.08159060e-01, 5.53028790e-01, 6.63804780e-02], [9.31359130e-01, 8.25424910e-01, 9.52315440e-01], [6.78086960e-01, 3.41903970e-01, 5.61481950e-01], [9.82730940e-01, 7.04605210e-01, 8.70978630e-02], [6.14691610e-01, 4.69989230e-02, 6.02406450e-01], [5.80161260e-01, 9.17354970e-01, 5.88163850e-01], [1.38246310e+00, 1.96358160e+00, 1.94437880e+00], [2.10675860e+00, 1.67148730e+00, 1.34854480e+00], [1.39880070e+00, 1.66142050e+00, 1.32224550e+00], [1.71410460e+00, 1.49176380e+00, 1.45432170e+00], [1.54102340e+00, 1.84374950e+00, 1.64658950e+00], [2.08512480e+00, 1.84524350e+00, 2.17340850e+00], [1.30748740e+00, 1.53801650e+00, 2.16007740e+00], [1.41447700e+00, 1.99329070e+00, 1.99107420e+00], [1.61943490e+00, 1.47703280e+00, 1.89788160e+00], [1.59880600e+00, 1.54988980e+00, 1.57563350e+00], [3.37247380e+00, 2.69635310e+00, 3.39981700e+00], [3.13705120e+00, 3.36528090e+00, 3.06089070e+00], [3.29413250e+00, 3.19619500e+00, 2.90700170e+00], [2.65510510e+00, 3.06785900e+00, 2.97198540e+00], [3.30941040e+00, 2.59283970e+00, 2.57714110e+00], [2.59557220e+00, 3.33477370e+00, 3.08793190e+00], [2.58206180e+00, 3.41615670e+00, 3.26441990e+00], [2.71127000e+00, 2.77032450e+00, 2.63466500e+00], [2.79617850e+00, 3.25473720e+00, 3.41801560e+00], [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]]) ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754., 564., 138., 219., 869., 669.]) linkage_ytdist_single = array([[2., 5., 138., 2.], [3., 4., 219., 2.], [0., 7., 255., 3.], [1., 8., 268., 4.], [6., 9., 295., 6.]]) linkage_ytdist_complete = array([[2., 5., 138., 2.], [3., 4., 219., 2.], [1., 6., 400., 3.], [0., 7., 412., 3.], [8., 9., 996., 6.]]) linkage_ytdist_average = array([[2., 5., 138., 2.], [3., 4., 219., 2.], [0., 7., 333.5, 3.], [1., 6., 347.5, 3.], [8., 9., 680.77777778, 6.]]) linkage_ytdist_weighted = array([[2., 5., 138., 2.], [3., 4., 219., 2.], [0., 7., 333.5, 3.], [1., 6., 347.5, 3.], [8., 9., 670.125, 6.]]) # the optimal leaf ordering of linkage_ytdist_single linkage_ytdist_single_olo = array([[5., 2., 138., 2.], [4., 3., 219., 2.], [7., 0., 255., 3.], [1., 8., 268., 4.], [6., 9., 295., 6.]]) X = array([[1.43054825, -7.5693489], [6.95887839, 6.82293382], [2.87137846, -9.68248579], [7.87974764, -6.05485803], [8.24018364, -6.09495602], [7.39020262, 8.54004355]]) linkage_X_centroid = array([[3., 4., 0.36265956, 2.], [1., 5., 1.77045373, 2.], [0., 2., 2.55760419, 2.], [6., 8., 6.43614494, 4.], [7., 9., 15.17363237, 6.]]) linkage_X_median = array([[3., 4., 0.36265956, 2.], [1., 5., 1.77045373, 2.], [0., 2., 2.55760419, 2.], [6., 8., 6.43614494, 4.], [7., 9., 15.17363237, 6.]]) linkage_X_ward = array([[3., 4., 0.36265956, 2.], [1., 5., 1.77045373, 2.], [0., 2., 2.55760419, 2.], [6., 8., 9.10208346, 4.], [7., 9., 24.7784379, 6.]]) # the optimal leaf ordering of linkage_X_ward linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.], [5., 1., 1.77045373, 2.], [2., 0., 2.55760419, 2.], [6., 8., 9.10208346, 4.], [7., 9., 24.7784379, 6.]]) inconsistent_ytdist = { 1: array([[138., 0., 1., 0.], [219., 0., 1., 0.], [255., 0., 1., 0.], [268., 0., 1., 0.], [295., 0., 1., 0.]]), 2: array([[138., 0., 1., 0.], [219., 0., 1., 0.], [237., 25.45584412, 2., 0.70710678], [261.5, 9.19238816, 2., 0.70710678], [233.66666667, 83.9424406, 3., 0.7306594]]), 3: array([[138., 
0., 1., 0.], [219., 0., 1., 0.], [237., 25.45584412, 2., 0.70710678], [247.33333333, 25.38372182, 3., 0.81417007], [239., 69.36377537, 4., 0.80733783]]), 4: array([[138., 0., 1., 0.], [219., 0., 1., 0.], [237., 25.45584412, 2., 0.70710678], [247.33333333, 25.38372182, 3., 0.81417007], [235., 60.73302232, 5., 0.98793042]])} fcluster_inconsistent = { 0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])} fcluster_distance = { 0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3, 1, 1, 1, 2, 1, 1, 1, 1, 1]), 1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])} fcluster_maxclust = { 8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4, 1, 1, 1, 3, 1, 1, 1, 1, 2]), 4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])}
6,850
45.924658
78
py
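The arrays above are fixtures for scipy.cluster.hierarchy: going by the variable names, each linkage_* array holds the expected (left index, right index, merge distance, cluster size) rows that linkage() should produce for the corresponding method, while inconsistent_ytdist and the fcluster_* dictionaries hold the expected outputs of inconsistent() and fcluster(). A minimal sketch of how such fixtures are typically exercised; it assumes the module is importable from an installed scipy as scipy.cluster.tests.hierarchy_test_data, which is an assumption about packaging rather than something stated here.

import numpy as np
from scipy.cluster.hierarchy import linkage
from scipy.cluster.tests import hierarchy_test_data as htd  # assumed import path

# Single linkage on the condensed distance vector should reproduce the fixture rows.
Z = linkage(htd.ytdist, method='single')
np.testing.assert_allclose(Z, htd.linkage_ytdist_single)

# The *_olo arrays play the same role for optimal leaf ordering, and the fcluster_*
# dictionaries map thresholds to the expected flat cluster labels from fcluster().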
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/odr/setup.py
from __future__ import division, print_function, absolute_import from os.path import join def configuration(parent_package='', top_path=None): import warnings from numpy.distutils.misc_util import Configuration from numpy.distutils.system_info import get_info, BlasNotFoundError config = Configuration('odr', parent_package, top_path) libodr_files = ['d_odr.f', 'd_mprec.f', 'dlunoc.f'] blas_info = get_info('blas_opt') if blas_info: libodr_files.append('d_lpk.f') else: warnings.warn(BlasNotFoundError.__doc__) libodr_files.append('d_lpkbls.f') odrpack_src = [join('odrpack', x) for x in libodr_files] config.add_library('odrpack', sources=odrpack_src) sources = ['__odrpack.c'] libraries = ['odrpack'] + blas_info.pop('libraries', []) include_dirs = ['.'] + blas_info.pop('include_dirs', []) config.add_extension('__odrpack', sources=sources, libraries=libraries, include_dirs=include_dirs, depends=(['odrpack.h'] + odrpack_src), **blas_info ) config.add_data_dir('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
1,290
28.340909
71
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/odr/odrpack.py
""" Python wrappers for Orthogonal Distance Regression (ODRPACK). Notes ===== * Array formats -- FORTRAN stores its arrays in memory column first, i.e. an array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently, NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For efficiency and convenience, the input and output arrays of the fitting function (and its Jacobians) are passed to FORTRAN without transposition. Therefore, where the ODRPACK documentation says that the X array is of shape (N, M), it will be passed to the Python function as an array of shape (M, N). If M==1, the one-dimensional case, then nothing matters; if M>1, then your Python functions will be dealing with arrays that are indexed in reverse of the ODRPACK documentation. No real biggie, but watch out for your indexing of the Jacobians: the i,j'th elements (@f_i/@x_j) evaluated at the n'th observation will be returned as jacd[j, i, n]. Except for the Jacobians, it really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course, you can always use the transpose() function from scipy explicitly. * Examples -- See the accompanying file test/test.py for examples of how to set up fits of your own. Some are taken from the User's Guide; some are from other sources. * Models -- Some common models are instantiated in the accompanying module models.py . Contributions are welcome. Credits ======= * Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs. Robert Kern robert.kern@gmail.com """ from __future__ import division, print_function, absolute_import import numpy from warnings import warn from scipy.odr import __odrpack __all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop', 'Data', 'RealData', 'Model', 'Output', 'ODR', 'odr_error', 'odr_stop'] odr = __odrpack.odr class OdrWarning(UserWarning): """ Warning indicating that the data passed into ODR will cause problems when passed into 'odr' that the user should be aware of. """ pass class OdrError(Exception): """ Exception indicating an error in fitting. This is raised by `scipy.odr` if an error occurs during fitting. """ pass class OdrStop(Exception): """ Exception stopping fitting. You can raise this exception in your objective function to tell `scipy.odr` to stop fitting. """ pass # Backwards compatibility odr_error = OdrError odr_stop = OdrStop __odrpack._set_exceptions(OdrError, OdrStop) def _conv(obj, dtype=None): """ Convert an object to the preferred form for input to the odr routine. """ if obj is None: return obj else: if dtype is None: obj = numpy.asarray(obj) else: obj = numpy.asarray(obj, dtype) if obj.shape == (): # Scalar. return obj.dtype.type(obj) else: return obj def _report_error(info): """ Interprets the return code of the odr routine. Parameters ---------- info : int The return code of the odr routine. Returns ------- problems : list(str) A list of messages about why the odr() routine stopped. 
""" stopreason = ('Blank', 'Sum of squares convergence', 'Parameter convergence', 'Both sum of squares and parameter convergence', 'Iteration limit reached')[info % 5] if info >= 5: # questionable results or fatal error I = (info//10000 % 10, info//1000 % 10, info//100 % 10, info//10 % 10, info % 10) problems = [] if I[0] == 0: if I[1] != 0: problems.append('Derivatives possibly not correct') if I[2] != 0: problems.append('Error occurred in callback') if I[3] != 0: problems.append('Problem is not full rank at solution') problems.append(stopreason) elif I[0] == 1: if I[1] != 0: problems.append('N < 1') if I[2] != 0: problems.append('M < 1') if I[3] != 0: problems.append('NP < 1 or NP > N') if I[4] != 0: problems.append('NQ < 1') elif I[0] == 2: if I[1] != 0: problems.append('LDY and/or LDX incorrect') if I[2] != 0: problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect') if I[3] != 0: problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect') if I[4] != 0: problems.append('LWORK and/or LIWORK too small') elif I[0] == 3: if I[1] != 0: problems.append('STPB and/or STPD incorrect') if I[2] != 0: problems.append('SCLB and/or SCLD incorrect') if I[3] != 0: problems.append('WE incorrect') if I[4] != 0: problems.append('WD incorrect') elif I[0] == 4: problems.append('Error in derivatives') elif I[0] == 5: problems.append('Error occurred in callback') elif I[0] == 6: problems.append('Numerical error detected') return problems else: return [stopreason] class Data(object): """ The data to fit. Parameters ---------- x : array_like Observed data for the independent variable of the regression y : array_like, optional If array-like, observed data for the dependent variable of the regression. A scalar input implies that the model to be used on the data is implicit. we : array_like, optional If `we` is a scalar, then that value is used for all data points (and all dimensions of the response variable). If `we` is a rank-1 array of length q (the dimensionality of the response variable), then this vector is the diagonal of the covariant weighting matrix for all data points. If `we` is a rank-1 array of length n (the number of data points), then the i'th element is the weight for the i'th response variable observation (single-dimensional only). If `we` is a rank-2 array of shape (q, q), then this is the full covariant weighting matrix broadcast to each observation. If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the diagonal of the covariant weighting matrix for the i'th observation. If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the full specification of the covariant weighting matrix for each observation. If the fit is implicit, then only a positive scalar value is used. wd : array_like, optional If `wd` is a scalar, then that value is used for all data points (and all dimensions of the input variable). If `wd` = 0, then the covariant weighting matrix for each observation is set to the identity matrix (so each dimension of each observation has the same weight). If `wd` is a rank-1 array of length m (the dimensionality of the input variable), then this vector is the diagonal of the covariant weighting matrix for all data points. If `wd` is a rank-1 array of length n (the number of data points), then the i'th element is the weight for the i'th input variable observation (single-dimensional only). If `wd` is a rank-2 array of shape (m, m), then this is the full covariant weighting matrix broadcast to each observation. 
If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the diagonal of the covariant weighting matrix for the i'th observation. If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the full specification of the covariant weighting matrix for each observation. fix : array_like of ints, optional The `fix` argument is the same as ifixx in the class ODR. It is an array of integers with the same shape as data.x that determines which input observations are treated as fixed. One can use a sequence of length m (the dimensionality of the input observations) to fix some dimensions for all observations. A value of 0 fixes the observation, a value > 0 makes it free. meta : dict, optional Free-form dictionary for metadata. Notes ----- Each argument is attached to the member of the instance of the same name. The structures of `x` and `y` are described in the Model class docstring. If `y` is an integer, then the Data instance can only be used to fit with implicit models where the dimensionality of the response is equal to the specified value of `y`. The `we` argument weights the effect a deviation in the response variable has on the fit. The `wd` argument weights the effect a deviation in the input variable has on the fit. To handle multidimensional inputs and responses easily, the structure of these arguments has the n'th dimensional axis first. These arguments heavily use the structured arguments feature of ODRPACK to conveniently and flexibly support all options. See the ODRPACK User's Guide for a full explanation of how these weights are used in the algorithm. Basically, a higher value of the weight for a particular data point makes a deviation at that point more detrimental to the fit. """ def __init__(self, x, y=None, we=None, wd=None, fix=None, meta={}): self.x = _conv(x) if not isinstance(self.x, numpy.ndarray): raise ValueError(("Expected an 'ndarray' of data for 'x', " "but instead got data of type '{name}'").format( name=type(self.x).__name__)) self.y = _conv(y) self.we = _conv(we) self.wd = _conv(wd) self.fix = _conv(fix) self.meta = meta def set_meta(self, **kwds): """ Update the metadata dictionary with the keywords and data provided by keywords. Examples -------- :: data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay") """ self.meta.update(kwds) def __getattr__(self, attr): """ Dispatch attribute access to the metadata dictionary. """ if attr in self.meta: return self.meta[attr] else: raise AttributeError("'%s' not in metadata" % attr) class RealData(Data): """ The data, with weightings as actual standard deviations and/or covariances. Parameters ---------- x : array_like Observed data for the independent variable of the regression y : array_like, optional If array-like, observed data for the dependent variable of the regression. A scalar input implies that the model to be used on the data is implicit. sx : array_like, optional Standard deviations of `x`. `sx` are standard deviations of `x` and are converted to weights by dividing 1.0 by their squares. sy : array_like, optional Standard deviations of `y`. `sy` are standard deviations of `y` and are converted to weights by dividing 1.0 by their squares. covx : array_like, optional Covariance of `x` `covx` is an array of covariance matrices of `x` and are converted to weights by performing a matrix inversion on each observation's covariance matrix. 
covy : array_like, optional Covariance of `y` `covy` is an array of covariance matrices and are converted to weights by performing a matrix inversion on each observation's covariance matrix. fix : array_like, optional The argument and member fix is the same as Data.fix and ODR.ifixx: It is an array of integers with the same shape as `x` that determines which input observations are treated as fixed. One can use a sequence of length m (the dimensionality of the input observations) to fix some dimensions for all observations. A value of 0 fixes the observation, a value > 0 makes it free. meta : dict, optional Free-form dictionary for metadata. Notes ----- The weights `wd` and `we` are computed from provided values as follows: `sx` and `sy` are converted to weights by dividing 1.0 by their squares. For example, ``wd = 1./numpy.power(`sx`, 2)``. `covx` and `covy` are arrays of covariance matrices and are converted to weights by performing a matrix inversion on each observation's covariance matrix. For example, ``we[i] = numpy.linalg.inv(covy[i])``. These arguments follow the same structured argument conventions as wd and we only restricted by their natures: `sx` and `sy` can't be rank-3, but `covx` and `covy` can be. Only set *either* `sx` or `covx` (not both). Setting both will raise an exception. Same with `sy` and `covy`. """ def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None, fix=None, meta={}): if (sx is not None) and (covx is not None): raise ValueError("cannot set both sx and covx") if (sy is not None) and (covy is not None): raise ValueError("cannot set both sy and covy") # Set flags for __getattr__ self._ga_flags = {} if sx is not None: self._ga_flags['wd'] = 'sx' else: self._ga_flags['wd'] = 'covx' if sy is not None: self._ga_flags['we'] = 'sy' else: self._ga_flags['we'] = 'covy' self.x = _conv(x) if not isinstance(self.x, numpy.ndarray): raise ValueError(("Expected an 'ndarray' of data for 'x', " "but instead got data of type '{name}'").format( name=type(self.x).__name__)) self.y = _conv(y) self.sx = _conv(sx) self.sy = _conv(sy) self.covx = _conv(covx) self.covy = _conv(covy) self.fix = _conv(fix) self.meta = meta def _sd2wt(self, sd): """ Convert standard deviation to weights. """ return 1./numpy.power(sd, 2) def _cov2wt(self, cov): """ Convert covariance matrix(-ices) to weights. """ from numpy.dual import inv if len(cov.shape) == 2: return inv(cov) else: weights = numpy.zeros(cov.shape, float) for i in range(cov.shape[-1]): # n weights[:,:,i] = inv(cov[:,:,i]) return weights def __getattr__(self, attr): lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx), ('wd', 'covx'): (self._cov2wt, self.covx), ('we', 'sy'): (self._sd2wt, self.sy), ('we', 'covy'): (self._cov2wt, self.covy)} if attr not in ('wd', 'we'): if attr in self.meta: return self.meta[attr] else: raise AttributeError("'%s' not in metadata" % attr) else: func, arg = lookup_tbl[(attr, self._ga_flags[attr])] if arg is not None: return func(*(arg,)) else: return None class Model(object): """ The Model class stores information about the function you wish to fit. It stores the function itself, at the least, and optionally stores functions which compute the Jacobians used during fitting. Also, one can provide a function that will provide reasonable starting values for the fit parameters possibly given the set of data. Parameters ---------- fcn : function fcn(beta, x) --> y fjacb : function Jacobian of fcn wrt the fit parameters beta. 
fjacb(beta, x) --> @f_i(x,B)/@B_j fjacd : function Jacobian of fcn wrt the (possibly multidimensional) input variable. fjacd(beta, x) --> @f_i(x,B)/@x_j extra_args : tuple, optional If specified, `extra_args` should be a tuple of extra arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called by `apply(fcn, (beta, x) + extra_args)` estimate : array_like of rank-1 Provides estimates of the fit parameters from the data estimate(data) --> estbeta implicit : boolean If TRUE, specifies that the model is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit against meta : dict, optional freeform dictionary of metadata for the model Notes ----- Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and return a NumPy array. The `estimate` object takes an instance of the Data class. Here are the rules for the shapes of the argument and return arrays of the callback functions: `x` if the input data is single-dimensional, then `x` is rank-1 array; i.e. ``x = array([1, 2, 3, ...]); x.shape = (n,)`` If the input data is multi-dimensional, then `x` is a rank-2 array; i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``. In all cases, it has the same shape as the input data array passed to `odr`. `m` is the dimensionality of the input data, `n` is the number of observations. `y` if the response variable is single-dimensional, then `y` is a rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``. If the response variable is multi-dimensional, then `y` is a rank-2 array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape = (q, n)`` where `q` is the dimensionality of the response variable. `beta` rank-1 array of length `p` where `p` is the number of parameters; i.e. ``beta = array([B_1, B_2, ..., B_p])`` `fjacb` if the response variable is multi-dimensional, then the return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] = d f_l(X,B)/d B_k`` evaluated at the i'th data point. If `q == 1`, then the return array is only rank-2 and with shape `(p, n)`. `fjacd` as with fjacb, only the return array's shape is `(q, m, n)` such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the i'th data point. If `q == 1`, then the return array's shape is `(m, n)`. If `m == 1`, the shape is (q, n). If `m == q == 1`, the shape is `(n,)`. """ def __init__(self, fcn, fjacb=None, fjacd=None, extra_args=None, estimate=None, implicit=0, meta=None): self.fcn = fcn self.fjacb = fjacb self.fjacd = fjacd if extra_args is not None: extra_args = tuple(extra_args) self.extra_args = extra_args self.estimate = estimate self.implicit = implicit self.meta = meta def set_meta(self, **kwds): """ Update the metadata dictionary with the keywords and data provided here. Examples -------- set_meta(name="Exponential", equation="y = a exp(b x) + c") """ self.meta.update(kwds) def __getattr__(self, attr): """ Dispatch attribute access to the metadata. """ if attr in self.meta: return self.meta[attr] else: raise AttributeError("'%s' not in metadata" % attr) class Output(object): """ The Output class stores the output of an ODR run. Attributes ---------- beta : ndarray Estimated parameter values, of shape (q,). sd_beta : ndarray Standard errors of the estimated parameters, of shape (p,). cov_beta : ndarray Covariance matrix of the estimated parameters, of shape (p,p). delta : ndarray, optional Array of estimated errors in input variables, of same shape as `x`. eps : ndarray, optional Array of estimated errors in response variables, of same shape as `y`. 
xplus : ndarray, optional Array of ``x + delta``. y : ndarray, optional Array ``y = fcn(x + delta)``. res_var : float, optional Residual variance. sum_square : float, optional Sum of squares error. sum_square_delta : float, optional Sum of squares of delta error. sum_square_eps : float, optional Sum of squares of eps error. inv_condnum : float, optional Inverse condition number (cf. ODRPACK UG p. 77). rel_error : float, optional Relative error in function values computed within fcn. work : ndarray, optional Final work array. work_ind : dict, optional Indices into work for drawing out values (cf. ODRPACK UG p. 83). info : int, optional Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38). stopreason : list of str, optional `info` interpreted into English. Notes ----- Takes one argument for initialization, the return value from the function `odr`. The attributes listed as "optional" above are only present if `odr` was run with ``full_output=1``. """ def __init__(self, output): self.beta = output[0] self.sd_beta = output[1] self.cov_beta = output[2] if len(output) == 4: # full output self.__dict__.update(output[3]) self.stopreason = _report_error(self.info) def pprint(self): """ Pretty-print important results. """ print('Beta:', self.beta) print('Beta Std Error:', self.sd_beta) print('Beta Covariance:', self.cov_beta) if hasattr(self, 'info'): print('Residual Variance:',self.res_var) print('Inverse Condition #:', self.inv_condnum) print('Reason(s) for Halting:') for r in self.stopreason: print(' %s' % r) class ODR(object): """ The ODR class gathers all information and coordinates the running of the main fitting routine. Members of instances of the ODR class have the same names as the arguments to the initialization routine. Parameters ---------- data : Data class instance instance of the Data class model : Model class instance instance of the Model class Other Parameters ---------------- beta0 : array_like of rank-1 a rank-1 sequence of initial parameter values. Optional if model provides an "estimate" function to estimate these values. delta0 : array_like of floats of rank-1, optional a (double-precision) float array to hold the initial values of the errors in the input variables. Must be same shape as data.x ifixb : array_like of ints of rank-1, optional sequence of integers with the same length as beta0 that determines which parameters are held fixed. A value of 0 fixes the parameter, a value > 0 makes the parameter free. ifixx : array_like of ints with same shape as data.x, optional an array of integers with the same shape as data.x that determines which input observations are treated as fixed. One can use a sequence of length m (the dimensionality of the input observations) to fix some dimensions for all observations. A value of 0 fixes the observation, a value > 0 makes it free. job : int, optional an integer telling ODRPACK what tasks to perform. See p. 31 of the ODRPACK User's Guide if you absolutely must set the value here. Use the method set_job post-initialization for a more readable interface. iprint : int, optional an integer telling ODRPACK what to print. See pp. 33-34 of the ODRPACK User's Guide if you absolutely must set the value here. Use the method set_iprint post-initialization for a more readable interface. errfile : str, optional string with the filename to print ODRPACK errors to. *Do Not Open This File Yourself!* rptfile : str, optional string with the filename to print ODRPACK summaries to. 
*Do Not Open This File Yourself!* ndigit : int, optional integer specifying the number of reliable digits in the computation of the function. taufac : float, optional float specifying the initial trust region. The default value is 1. The initial trust region is equal to taufac times the length of the first computed Gauss-Newton step. taufac must be less than 1. sstol : float, optional float specifying the tolerance for convergence based on the relative change in the sum-of-squares. The default value is eps**(1/2) where eps is the smallest value such that 1 + eps > 1 for double precision computation on the machine. sstol must be less than 1. partol : float, optional float specifying the tolerance for convergence based on the relative change in the estimated parameters. The default value is eps**(2/3) for explicit models and ``eps**(1/3)`` for implicit models. partol must be less than 1. maxit : int, optional integer specifying the maximum number of iterations to perform. For first runs, maxit is the total number of iterations performed and defaults to 50. For restarts, maxit is the number of additional iterations to perform and defaults to 10. stpb : array_like, optional sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute finite difference derivatives wrt the parameters. stpd : optional array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative step sizes to compute finite difference derivatives wrt the input variable errors. If stpd is a rank-1 array with length m (the dimensionality of the input variable), then the values are broadcast to all observations. sclb : array_like, optional sequence (``len(stpb) == len(beta0)``) of scaling factors for the parameters. The purpose of these scaling factors are to scale all of the parameters to around unity. Normally appropriate scaling factors are computed if this argument is not specified. Specify them yourself if the automatic procedure goes awry. scld : array_like, optional array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling factors for the *errors* in the input variables. Again, these factors are automatically computed if you do not provide them. If scld.shape == (m,), then the scaling factors are broadcast to all observations. work : ndarray, optional array to hold the double-valued working data for ODRPACK. When restarting, takes the value of self.output.work. iwork : ndarray, optional array to hold the integer-valued working data for ODRPACK. When restarting, takes the value of self.output.iwork. Attributes ---------- data : Data The data for this fit model : Model The model used in fit output : Output An instance if the Output class containing all of the returned data from an invocation of ODR.run() or ODR.restart() """ def __init__(self, data, model, beta0=None, delta0=None, ifixb=None, ifixx=None, job=None, iprint=None, errfile=None, rptfile=None, ndigit=None, taufac=None, sstol=None, partol=None, maxit=None, stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None): self.data = data self.model = model if beta0 is None: if self.model.estimate is not None: self.beta0 = _conv(self.model.estimate(self.data)) else: raise ValueError( "must specify beta0 or provide an estimater with the model" ) else: self.beta0 = _conv(beta0) self.delta0 = _conv(delta0) # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit # platforms. # XXX: some other FORTRAN compilers may not agree. 
self.ifixx = _conv(ifixx, dtype=numpy.int32) self.ifixb = _conv(ifixb, dtype=numpy.int32) self.job = job self.iprint = iprint self.errfile = errfile self.rptfile = rptfile self.ndigit = ndigit self.taufac = taufac self.sstol = sstol self.partol = partol self.maxit = maxit self.stpb = _conv(stpb) self.stpd = _conv(stpd) self.sclb = _conv(sclb) self.scld = _conv(scld) self.work = _conv(work) self.iwork = _conv(iwork) self.output = None self._check() def _check(self): """ Check the inputs for consistency, but don't bother checking things that the builtin function odr will check. """ x_s = list(self.data.x.shape) if isinstance(self.data.y, numpy.ndarray): y_s = list(self.data.y.shape) if self.model.implicit: raise OdrError("an implicit model cannot use response data") else: # implicit model with q == self.data.y y_s = [self.data.y, x_s[-1]] if not self.model.implicit: raise OdrError("an explicit model needs response data") self.set_job(fit_type=1) if x_s[-1] != y_s[-1]: raise OdrError("number of observations do not match") n = x_s[-1] if len(x_s) == 2: m = x_s[0] else: m = 1 if len(y_s) == 2: q = y_s[0] else: q = 1 p = len(self.beta0) # permissible output array shapes fcn_perms = [(q, n)] fjacd_perms = [(q, m, n)] fjacb_perms = [(q, p, n)] if q == 1: fcn_perms.append((n,)) fjacd_perms.append((m, n)) fjacb_perms.append((p, n)) if m == 1: fjacd_perms.append((q, n)) if p == 1: fjacb_perms.append((q, n)) if m == q == 1: fjacd_perms.append((n,)) if p == q == 1: fjacb_perms.append((n,)) # try evaluating the supplied functions to make sure they provide # sensible outputs arglist = (self.beta0, self.data.x) if self.model.extra_args is not None: arglist = arglist + self.model.extra_args res = self.model.fcn(*arglist) if res.shape not in fcn_perms: print(res.shape) print(fcn_perms) raise OdrError("fcn does not output %s-shaped array" % y_s) if self.model.fjacd is not None: res = self.model.fjacd(*arglist) if res.shape not in fjacd_perms: raise OdrError( "fjacd does not output %s-shaped array" % repr((q, m, n))) if self.model.fjacb is not None: res = self.model.fjacb(*arglist) if res.shape not in fjacb_perms: raise OdrError( "fjacb does not output %s-shaped array" % repr((q, p, n))) # check shape of delta0 if self.delta0 is not None and self.delta0.shape != self.data.x.shape: raise OdrError( "delta0 is not a %s-shaped array" % repr(self.data.x.shape)) if self.data.x.size == 0: warn(("Empty data detected for ODR instance. " "Do not expect any fitting to occur"), OdrWarning) def _gen_work(self): """ Generate a suitable work array if one does not already exist. 
""" n = self.data.x.shape[-1] p = self.beta0.shape[0] if len(self.data.x.shape) == 2: m = self.data.x.shape[0] else: m = 1 if self.model.implicit: q = self.data.y elif len(self.data.y.shape) == 2: q = self.data.y.shape[0] else: q = 1 if self.data.we is None: ldwe = ld2we = 1 elif len(self.data.we.shape) == 3: ld2we, ldwe = self.data.we.shape[1:] else: # Okay, this isn't precisely right, but for this calculation, # it's fine ldwe = 1 ld2we = self.data.we.shape[1] if self.job % 10 < 2: # ODR not OLS lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p + 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q) else: # OLS not ODR lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p + 5*q + q*(p+m) + ldwe*ld2we*q) if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\ and self.work.dtype.str.endswith('f8'): # the existing array is fine return else: self.work = numpy.zeros((lwork,), float) def set_job(self, fit_type=None, deriv=None, var_calc=None, del_init=None, restart=None): """ Sets the "job" parameter is a hopefully comprehensible way. If an argument is not specified, then the value is left as is. The default value from class initialization is for all of these options set to 0. Parameters ---------- fit_type : {0, 1, 2} int 0 -> explicit ODR 1 -> implicit ODR 2 -> ordinary least-squares deriv : {0, 1, 2, 3} int 0 -> forward finite differences 1 -> central finite differences 2 -> user-supplied derivatives (Jacobians) with results checked by ODRPACK 3 -> user-supplied derivatives, no checking var_calc : {0, 1, 2} int 0 -> calculate asymptotic covariance matrix and fit parameter uncertainties (V_B, s_B) using derivatives recomputed at the final solution 1 -> calculate V_B and s_B using derivatives from last iteration 2 -> do not calculate V_B and s_B del_init : {0, 1} int 0 -> initial input variable offsets set to 0 1 -> initial offsets provided by user in variable "work" restart : {0, 1} int 0 -> fit is not a restart 1 -> fit is a restart Notes ----- The permissible values are different from those given on pg. 31 of the ODRPACK User's Guide only in that one cannot specify numbers greater than the last value for each variable. If one does not supply functions to compute the Jacobians, the fitting procedure will change deriv to 0, finite differences, as a default. To initialize the input variable offsets by yourself, set del_init to 1 and put the offsets into the "work" variable correctly. """ if self.job is None: job_l = [0, 0, 0, 0, 0] else: job_l = [self.job // 10000 % 10, self.job // 1000 % 10, self.job // 100 % 10, self.job // 10 % 10, self.job % 10] if fit_type in (0, 1, 2): job_l[4] = fit_type if deriv in (0, 1, 2, 3): job_l[3] = deriv if var_calc in (0, 1, 2): job_l[2] = var_calc if del_init in (0, 1): job_l[1] = del_init if restart in (0, 1): job_l[0] = restart self.job = (job_l[0]*10000 + job_l[1]*1000 + job_l[2]*100 + job_l[3]*10 + job_l[4]) def set_iprint(self, init=None, so_init=None, iter=None, so_iter=None, iter_step=None, final=None, so_final=None): """ Set the iprint parameter for the printing of computation reports. If any of the arguments are specified here, then they are set in the iprint member. If iprint is not set manually or with this method, then ODRPACK defaults to no printing. If no filename is specified with the member rptfile, then ODRPACK prints to stdout. 
One can tell ODRPACK to print to stdout in addition to the specified filename by setting the so_* arguments to this function, but one cannot specify to print to stdout but not a file since one can do that by not specifying a rptfile filename. There are three reports: initialization, iteration, and final reports. They are represented by the arguments init, iter, and final respectively. The permissible values are 0, 1, and 2 representing "no report", "short report", and "long report" respectively. The argument iter_step (0 <= iter_step <= 9) specifies how often to make the iteration report; the report will be made for every iter_step'th iteration starting with iteration one. If iter_step == 0, then no iteration report is made, regardless of the other arguments. If the rptfile is None, then any so_* arguments supplied will raise an exception. """ if self.iprint is None: self.iprint = 0 ip = [self.iprint // 1000 % 10, self.iprint // 100 % 10, self.iprint // 10 % 10, self.iprint % 10] # make a list to convert iprint digits to/from argument inputs # rptfile, stdout ip2arg = [[0, 0], # none, none [1, 0], # short, none [2, 0], # long, none [1, 1], # short, short [2, 1], # long, short [1, 2], # short, long [2, 2]] # long, long if (self.rptfile is None and (so_init is not None or so_iter is not None or so_final is not None)): raise OdrError( "no rptfile specified, cannot output to stdout twice") iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]] if init is not None: iprint_l[0] = init if so_init is not None: iprint_l[1] = so_init if iter is not None: iprint_l[2] = iter if so_iter is not None: iprint_l[3] = so_iter if final is not None: iprint_l[4] = final if so_final is not None: iprint_l[5] = so_final if iter_step in range(10): # 0..9 ip[2] = iter_step ip[0] = ip2arg.index(iprint_l[0:2]) ip[1] = ip2arg.index(iprint_l[2:4]) ip[3] = ip2arg.index(iprint_l[4:6]) self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3] def run(self): """ Run the fitting routine with all of the information given and with ``full_output=1``. Returns ------- output : Output instance This object is also assigned to the attribute .output . """ args = (self.model.fcn, self.beta0, self.data.y, self.data.x) kwds = {'full_output': 1} kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile', 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb', 'stpd', 'sclb', 'scld', 'work', 'iwork'] if self.delta0 is not None and self.job % 1000 // 10 == 1: # delta0 provided and fit is not a restart self._gen_work() d0 = numpy.ravel(self.delta0) self.work[:len(d0)] = d0 # set the kwds from other objects explicitly if self.model.fjacb is not None: kwds['fjacb'] = self.model.fjacb if self.model.fjacd is not None: kwds['fjacd'] = self.model.fjacd if self.data.we is not None: kwds['we'] = self.data.we if self.data.wd is not None: kwds['wd'] = self.data.wd if self.model.extra_args is not None: kwds['extra_args'] = self.model.extra_args # implicitly set kwds from self's members for attr in kwd_l: obj = getattr(self, attr) if obj is not None: kwds[attr] = obj self.output = Output(odr(*args, **kwds)) return self.output def restart(self, iter=None): """ Restarts the run with iter more iterations. Parameters ---------- iter : int, optional ODRPACK's default for the number of new iterations is 10. Returns ------- output : Output instance This object is also assigned to the attribute .output . 
""" if self.output is None: raise OdrError("cannot restart: run() has not been called before") self.set_job(restart=1) self.work = self.output.work self.iwork = self.output.iwork self.maxit = iter return self.run()
41,283
35.599291
97
py
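A minimal sketch tying together the shape rules spelled out in the Model docstring above: with a single input dimension and a single response (m == q == 1) and p == 2 parameters, fcn returns an (n,)-shaped array, fjacb returns (p, n), and fjacd may return (n,). The model, data values, and beta0 below are made up for illustration; only the API usage follows the docstrings.

import numpy as np
from scipy.odr import Data, Model, ODR

def fcn(beta, x):                       # (n,) output, since q == 1
    return beta[0] * np.exp(beta[1] * x)

def fjacb(beta, x):                     # (p, n) output: d f / d beta_k
    return np.vstack([np.exp(beta[1] * x),
                      beta[0] * x * np.exp(beta[1] * x)])

def fjacd(beta, x):                     # (n,) output allowed because m == q == 1
    return beta[0] * beta[1] * np.exp(beta[1] * x)

x = np.linspace(0.0, 1.0, 25)
y = 2.0 * np.exp(0.7 * x) + 0.01 * np.random.RandomState(1).standard_normal(25)

model = Model(fcn, fjacb=fjacb, fjacd=fjacd)
fit = ODR(Data(x, y), model, beta0=[1.0, 1.0])
fit.set_job(deriv=3)                    # user-supplied derivatives, no checking
out = fit.run()
out.pprint()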
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/odr/add_newdocs.py
from numpy import add_newdoc add_newdoc('scipy.odr', 'odr', """ odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None, full_output=0) Low-level function for ODR. See Also -------- ODR Model Data RealData Notes ----- This is a function performing the same operation as the `ODR`, `Model` and `Data` classes together. The parameters of this function are explained in the class documentation. """) add_newdoc('scipy.odr.__odrpack', '_set_exceptions', """ _set_exceptions(odr_error, odr_stop) Internal function: set exception classes. """)
839
26.096774
292
py
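The docstring registered above documents the low-level odr() routine, which runs the same fit without the Data/Model/ODR classes. A hedged sketch of a direct call follows; the unpacking into three arrays assumes full_output is left at its default of 0 (the Output class in odrpack.py treats a length-4 return as the full-output case), and the data are fabricated.

import numpy as np
from scipy.odr import odr

def fcn(beta, x):
    return beta[0] * x + beta[1]

x = np.linspace(0.0, 5.0, 20)
y = 2.0 * x + 1.0 + 0.05 * np.random.RandomState(0).standard_normal(20)

beta, sd_beta, cov_beta = odr(fcn, np.array([1.0, 1.0]), y, x)
print(beta)       # estimated slope and intercept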
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/odr/models.py
""" Collection of Model instances for use with the odrpack fitting package. """ from __future__ import division, print_function, absolute_import import numpy as np from scipy.odr.odrpack import Model __all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic', 'polynomial'] def _lin_fcn(B, x): a, b = B[0], B[1:] b.shape = (b.shape[0], 1) return a + (x*b).sum(axis=0) def _lin_fjb(B, x): a = np.ones(x.shape[-1], float) res = np.concatenate((a, x.ravel())) res.shape = (B.shape[-1], x.shape[-1]) return res def _lin_fjd(B, x): b = B[1:] b = np.repeat(b, (x.shape[-1],)*b.shape[-1],axis=0) b.shape = x.shape return b def _lin_est(data): # Eh. The answer is analytical, so just return all ones. # Don't return zeros since that will interfere with # ODRPACK's auto-scaling procedures. if len(data.x.shape) == 2: m = data.x.shape[0] else: m = 1 return np.ones((m + 1,), float) def _poly_fcn(B, x, powers): a, b = B[0], B[1:] b.shape = (b.shape[0], 1) return a + np.sum(b * np.power(x, powers), axis=0) def _poly_fjacb(B, x, powers): res = np.concatenate((np.ones(x.shape[-1], float), np.power(x, powers).flat)) res.shape = (B.shape[-1], x.shape[-1]) return res def _poly_fjacd(B, x, powers): b = B[1:] b.shape = (b.shape[0], 1) b = b * powers return np.sum(b * np.power(x, powers-1),axis=0) def _exp_fcn(B, x): return B[0] + np.exp(B[1] * x) def _exp_fjd(B, x): return B[1] * np.exp(B[1] * x) def _exp_fjb(B, x): res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x))) res.shape = (2, x.shape[-1]) return res def _exp_est(data): # Eh. return np.array([1., 1.]) multilinear = Model(_lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est, meta={'name': 'Arbitrary-dimensional Linear', 'equ':'y = B_0 + Sum[i=1..m, B_i * x_i]', 'TeXequ':r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'}) def polynomial(order): """ Factory function for a general polynomial model. Parameters ---------- order : int or sequence If an integer, it becomes the order of the polynomial to fit. If a sequence of numbers, then these are the explicit powers in the polynomial. A constant term (power 0) is always included, so don't include 0. Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)). Returns ------- polynomial : Model instance Model instance. """ powers = np.asarray(order) if powers.shape == (): # Scalar. powers = np.arange(1, powers + 1) powers.shape = (len(powers), 1) len_beta = len(powers) + 1 def _poly_est(data, len_beta=len_beta): # Eh. Ignore data and return all ones. return np.ones((len_beta,), float) return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb, estimate=_poly_est, extra_args=(powers,), meta={'name': 'Sorta-general Polynomial', 'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1), 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' % (len_beta-1)}) exponential = Model(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb, estimate=_exp_est, meta={'name':'Exponential', 'equ': 'y= B_0 + exp(B_1 * x)', 'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'}) def _unilin(B, x): return x*B[0] + B[1] def _unilin_fjd(B, x): return np.ones(x.shape, float) * B[0] def _unilin_fjb(B, x): _ret = np.concatenate((x, np.ones(x.shape, float))) _ret.shape = (2,) + x.shape return _ret def _unilin_est(data): return (1., 1.) def _quadratic(B, x): return x*(x*B[0] + B[1]) + B[2] def _quad_fjd(B, x): return 2*x*B[0] + B[1] def _quad_fjb(B, x): _ret = np.concatenate((x*x, x, np.ones(x.shape, float))) _ret.shape = (3,) + x.shape return _ret def _quad_est(data): return (1.,1.,1.) 
unilinear = Model(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, estimate=_unilin_est, meta={'name': 'Univariate Linear', 'equ': 'y = B_0 * x + B_1', 'TeXequ': '$y = \\beta_0 x + \\beta_1$'}) quadratic = Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est, meta={'name': 'Quadratic', 'equ': 'y = B_0*x**2 + B_1*x + B_2', 'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2$'})
4,663
23.808511
78
py
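The prebuilt instances defined above (multilinear, exponential, unilinear, quadratic) and the polynomial() factory can be handed straight to ODR without writing fcn or Jacobians, and because each carries an estimate function, beta0 may be omitted. A small sketch with polynomial(2) on fabricated data:

import numpy as np
from scipy.odr import ODR, Data, polynomial

x = np.linspace(-2.0, 2.0, 30)
y = 3.0 - 1.0 * x + 0.5 * x**2 + 0.02 * np.random.RandomState(2).standard_normal(30)

quad = polynomial(2)                      # fits y = B_0 + B_1*x + B_2*x**2
fit = ODR(Data(x, y), quad).run()         # beta0 supplied by the model's estimate()
print(fit.beta)                           # roughly [3.0, -1.0, 0.5]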
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/odr/__init__.py
""" ================================================= Orthogonal distance regression (:mod:`scipy.odr`) ================================================= .. currentmodule:: scipy.odr Package Content =============== .. autosummary:: :toctree: generated/ Data -- The data to fit. RealData -- Data with weights as actual std. dev.s and/or covariances. Model -- Stores information about the function to be fit. ODR -- Gathers all info & manages the main fitting routine. Output -- Result from the fit. odr -- Low-level function for ODR. OdrWarning -- Warning about potential problems when running ODR OdrError -- Error exception. OdrStop -- Stop exception. odr_error -- Same as OdrError (for backwards compatibility) odr_stop -- Same as OdrStop (for backwards compatibility) Prebuilt models: .. autosummary:: :toctree: generated/ polynomial .. data:: exponential .. data:: multilinear .. data:: unilinear .. data:: quadratic .. data:: polynomial Usage information ================= Introduction ------------ Why Orthogonal Distance Regression (ODR)? Sometimes one has measurement errors in the explanatory (a.k.a., "independent") variable(s), not just the response (a.k.a., "dependent") variable(s). Ordinary Least Squares (OLS) fitting procedures treat the data for explanatory variables as fixed, i.e., not subject to error of any kind. Furthermore, OLS procedures require that the response variables be an explicit function of the explanatory variables; sometimes making the equation explicit is impractical and/or introduces errors. ODR can handle both of these cases with ease, and can even reduce to the OLS case if that is sufficient for the problem. ODRPACK is a FORTRAN-77 library for performing ODR with possibly non-linear fitting functions. It uses a modified trust-region Levenberg-Marquardt-type algorithm [1]_ to estimate the function parameters. The fitting functions are provided by Python functions operating on NumPy arrays. The required derivatives may be provided by Python functions as well, or may be estimated numerically. ODRPACK can do explicit or implicit ODR fits, or it can do OLS. Input and output variables may be multi-dimensional. Weights can be provided to account for different variances of the observations, and even covariances between dimensions of the variables. The `scipy.odr` package offers an object-oriented interface to ODRPACK, in addition to the low-level `odr` function. Additional background information about ODRPACK can be found in the `ODRPACK User's Guide <https://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, reading which is recommended. Basic usage ----------- 1. Define the function you want to fit against.:: def f(B, x): '''Linear function y = m*x + b''' # B is a vector of the parameters. # x is an array of the current x values. # x is in the same format as the x passed to Data or RealData. # # Return an array in the same format as y passed to Data or RealData. return B[0]*x + B[1] 2. Create a Model.:: linear = Model(f) 3. Create a Data or RealData instance.:: mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2)) or, when the actual covariances are known:: mydata = RealData(x, y, sx=sx, sy=sy) 4. Instantiate ODR with your data, model and initial parameter estimate.:: myodr = ODR(mydata, linear, beta0=[1., 2.]) 5. Run the fit.:: myoutput = myodr.run() 6. Examine output.:: myoutput.pprint() References ---------- .. [1] P. T. Boggs and J. E. 
Rogers, "Orthogonal Distance Regression," in "Statistical analysis of measurement error models and applications: proceedings of the AMS-IMS-SIAM joint summer research conference held June 10-16, 1989," Contemporary Mathematics, vol. 112, pg. 186, 1990. """ # version: 0.7 # author: Robert Kern <robert.kern@gmail.com> # date: 2006-09-21 from __future__ import division, print_function, absolute_import from .odrpack import * from .models import * from . import add_newdocs __all__ = [s for s in dir() if not s.startswith('_')] from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
4,343
29.166667
80
py
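The numbered steps in the package docstring above, gathered into one runnable sketch. The x and y values are borrowed from the Pearson data used in the test suite further down; the sx and sy uncertainties are invented for illustration.

import numpy as np
from scipy.odr import Model, RealData, ODR

def f(B, x):
    '''Linear function y = m*x + b'''
    return B[0] * x + B[1]

linear = Model(f)

x = np.array([0.0, 0.9, 1.8, 2.6, 3.3, 4.4, 5.2, 6.1, 6.5, 7.4])
y = np.array([5.9, 5.4, 4.4, 4.6, 3.5, 3.7, 2.8, 2.8, 2.4, 1.5])
sx = np.full_like(x, 0.1)                 # assumed measurement errors
sy = np.full_like(y, 0.3)

mydata = RealData(x, y, sx=sx, sy=sy)
myodr = ODR(mydata, linear, beta0=[1., 2.])
myoutput = myodr.run()
myoutput.pprint()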
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/odr/tests/test_odr.py
from __future__ import division, print_function, absolute_import # Scipy imports. import numpy as np from numpy import pi from numpy.testing import (assert_array_almost_equal, assert_equal, assert_warns) from pytest import raises as assert_raises from scipy.odr import Data, Model, ODR, RealData, OdrStop, OdrWarning class TestODR(object): # Bad Data for 'x' def test_bad_data(self): assert_raises(ValueError, Data, 2, 1) assert_raises(ValueError, RealData, 2, 1) # Empty Data for 'x' def empty_data_func(self, B, x): return B[0]*x + B[1] def test_empty_data(self): beta0 = [0.02, 0.0] linear = Model(self.empty_data_func) empty_dat = Data([], []) assert_warns(OdrWarning, ODR, empty_dat, linear, beta0=beta0) empty_dat = RealData([], []) assert_warns(OdrWarning, ODR, empty_dat, linear, beta0=beta0) # Explicit Example def explicit_fcn(self, B, x): ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) return ret def explicit_fjd(self, B, x): eBx = np.exp(B[2]*x) ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx return ret def explicit_fjb(self, B, x): eBx = np.exp(B[2]*x) res = np.vstack([np.ones(x.shape[-1]), np.power(eBx-1.0, 2), B[1]*2.0*(eBx-1.0)*eBx*x]) return res def test_explicit(self): explicit_mod = Model( self.explicit_fcn, fjacb=self.explicit_fjb, fjacd=self.explicit_fjd, meta=dict(name='Sample Explicit Model', ref='ODRPACK UG, pg. 39'), ) explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, 1213.8,1215.5,1212.]) explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) explicit_odr.set_job(deriv=2) explicit_odr.set_iprint(init=0, iter=0, final=0) out = explicit_odr.run() assert_array_almost_equal( out.beta, np.array([1.2646548050648876e+03, -5.4018409956678255e+01, -8.7849712165253724e-02]), ) assert_array_almost_equal( out.sd_beta, np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]), ) assert_array_almost_equal( out.cov_beta, np.array([[4.4949592379003039e-01, -3.7421976890364739e-01, -8.0978217468468912e-04], [-3.7421976890364739e-01, 1.0529686462751804e+00, -1.9453521827942002e-03], [-8.0978217468468912e-04, -1.9453521827942002e-03, 1.6827336938454476e-05]]), ) # Implicit Example def implicit_fcn(self, B, x): return (B[2]*np.power(x[0]-B[0], 2) + 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + B[4]*np.power(x[1]-B[1], 2) - 1.0) def test_implicit(self): implicit_mod = Model( self.implicit_fcn, implicit=1, meta=dict(name='Sample Implicit Model', ref='ODRPACK UG, pg. 
49'), ) implicit_dat = Data([ [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], 1, ) implicit_odr = ODR(implicit_dat, implicit_mod, beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) out = implicit_odr.run() assert_array_almost_equal( out.beta, np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, 0.0162299708984738, 0.0797537982976416]), ) assert_array_almost_equal( out.sd_beta, np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314, 0.0027500347539902, 0.0034962501532468]), ) assert_array_almost_equal( out.cov_beta, np.array([[2.1089274602333052e+00, -1.9437686411979040e+00, 7.0263550868344446e-02, -4.7175267373474862e-02, 5.2515575927380355e-02], [-1.9437686411979040e+00, 2.0481509222414456e+00, -6.1600515853057307e-02, 4.6268827806232933e-02, -5.8822307501391467e-02], [7.0263550868344446e-02, -6.1600515853057307e-02, 2.8659542561579308e-03, -1.4628662260014491e-03, 1.4528860663055824e-03], [-4.7175267373474862e-02, 4.6268827806232933e-02, -1.4628662260014491e-03, 1.2855592885514335e-03, -1.2692942951415293e-03], [5.2515575927380355e-02, -5.8822307501391467e-02, 1.4528860663055824e-03, -1.2692942951415293e-03, 2.0778813389755596e-03]]), ) # Multi-variable Example def multi_fcn(self, B, x): if (x < 0.0).any(): raise OdrStop theta = pi*B[3]/2. ctheta = np.cos(theta) stheta = np.sin(theta) omega = np.power(2.*pi*x*np.exp(-B[2]), B[3]) phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta)) r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) + np.power(omega*stheta, 2)), -B[4]) ret = np.vstack([B[1] + r*np.cos(B[4]*phi), r*np.sin(B[4]*phi)]) return ret def test_multi(self): multi_mod = Model( self.multi_fcn, meta=dict(name='Sample Multi-Response Model', ref='ODRPACK UG, pg. 
56'), ) multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0, 700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0, 15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0]) multi_y = np.array([ [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713, 3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984, 2.934, 2.876, 2.838, 2.798, 2.759], [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309, 0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218, 0.202, 0.182, 0.168, 0.153, 0.139], ]) n = len(multi_x) multi_we = np.zeros((2, 2, n), dtype=float) multi_ifixx = np.ones(n, dtype=int) multi_delta = np.zeros(n, dtype=float) multi_we[0,0,:] = 559.6 multi_we[1,0,:] = multi_we[0,1,:] = -1634.0 multi_we[1,1,:] = 8397.0 for i in range(n): if multi_x[i] < 100.0: multi_ifixx[i] = 0 elif multi_x[i] <= 150.0: pass # defaults are fine elif multi_x[i] <= 1000.0: multi_delta[i] = 25.0 elif multi_x[i] <= 10000.0: multi_delta[i] = 560.0 elif multi_x[i] <= 100000.0: multi_delta[i] = 9500.0 else: multi_delta[i] = 144000.0 if multi_x[i] == 100.0 or multi_x[i] == 150.0: multi_we[:,:,i] = 0.0 multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2), we=multi_we) multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5], delta0=multi_delta, ifixx=multi_ifixx) multi_odr.set_job(deriv=1, del_init=1) out = multi_odr.run() assert_array_almost_equal( out.beta, np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978, 0.5101147161764654, 0.5173902330489161]), ) assert_array_almost_equal( out.sd_beta, np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757, 0.0132642749596149, 0.0288529201353984]), ) assert_array_almost_equal( out.cov_beta, np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406, -0.0058700836512467, 0.011281212888768], [0.0036159705923791, 0.0064793789429006, 0.0517610978353126, -0.0051181304940204, 0.0130726943624117], [0.0438637051470406, 0.0517610978353126, 0.5182263323095322, -0.0563083340093696, 0.1269490939468611], [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696, 0.0066939246261263, -0.0140184391377962], [0.011281212888768, 0.0130726943624117, 0.1269490939468611, -0.0140184391377962, 0.0316733013820852]]), ) # Pearson's Data # K. 
Pearson, Philosophical Magazine, 2, 559 (1901) def pearson_fcn(self, B, x): return B[0] + B[1]*x def test_pearson(self): p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]) p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]) p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.]) p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04]) p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy) # Reverse the data to test invariance of results pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx) p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit')) p_odr = ODR(p_dat, p_mod, beta0=[1.,1.]) pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.]) out = p_odr.run() assert_array_almost_equal( out.beta, np.array([5.4767400299231674, -0.4796082367610305]), ) assert_array_almost_equal( out.sd_beta, np.array([0.3590121690702467, 0.0706291186037444]), ) assert_array_almost_equal( out.cov_beta, np.array([[0.0854275622946333, -0.0161807025443155], [-0.0161807025443155, 0.003306337993922]]), ) rout = pr_odr.run() assert_array_almost_equal( rout.beta, np.array([11.4192022410781231, -2.0850374506165474]), ) assert_array_almost_equal( rout.sd_beta, np.array([0.9820231665657161, 0.3070515616198911]), ) assert_array_almost_equal( rout.cov_beta, np.array([[0.6391799462548782, -0.1955657291119177], [-0.1955657291119177, 0.0624888159223392]]), ) # Lorentz Peak # The data is taken from one of the undergraduate physics labs I performed. def lorentz(self, beta, x): return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x - beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0))) def test_lorentz(self): l_sy = np.array([.29]*18) l_sx = np.array([.000972971,.000948268,.000707632,.000706679, .000706074, .000703918,.000698955,.000456856, .000455207,.000662717,.000654619,.000652694, .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839]) l_dat = RealData( [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608, 3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982, 3.6562, 3.62498, 3.55525, 3.41886], [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122, 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5], sx=l_sx, sy=l_sy, ) l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak')) l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8)) out = l_odr.run() assert_array_almost_equal( out.beta, np.array([1.4306780846149925e+03, 1.3390509034538309e-01, 3.7798193600109009e+00]), ) assert_array_almost_equal( out.sd_beta, np.array([7.3621186811330963e-01, 3.5068899941471650e-04, 2.4451209281408992e-04]), ) assert_array_almost_equal( out.cov_beta, np.array([[2.4714409064597873e-01, -6.9067261911110836e-05, -3.1236953270424990e-05], [-6.9067261911110836e-05, 5.6077531517333009e-08, 3.6133261832722601e-08], [-3.1236953270424990e-05, 3.6133261832722601e-08, 2.7261220025171730e-08]]), ) def test_ticket_1253(self): def linear(c, x): return c[0]*x+c[1] c = [2.0, 3.0] x = np.linspace(0, 10) y = linear(c, x) model = Model(linear) data = Data(x, y, wd=1.0, we=1.0) job = ODR(data, model, beta0=[1.0, 1.0]) result = job.run() assert_equal(result.info, 2)
13,045
36.596542
83
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/odr/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/vonmises.py
from __future__ import division, print_function, absolute_import import numpy as np import scipy.stats from scipy.special import i0 def von_mises_cdf_series(k,x,p): x = float(x) s = np.sin(x) c = np.cos(x) sn = np.sin(p*x) cn = np.cos(p*x) R = 0 V = 0 for n in range(p-1,0,-1): sn, cn = sn*c - cn*s, cn*c + sn*s R = 1./(2*n/k + R) V = R*(sn/n+V) return 0.5+x/(2*np.pi) + V/np.pi def von_mises_cdf_normalapprox(k, x): b = np.sqrt(2/np.pi)*np.exp(k)/i0(k) z = b*np.sin(x/2.) return scipy.stats.norm.cdf(z) def von_mises_cdf(k,x): ix = 2*np.pi*np.round(x/(2*np.pi)) x = x-ix k = float(k) # These values should give 12 decimal digits CK = 50 a = [28., 0.5, 100., 5.0] if k < CK: p = int(np.ceil(a[0]+a[1]*k-a[2]/(k+a[3]))) F = np.clip(von_mises_cdf_series(k,x,p),0,1) else: F = von_mises_cdf_normalapprox(k, x) return F+ix
963
19.510638
64
py
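In the module above, von_mises_cdf() folds x back into one period via ix and then switches from the truncated series to the normal approximation once k reaches the constant CK = 50. A tiny sketch exercising both branches; it assumes von_mises_cdf as defined above is in scope (for example by running inside that module), and the expected values follow from the symmetry of the density about x = 0.

import numpy as np

print(von_mises_cdf(5.0, 0.0))      # 0.5, via the series branch (k < 50)
print(von_mises_cdf(80.0, 0.0))     # 0.5, via the normal-approximation branch (k >= 50)
print(von_mises_cdf(5.0, np.pi/2))  # > 0.5: the CDF increases across (-pi, pi)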
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_continuous_distns.py
# # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # from __future__ import division, print_function, absolute_import import warnings import numpy as np from scipy.misc.doccer import (extend_notes_in_docstring, replace_notes_in_docstring) from scipy import optimize from scipy import integrate import scipy.special as sc from scipy._lib._numpy_compat import broadcast_to from . import _stats from ._tukeylambda_stats import (tukeylambda_variance as _tlvar, tukeylambda_kurtosis as _tlkurt) from ._distn_infrastructure import (get_distribution_names, _kurtosis, _lazyselect, _lazywhere, _ncx2_cdf, _ncx2_log_pdf, _ncx2_pdf, rv_continuous, _skew, valarray) from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX # In numpy 1.12 and above, np.power refuses to raise integers to negative # powers, and `np.float_power` is a new replacement. try: float_power = np.float_power except AttributeError: float_power = np.power ## Kolmogorov-Smirnov one-sided and two-sided test statistics class ksone_gen(rv_continuous): """General Kolmogorov-Smirnov one-sided test. %(default)s """ def _cdf(self, x, n): return 1.0 - sc.smirnov(n, x) def _ppf(self, q, n): return sc.smirnovi(n, 1.0 - q) ksone = ksone_gen(a=0.0, name='ksone') class kstwobign_gen(rv_continuous): """Kolmogorov-Smirnov two-sided test for large N. %(default)s """ def _cdf(self, x): return 1.0 - sc.kolmogorov(x) def _sf(self, x): return sc.kolmogorov(x) def _ppf(self, q): return sc.kolmogi(1.0 - q) kstwobign = kstwobign_gen(a=0.0, name='kstwobign') ## Normal distribution # loc = mu, scale = std # Keep these implementations out of the class definition so they can be reused # by other distributions. _norm_pdf_C = np.sqrt(2*np.pi) _norm_pdf_logC = np.log(_norm_pdf_C) def _norm_pdf(x): return np.exp(-x**2/2.0) / _norm_pdf_C def _norm_logpdf(x): return -x**2 / 2.0 - _norm_pdf_logC def _norm_cdf(x): return sc.ndtr(x) def _norm_logcdf(x): return sc.log_ndtr(x) def _norm_ppf(q): return sc.ndtri(q) def _norm_sf(x): return _norm_cdf(-x) def _norm_logsf(x): return _norm_logcdf(-x) def _norm_isf(q): return -_norm_ppf(q) class norm_gen(rv_continuous): r"""A normal continuous random variable. The location (loc) keyword specifies the mean. The scale (scale) keyword specifies the standard deviation. %(before_notes)s Notes ----- The probability density function for `norm` is: .. math:: f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}} The survival function, ``norm.sf``, is also referred to as the Q-function in some contexts (see, e.g., `Wikipedia's <https://en.wikipedia.org/wiki/Q-function>`_ definition). 
%(after_notes)s %(example)s """ def _rvs(self): return self._random_state.standard_normal(self._size) def _pdf(self, x): # norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi) return _norm_pdf(x) def _logpdf(self, x): return _norm_logpdf(x) def _cdf(self, x): return _norm_cdf(x) def _logcdf(self, x): return _norm_logcdf(x) def _sf(self, x): return _norm_sf(x) def _logsf(self, x): return _norm_logsf(x) def _ppf(self, q): return _norm_ppf(q) def _isf(self, q): return _norm_isf(q) def _stats(self): return 0.0, 1.0, 0.0, 0.0 def _entropy(self): return 0.5*(np.log(2*np.pi)+1) @replace_notes_in_docstring(rv_continuous, notes="""\ This function uses explicit formulas for the maximum likelihood estimation of the normal distribution parameters, so the `optimizer` argument is ignored.\n\n""") def fit(self, data, **kwds): floc = kwds.get('floc', None) fscale = kwds.get('fscale', None) if floc is not None and fscale is not None: # This check is for consistency with `rv_continuous.fit`. # Without this check, this function would just return the # parameters that were given. raise ValueError("All parameters fixed. There is nothing to " "optimize.") data = np.asarray(data) if floc is None: loc = data.mean() else: loc = floc if fscale is None: scale = np.sqrt(((data - loc)**2).mean()) else: scale = fscale return loc, scale norm = norm_gen(name='norm') class alpha_gen(rv_continuous): r"""An alpha continuous random variable. %(before_notes)s Notes ----- The probability density function for `alpha` is: .. math:: f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} * \exp(-\frac{1}{2} (a-1/x)^2) where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``. `alpha` takes ``a`` as a shape parameter. %(after_notes)s %(example)s """ _support_mask = rv_continuous._open_support_mask def _pdf(self, x, a): # alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2) return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x) def _logpdf(self, x, a): return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a)) def _cdf(self, x, a): return _norm_cdf(a-1.0/x) / _norm_cdf(a) def _ppf(self, q, a): return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a))) def _stats(self, a): return [np.inf]*2 + [np.nan]*2 alpha = alpha_gen(a=0.0, name='alpha') class anglit_gen(rv_continuous): r"""An anglit continuous random variable. %(before_notes)s Notes ----- The probability density function for `anglit` is: .. math:: f(x) = \sin(2x + \pi/2) = \cos(2x) for :math:`-\pi/4 \le x \le \pi/4`. %(after_notes)s %(example)s """ def _pdf(self, x): # anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x) return np.cos(2*x) def _cdf(self, x): return np.sin(x+np.pi/4)**2.0 def _ppf(self, q): return np.arcsin(np.sqrt(q))-np.pi/4 def _stats(self): return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2 def _entropy(self): return 1-np.log(2) anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit') class arcsine_gen(rv_continuous): r"""An arcsine continuous random variable. %(before_notes)s Notes ----- The probability density function for `arcsine` is: .. math:: f(x) = \frac{1}{\pi \sqrt{x (1-x)}} for :math:`0 \le x \le 1`. 
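# Editor's sketch (not from the scipy source itself): norm.fit uses the explicit
# MLE formulas implemented in the fit method above, so its result matches the
# sample mean and the ddof=0 standard deviation.  Seed and sample size are
# arbitrary example values.
import numpy as np
from scipy.stats import norm

rng = np.random.RandomState(0)
sample = rng.normal(loc=3.0, scale=2.0, size=1000)
loc_hat, scale_hat = norm.fit(sample)
print(loc_hat, sample.mean())          # identical
print(scale_hat, sample.std(ddof=0))   # equal to floating-point precision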
%(after_notes)s %(example)s """ def _pdf(self, x): # arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x))) return 1.0/np.pi/np.sqrt(x*(1-x)) def _cdf(self, x): return 2.0/np.pi*np.arcsin(np.sqrt(x)) def _ppf(self, q): return np.sin(np.pi/2.0*q)**2.0 def _stats(self): mu = 0.5 mu2 = 1.0/8 g1 = 0 g2 = -3.0/2.0 return mu, mu2, g1, g2 def _entropy(self): return -0.24156447527049044468 arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine') class FitDataError(ValueError): # This exception is raised by, for example, beta_gen.fit when both floc # and fscale are fixed and there are values in the data not in the open # interval (floc, floc+fscale). def __init__(self, distr, lower, upper): self.args = ( "Invalid values in `data`. Maximum likelihood " "estimation with {distr!r} requires that {lower!r} < x " "< {upper!r} for each x in `data`.".format( distr=distr, lower=lower, upper=upper), ) class FitSolverError(RuntimeError): # This exception is raised by, for example, beta_gen.fit when # optimize.fsolve returns with ier != 1. def __init__(self, mesg): emsg = "Solver for the MLE equations failed to converge: " emsg += mesg.replace('\n', '') self.args = (emsg,) def _beta_mle_a(a, b, n, s1): # The zeros of this function give the MLE for `a`, with # `b`, `n` and `s1` given. `s1` is the sum of the logs of # the data. `n` is the number of data points. psiab = sc.psi(a + b) func = s1 - n * (-psiab + sc.psi(a)) return func def _beta_mle_ab(theta, n, s1, s2): # Zeros of this function are critical points of # the maximum likelihood function. Solving this system # for theta (which contains a and b) gives the MLE for a and b # given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data, # and `s2` is the sum of the logs of 1 - data. `n` is the number # of data points. a, b = theta psiab = sc.psi(a + b) func = [s1 - n * (-psiab + sc.psi(a)), s2 - n * (-psiab + sc.psi(b))] return func class beta_gen(rv_continuous): r"""A beta continuous random variable. %(before_notes)s Notes ----- The probability density function for `beta` is: .. math:: f(x, a, b) = \frac{\gamma(a+b) x^{a-1} (1-x)^{b-1}} {\gamma(a) \gamma(b)} for :math:`0 < x < 1`, :math:`a > 0`, :math:`b > 0`, where :math:`\gamma(z)` is the gamma function (`scipy.special.gamma`). `beta` takes :math:`a` and :math:`b` as shape parameters. 
%(after_notes)s %(example)s """ def _rvs(self, a, b): return self._random_state.beta(a, b, self._size) def _pdf(self, x, a, b): # gamma(a+b) * x**(a-1) * (1-x)**(b-1) # beta.pdf(x, a, b) = ------------------------------------ # gamma(a)*gamma(b) return np.exp(self._logpdf(x, a, b)) def _logpdf(self, x, a, b): lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x) lPx -= sc.betaln(a, b) return lPx def _cdf(self, x, a, b): return sc.btdtr(a, b, x) def _ppf(self, q, a, b): return sc.btdtri(a, b, q) def _stats(self, a, b): mn = a*1.0 / (a + b) var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0 g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b) g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b)) g2 /= a*b*(a+b+2)*(a+b+3) return mn, var, g1, g2 def _fitstart(self, data): g1 = _skew(data) g2 = _kurtosis(data) def func(x): a, b = x sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b) ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2) ku /= a*b*(a+b+2)*(a+b+3) ku *= 6 return [sk-g1, ku-g2] a, b = optimize.fsolve(func, (1.0, 1.0)) return super(beta_gen, self)._fitstart(data, args=(a, b)) @extend_notes_in_docstring(rv_continuous, notes="""\ In the special case where both `floc` and `fscale` are given, a `ValueError` is raised if any value `x` in `data` does not satisfy `floc < x < floc + fscale`.\n\n""") def fit(self, data, *args, **kwds): # Override rv_continuous.fit, so we can more efficiently handle the # case where floc and fscale are given. f0 = (kwds.get('f0', None) or kwds.get('fa', None) or kwds.get('fix_a', None)) f1 = (kwds.get('f1', None) or kwds.get('fb', None) or kwds.get('fix_b', None)) floc = kwds.get('floc', None) fscale = kwds.get('fscale', None) if floc is None or fscale is None: # do general fit return super(beta_gen, self).fit(data, *args, **kwds) if f0 is not None and f1 is not None: # This check is for consistency with `rv_continuous.fit`. raise ValueError("All parameters fixed. There is nothing to " "optimize.") # Special case: loc and scale are constrained, so we are fitting # just the shape parameters. This can be done much more efficiently # than the method used in `rv_continuous.fit`. (See the subsection # "Two unknown parameters" in the section "Maximum likelihood" of # the Wikipedia article on the Beta distribution for the formulas.) # Normalize the data to the interval [0, 1]. data = (np.ravel(data) - floc) / fscale if np.any(data <= 0) or np.any(data >= 1): raise FitDataError("beta", lower=floc, upper=floc + fscale) xbar = data.mean() if f0 is not None or f1 is not None: # One of the shape parameters is fixed. if f0 is not None: # The shape parameter a is fixed, so swap the parameters # and flip the data. We always solve for `a`. The result # will be swapped back before returning. b = f0 data = 1 - data xbar = 1 - xbar else: b = f1 # Initial guess for a. Use the formula for the mean of the beta # distribution, E[x] = a / (a + b), to generate a reasonable # starting point based on the mean of the data and the given # value of b. a = b * xbar / (1 - xbar) # Compute the MLE for `a` by solving _beta_mle_a. theta, info, ier, mesg = optimize.fsolve( _beta_mle_a, a, args=(b, len(data), np.log(data).sum()), full_output=True ) if ier != 1: raise FitSolverError(mesg=mesg) a = theta[0] if f0 is not None: # The shape parameter a was fixed, so swap back the # parameters. a, b = b, a else: # Neither of the shape parameters is fixed. # s1 and s2 are used in the extra arguments passed to _beta_mle_ab # by optimize.fsolve. 
s1 = np.log(data).sum() s2 = sc.log1p(-data).sum() # Use the "method of moments" to estimate the initial # guess for a and b. fac = xbar * (1 - xbar) / data.var(ddof=0) - 1 a = xbar * fac b = (1 - xbar) * fac # Compute the MLE for a and b by solving _beta_mle_ab. theta, info, ier, mesg = optimize.fsolve( _beta_mle_ab, [a, b], args=(len(data), s1, s2), full_output=True ) if ier != 1: raise FitSolverError(mesg=mesg) a, b = theta return a, b, floc, fscale beta = beta_gen(a=0.0, b=1.0, name='beta') class betaprime_gen(rv_continuous): r"""A beta prime continuous random variable. %(before_notes)s Notes ----- The probability density function for `betaprime` is: .. math:: f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)} for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta function (see `scipy.special.beta`). `betaprime` takes ``a`` and ``b`` as shape parameters. %(after_notes)s %(example)s """ _support_mask = rv_continuous._open_support_mask def _rvs(self, a, b): sz, rndm = self._size, self._random_state u1 = gamma.rvs(a, size=sz, random_state=rndm) u2 = gamma.rvs(b, size=sz, random_state=rndm) return u1 / u2 def _pdf(self, x, a, b): # betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b) return np.exp(self._logpdf(x, a, b)) def _logpdf(self, x, a, b): return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b) def _cdf(self, x, a, b): return sc.betainc(a, b, x/(1.+x)) def _munp(self, n, a, b): if n == 1.0: return np.where(b > 1, a/(b-1.0), np.inf) elif n == 2.0: return np.where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), np.inf) elif n == 3.0: return np.where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)), np.inf) elif n == 4.0: return np.where(b > 4, (a*(a + 1.0)*(a + 2.0)*(a + 3.0) / ((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))), np.inf) else: raise NotImplementedError betaprime = betaprime_gen(a=0.0, name='betaprime') class bradford_gen(rv_continuous): r"""A Bradford continuous random variable. %(before_notes)s Notes ----- The probability density function for `bradford` is: .. math:: f(x, c) = \frac{c}{k (1+cx)} for :math:`0 < x < 1`, :math:`c > 0` and :math:`k = \log(1+c)`. `bradford` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, c): # bradford.pdf(x, c) = c / (k * (1+c*x)) return c / (c*x + 1.0) / sc.log1p(c) def _cdf(self, x, c): return sc.log1p(c*x) / sc.log1p(c) def _ppf(self, q, c): return sc.expm1(q * sc.log1p(c)) / c def _stats(self, c, moments='mv'): k = np.log(1.0+c) mu = (c-k)/(c*k) mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k) g1 = None g2 = None if 's' in moments: g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3)) g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k) if 'k' in moments: g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) + 6*c*k*k*(3*k-14) + 12*k**3) g2 /= 3*c*(c*(k-2)+2*k)**2 return mu, mu2, g1, g2 def _entropy(self, c): k = np.log(1+c) return k/2.0 - np.log(c/k) bradford = bradford_gen(a=0.0, b=1.0, name='bradford') class burr_gen(rv_continuous): r"""A Burr (Type III) continuous random variable. %(before_notes)s See Also -------- fisk : a special case of either `burr` or ``burr12`` with ``d = 1`` burr12 : Burr Type XII distribution Notes ----- The probability density function for `burr` is: .. math:: f(x, c, d) = c d x^{-c-1} (1+x^{-c})^{-d-1} for :math:`x > 0`. `burr` takes :math:`c` and :math:`d` as shape parameters. This is the PDF corresponding to the third CDF given in Burr's list; specifically, it is equation (11) in Burr's paper [1]_. %(after_notes)s References ---------- .. [1] Burr, I. W. 
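# Editor's sketch (not from the scipy source itself): with both floc and fscale
# fixed, beta.fit takes the specialized MLE path implemented in the fit method
# above and only the two shape parameters are estimated.  Seed, sample size and
# the true shapes are arbitrary example values.
import numpy as np
from scipy.stats import beta
rng = np.random.RandomState(1)
sample = rng.beta(2.0, 5.0, size=2000)
a_hat, b_hat, loc, scale = beta.fit(sample, floc=0, fscale=1)
print(a_hat, b_hat)   # close to the true shapes 2 and 5
print(loc, scale)     # exactly the fixed values 0 and 1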
"Cumulative frequency functions", Annals of Mathematical Statistics, 13(2), pp 215-232 (1942). %(example)s """ _support_mask = rv_continuous._open_support_mask def _pdf(self, x, c, d): # burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1) return c * d * (x**(-c - 1.0)) * ((1 + x**(-c))**(-d - 1.0)) def _cdf(self, x, c, d): return (1 + x**(-c))**(-d) def _ppf(self, q, c, d): return (q**(-1.0/d) - 1)**(-1.0/c) def _munp(self, n, c, d): nc = 1. * n / c return d * sc.beta(1.0 - nc, d + nc) burr = burr_gen(a=0.0, name='burr') class burr12_gen(rv_continuous): r"""A Burr (Type XII) continuous random variable. %(before_notes)s See Also -------- fisk : a special case of either `burr` or ``burr12`` with ``d = 1`` burr : Burr Type III distribution Notes ----- The probability density function for `burr` is: .. math:: f(x, c, d) = c d x^{c-1} (1+x^c)^{-d-1} for :math:`x > 0`. `burr12` takes :math:`c` and :math:`d` as shape parameters. This is the PDF corresponding to the twelfth CDF given in Burr's list; specifically, it is equation (20) in Burr's paper [1]_. %(after_notes)s The Burr type 12 distribution is also sometimes referred to as the Singh-Maddala distribution from NIST [2]_. References ---------- .. [1] Burr, I. W. "Cumulative frequency functions", Annals of Mathematical Statistics, 13(2), pp 215-232 (1942). .. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm %(example)s """ _support_mask = rv_continuous._open_support_mask def _pdf(self, x, c, d): # burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1) return np.exp(self._logpdf(x, c, d)) def _logpdf(self, x, c, d): return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c) def _cdf(self, x, c, d): return -sc.expm1(self._logsf(x, c, d)) def _logcdf(self, x, c, d): return sc.log1p(-(1 + x**c)**(-d)) def _sf(self, x, c, d): return np.exp(self._logsf(x, c, d)) def _logsf(self, x, c, d): return sc.xlog1py(-d, x**c) def _ppf(self, q, c, d): # The following is an implementation of # ((1 - q)**(-1.0/d) - 1)**(1.0/c) # that does a better job handling small values of q. return sc.expm1(-1/d * sc.log1p(-q))**(1/c) def _munp(self, n, c, d): nc = 1. * n / c return d * sc.beta(1.0 + nc, d - nc) burr12 = burr12_gen(a=0.0, name='burr12') class fisk_gen(burr_gen): r"""A Fisk continuous random variable. The Fisk distribution is also known as the log-logistic distribution, and equals the Burr distribution with ``d == 1``. `fisk` takes :math:`c` as a shape parameter. %(before_notes)s Notes ----- The probability density function for `fisk` is: .. math:: f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2} for :math:`x > 0`. `fisk` takes :math:`c` as a shape parameters. %(after_notes)s See Also -------- burr %(example)s """ def _pdf(self, x, c): # fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2) return burr_gen._pdf(self, x, c, 1.0) def _cdf(self, x, c): return burr_gen._cdf(self, x, c, 1.0) def _ppf(self, x, c): return burr_gen._ppf(self, x, c, 1.0) def _munp(self, n, c): return burr_gen._munp(self, n, c, 1.0) def _entropy(self, c): return 2 - np.log(c) fisk = fisk_gen(a=0.0, name='fisk') # median = loc class cauchy_gen(rv_continuous): r"""A Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `cauchy` is: .. 
math:: f(x) = \frac{1}{\pi (1 + x^2)} %(after_notes)s %(example)s """ def _pdf(self, x): # cauchy.pdf(x) = 1 / (pi * (1 + x**2)) return 1.0/np.pi/(1.0+x*x) def _cdf(self, x): return 0.5 + 1.0/np.pi*np.arctan(x) def _ppf(self, q): return np.tan(np.pi*q-np.pi/2.0) def _sf(self, x): return 0.5 - 1.0/np.pi*np.arctan(x) def _isf(self, q): return np.tan(np.pi/2.0-np.pi*q) def _stats(self): return np.nan, np.nan, np.nan, np.nan def _entropy(self): return np.log(4*np.pi) def _fitstart(self, data, args=None): # Initialize ML guesses using quartiles instead of moments. p25, p50, p75 = np.percentile(data, [25, 50, 75]) return p50, (p75 - p25)/2 cauchy = cauchy_gen(name='cauchy') class chi_gen(rv_continuous): r"""A chi continuous random variable. %(before_notes)s Notes ----- The probability density function for `chi` is: .. math:: f(x, df) = \frac{x^{df-1} \exp(-x^2/2)}{2^{df/2-1} \gamma(df/2)} for :math:`x > 0`. Special cases of `chi` are: - ``chi(1, loc, scale)`` is equivalent to `halfnorm` - ``chi(2, 0, scale)`` is equivalent to `rayleigh` - ``chi(3, 0, scale)`` is equivalent to `maxwell` `chi` takes ``df`` as a shape parameter. %(after_notes)s %(example)s """ def _rvs(self, df): sz, rndm = self._size, self._random_state return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm)) def _pdf(self, x, df): # x**(df-1) * exp(-x**2/2) # chi.pdf(x, df) = ------------------------- # 2**(df/2-1) * gamma(df/2) return np.exp(self._logpdf(x, df)) def _logpdf(self, x, df): l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df) return l + sc.xlogy(df - 1., x) - .5*x**2 def _cdf(self, x, df): return sc.gammainc(.5*df, .5*x**2) def _ppf(self, q, df): return np.sqrt(2*sc.gammaincinv(.5*df, q)) def _stats(self, df): mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0) mu2 = df - mu*mu g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5)) g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1) g2 /= np.asarray(mu2**2.0) return mu, mu2, g1, g2 chi = chi_gen(a=0.0, name='chi') ## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2) class chi2_gen(rv_continuous): r"""A chi-squared continuous random variable. %(before_notes)s Notes ----- The probability density function for `chi2` is: .. math:: f(x, df) = \frac{1}{(2 \gamma(df/2)} (x/2)^{df/2-1} \exp(-x/2) `chi2` takes ``df`` as a shape parameter. %(after_notes)s %(example)s """ def _rvs(self, df): return self._random_state.chisquare(df, self._size) def _pdf(self, x, df): # chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2) return np.exp(self._logpdf(x, df)) def _logpdf(self, x, df): return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2. def _cdf(self, x, df): return sc.chdtr(df, x) def _sf(self, x, df): return sc.chdtrc(df, x) def _isf(self, p, df): return sc.chdtri(df, p) def _ppf(self, p, df): return self._isf(1.0-p, df) def _stats(self, df): mu = df mu2 = 2*df g1 = 2*np.sqrt(2.0/df) g2 = 12.0/df return mu, mu2, g1, g2 chi2 = chi2_gen(a=0.0, name='chi2') class cosine_gen(rv_continuous): r"""A cosine continuous random variable. %(before_notes)s Notes ----- The cosine distribution is an approximation to the normal distribution. The probability density function for `cosine` is: .. math:: f(x) = \frac{1}{2\pi} (1+\cos(x)) for :math:`-\pi \le x \le \pi`. 
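# Editor's sketch (not from the scipy source itself): the comment before chi2_gen
# describes chi-squared as a gamma distribution with shape df/2 and scale 2; a
# quick numerical check of that identity at arbitrary points.
import numpy as np
from scipy.stats import chi2, gamma

x, df = np.linspace(0.5, 10, 5), 4
print(np.allclose(chi2.pdf(x, df), gamma.pdf(x, df / 2, scale=2)))  # True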
    %(after_notes)s

    %(example)s

    """
    def _pdf(self, x):
        # cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
        return 1.0/2/np.pi*(1+np.cos(x))

    def _cdf(self, x):
        return 1.0/2/np.pi*(np.pi + x + np.sin(x))

    def _stats(self):
        return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)

    def _entropy(self):
        return np.log(4*np.pi)-1.0


cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')


class dgamma_gen(rv_continuous):
    r"""A double gamma continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `dgamma` is:

    .. math::

        f(x, a) = \frac{1}{2\gamma(a)} |x|^{a-1} \exp(-|x|)

    for :math:`a > 0`.

    `dgamma` takes :math:`a` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, a):
        sz, rndm = self._size, self._random_state
        u = rndm.random_sample(size=sz)
        gm = gamma.rvs(a, size=sz, random_state=rndm)
        return gm * np.where(u >= 0.5, 1, -1)

    def _pdf(self, x, a):
        # dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
        ax = abs(x)
        return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)

    def _logpdf(self, x, a):
        ax = abs(x)
        return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)

    def _cdf(self, x, a):
        fac = 0.5*sc.gammainc(a, abs(x))
        return np.where(x > 0, 0.5 + fac, 0.5 - fac)

    def _sf(self, x, a):
        fac = 0.5*sc.gammainc(a, abs(x))
        return np.where(x > 0, 0.5-fac, 0.5+fac)

    def _ppf(self, q, a):
        fac = sc.gammainccinv(a, 1-abs(2*q-1))
        return np.where(q > 0.5, fac, -fac)

    def _stats(self, a):
        mu2 = a*(a+1.0)
        return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0


dgamma = dgamma_gen(name='dgamma')


class dweibull_gen(rv_continuous):
    r"""A double Weibull continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `dweibull` is:

    .. math::

        f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)

    `dweibull` takes :math:`c` as a shape parameter.

    %(after_notes)s

    %(example)s

    """
    def _rvs(self, c):
        sz, rndm = self._size, self._random_state
        u = rndm.random_sample(size=sz)
        w = weibull_min.rvs(c, size=sz, random_state=rndm)
        return w * (np.where(u >= 0.5, 1, -1))

    def _pdf(self, x, c):
        # dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
        ax = abs(x)
        Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
        return Px

    def _logpdf(self, x, c):
        ax = abs(x)
        return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c

    def _cdf(self, x, c):
        Cx1 = 0.5 * np.exp(-abs(x)**c)
        return np.where(x > 0, 1 - Cx1, Cx1)

    def _ppf(self, q, c):
        fac = 2. * np.where(q <= 0.5, q, 1. - q)
        fac = np.power(-np.log(fac), 1.0 / c)
        return np.where(q > 0.5, fac, -fac)

    def _munp(self, n, c):
        return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)

    # since we know that all odd moments are zeros, return them at once.
    # returning Nones from _stats makes the public stats call _munp
    # so overall we're saving one or two gamma function evaluations here.
    def _stats(self, c):
        return 0, None, 0, None


dweibull = dweibull_gen(name='dweibull')


## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
    r"""An exponential continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `expon` is:

    .. math::

        f(x) = \exp(-x)

    for :math:`x \ge 0`.

    %(after_notes)s

    A common parameterization for `expon` is in terms of the rate parameter
    ``lambda``, such that ``pdf = lambda * exp(-lambda * x)``.  This
    parameterization corresponds to using ``scale = 1 / lambda``.
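# Editor's sketch (not from the scipy source itself): the rate parameterization
# noted just above, i.e. expon with scale = 1/lambda matches lambda * exp(-lambda * x).
# lam and the grid of x values are arbitrary example values.
import numpy as np
from scipy.stats import expon

lam = 1.5
x = np.linspace(0, 4, 5)
print(np.allclose(expon.pdf(x, scale=1 / lam), lam * np.exp(-lam * x)))  # True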
%(example)s """ def _rvs(self): return self._random_state.standard_exponential(self._size) def _pdf(self, x): # expon.pdf(x) = exp(-x) return np.exp(-x) def _logpdf(self, x): return -x def _cdf(self, x): return -sc.expm1(-x) def _ppf(self, q): return -sc.log1p(-q) def _sf(self, x): return np.exp(-x) def _logsf(self, x): return -x def _isf(self, q): return -np.log(q) def _stats(self): return 1.0, 1.0, 2.0, 6.0 def _entropy(self): return 1.0 @replace_notes_in_docstring(rv_continuous, notes="""\ This function uses explicit formulas for the maximum likelihood estimation of the exponential distribution parameters, so the `optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""") def fit(self, data, *args, **kwds): if len(args) > 0: raise TypeError("Too many arguments.") floc = kwds.pop('floc', None) fscale = kwds.pop('fscale', None) # Ignore the optimizer-related keyword arguments, if given. kwds.pop('loc', None) kwds.pop('scale', None) kwds.pop('optimizer', None) if kwds: raise TypeError("Unknown arguments: %s." % kwds) if floc is not None and fscale is not None: # This check is for consistency with `rv_continuous.fit`. raise ValueError("All parameters fixed. There is nothing to " "optimize.") data = np.asarray(data) data_min = data.min() if floc is None: # ML estimate of the location is the minimum of the data. loc = data_min else: loc = floc if data_min < loc: # There are values that are less than the specified loc. raise FitDataError("expon", lower=floc, upper=np.inf) if fscale is None: # ML estimate of the scale is the shifted mean. scale = data.mean() - loc else: scale = fscale # We expect the return values to be floating point, so ensure it # by explicitly converting to float. return float(loc), float(scale) expon = expon_gen(a=0.0, name='expon') ## Exponentially Modified Normal (exponential distribution ## convolved with a Normal). ## This is called an exponentially modified gaussian on wikipedia class exponnorm_gen(rv_continuous): r"""An exponentially modified Normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `exponnorm` is: .. math:: f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2}\right) \exp(-x / K) \text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right) where the shape parameter :math:`K > 0`. It can be thought of as the sum of a normally distributed random value with mean ``loc`` and sigma ``scale`` and an exponentially distributed random number with a pdf proportional to ``exp(-lambda * x)`` where ``lambda = (K * scale)**(-1)``. %(after_notes)s An alternative parameterization of this distribution (for example, in `Wikipedia <http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_) involves three parameters, :math:`\mu`, :math:`\lambda` and :math:`\sigma`. In the present parameterization this corresponds to having ``loc`` and ``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and shape parameter :math:`K = 1/(\sigma\lambda)`. .. 
versionadded:: 0.16.0 %(example)s """ def _rvs(self, K): expval = self._random_state.standard_exponential(self._size) * K gval = self._random_state.standard_normal(self._size) return expval + gval def _pdf(self, x, K): # exponnorm.pdf(x, K) = # 1/(2*K) exp(1/(2 * K**2)) exp(-x / K) * erfc-(x - 1/K) / sqrt(2)) invK = 1.0 / K exparg = 0.5 * invK**2 - invK * x # Avoid overflows; setting np.exp(exparg) to the max float works # all right here expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX) return 0.5 * invK * expval * sc.erfc(-(x - invK) / np.sqrt(2)) def _logpdf(self, x, K): invK = 1.0 / K exparg = 0.5 * invK**2 - invK * x return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2))) def _cdf(self, x, K): invK = 1.0 / K expval = invK * (0.5 * invK - x) return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK) def _sf(self, x, K): invK = 1.0 / K expval = invK * (0.5 * invK - x) return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK) def _stats(self, K): K2 = K * K opK2 = 1.0 + K2 skw = 2 * K**3 * opK2**(-1.5) krt = 6.0 * K2 * K2 * opK2**(-2) return K, opK2, skw, krt exponnorm = exponnorm_gen(name='exponnorm') class exponweib_gen(rv_continuous): r"""An exponentiated Weibull continuous random variable. %(before_notes)s Notes ----- The probability density function for `exponweib` is: .. math:: f(x, a, c) = a c (1-\exp(-x^c))^{a-1} \exp(-x^c) x^{c-1} for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`. `exponweib` takes :math:`a` and :math:`c` as shape parameters. %(after_notes)s %(example)s """ def _pdf(self, x, a, c): # exponweib.pdf(x, a, c) = # a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1) return np.exp(self._logpdf(x, a, c)) def _logpdf(self, x, a, c): negxc = -x**c exm1c = -sc.expm1(negxc) logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) + negxc + sc.xlogy(c - 1.0, x)) return logp def _cdf(self, x, a, c): exm1c = -sc.expm1(-x**c) return exm1c**a def _ppf(self, q, a, c): return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c) exponweib = exponweib_gen(a=0.0, name='exponweib') class exponpow_gen(rv_continuous): r"""An exponential power continuous random variable. %(before_notes)s Notes ----- The probability density function for `exponpow` is: .. math:: f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b)) for :math:`x \ge 0`, :math:`b > 0``. Note that this is a different distribution from the exponential power distribution that is also known under the names "generalized normal" or "generalized Gaussian". `exponpow` takes :math:`b` as a shape parameter. %(after_notes)s References ---------- http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf %(example)s """ def _pdf(self, x, b): # exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b)) return np.exp(self._logpdf(x, b)) def _logpdf(self, x, b): xb = x**b f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb) return f def _cdf(self, x, b): return -sc.expm1(-sc.expm1(x**b)) def _sf(self, x, b): return np.exp(-sc.expm1(x**b)) def _isf(self, x, b): return (sc.log1p(-np.log(x)))**(1./b) def _ppf(self, q, b): return pow(sc.log1p(-sc.log1p(-q)), 1.0/b) exponpow = exponpow_gen(a=0.0, name='exponpow') class fatiguelife_gen(rv_continuous): r"""A fatigue-life (Birnbaum-Saunders) continuous random variable. %(before_notes)s Notes ----- The probability density function for `fatiguelife` is: .. math:: f(x, c) = \frac{x+1}{ 2c\sqrt{2\pi x^3} \exp(-\frac{(x-1)^2}{2x c^2}} for :math:`x > 0`. `fatiguelife` takes :math:`c` as a shape parameter. %(after_notes)s References ---------- .. 
[1] "Birnbaum-Saunders distribution", http://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution %(example)s """ _support_mask = rv_continuous._open_support_mask def _rvs(self, c): z = self._random_state.standard_normal(self._size) x = 0.5*c*z x2 = x*x t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2) return t def _pdf(self, x, c): # fatiguelife.pdf(x, c) = # (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2)) return np.exp(self._logpdf(x, c)) def _logpdf(self, x, c): return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) - 0.5*(np.log(2*np.pi) + 3*np.log(x))) def _cdf(self, x, c): return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x))) def _ppf(self, q, c): tmp = c*sc.ndtri(q) return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2 def _stats(self, c): # NB: the formula for kurtosis in wikipedia seems to have an error: # it's 40, not 41. At least it disagrees with the one from Wolfram # Alpha. And the latter one, below, passes the tests, while the wiki # one doesn't So far I didn't have the guts to actually check the # coefficients from the expressions for the raw moments. c2 = c*c mu = c2 / 2.0 + 1.0 den = 5.0 * c2 + 4.0 mu2 = c2*den / 4.0 g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5) g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0 return mu, mu2, g1, g2 fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife') class foldcauchy_gen(rv_continuous): r"""A folded Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `foldcauchy` is: .. math:: f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)} for :math:`x \ge 0``. `foldcauchy` takes :math:`c` as a shape parameter. %(example)s """ def _rvs(self, c): return abs(cauchy.rvs(loc=c, size=self._size, random_state=self._random_state)) def _pdf(self, x, c): # foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2)) return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2)) def _cdf(self, x, c): return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c)) def _stats(self, c): return np.inf, np.inf, np.nan, np.nan foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy') class f_gen(rv_continuous): r"""An F continuous random variable. %(before_notes)s Notes ----- The probability density function for `f` is: .. math:: f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}} {(df_2+df_1 x)^{(df_1+df_2)/2} B(df_1/2, df_2/2)} for :math:`x > 0`. `f` takes ``dfn`` and ``dfd`` as shape parameters. %(after_notes)s %(example)s """ def _rvs(self, dfn, dfd): return self._random_state.f(dfn, dfd, self._size) def _pdf(self, x, dfn, dfd): # df2**(df2/2) * df1**(df1/2) * x**(df1/2-1) # F.pdf(x, df1, df2) = -------------------------------------------- # (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2) return np.exp(self._logpdf(x, dfn, dfd)) def _logpdf(self, x, dfn, dfd): n = 1.0 * dfn m = 1.0 * dfd lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x) lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2) return lPx def _cdf(self, x, dfn, dfd): return sc.fdtr(dfn, dfd, x) def _sf(self, x, dfn, dfd): return sc.fdtrc(dfn, dfd, x) def _ppf(self, q, dfn, dfd): return sc.fdtri(dfn, dfd, q) def _stats(self, dfn, dfd): v1, v2 = 1. * dfn, 1. * dfd v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8. 
mu = _lazywhere( v2 > 2, (v2, v2_2), lambda v2, v2_2: v2 / v2_2, np.inf) mu2 = _lazywhere( v2 > 4, (v1, v2, v2_2, v2_4), lambda v1, v2, v2_2, v2_4: 2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4), np.inf) g1 = _lazywhere( v2 > 6, (v1, v2_2, v2_4, v2_6), lambda v1, v2_2, v2_4, v2_6: (2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))), np.nan) g1 *= np.sqrt(8.) g2 = _lazywhere( v2 > 8, (g1, v2_6, v2_8), lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8, np.nan) g2 *= 3. / 2. return mu, mu2, g1, g2 f = f_gen(a=0.0, name='f') ## Folded Normal ## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S) ## ## note: regress docs have scale parameter correct, but first parameter ## he gives is a shape parameter A = c * scale ## Half-normal is folded normal with shape-parameter c=0. class foldnorm_gen(rv_continuous): r"""A folded normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `foldnorm` is: .. math:: f(x, c) = \sqrt{2/\pi} cosh(c x) \exp(-\frac{x^2+c^2}{2}) for :math:`c \ge 0`. `foldnorm` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _argcheck(self, c): return c >= 0 def _rvs(self, c): return abs(self._random_state.standard_normal(self._size) + c) def _pdf(self, x, c): # foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2) return _norm_pdf(x + c) + _norm_pdf(x-c) def _cdf(self, x, c): return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0 def _stats(self, c): # Regina C. Elandt, Technometrics 3, 551 (1961) # http://www.jstor.org/stable/1266561 # c2 = c*c expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi) mu = 2.*expfac + c * sc.erf(c/np.sqrt(2)) mu2 = c2 + 1 - mu*mu g1 = 2. * (mu*mu*mu - c2*mu - expfac) g1 /= np.power(mu2, 1.5) g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2 g2 = g2 / mu2**2.0 - 3. return mu, mu2, g1, g2 foldnorm = foldnorm_gen(a=0.0, name='foldnorm') class weibull_min_gen(rv_continuous): r"""Weibull minimum continuous random variable. %(before_notes)s See Also -------- weibull_max Notes ----- The probability density function for `weibull_min` is: .. math:: f(x, c) = c x^{c-1} \exp(-x^c) for :math:`x > 0`, :math:`c > 0`. `weibull_min` takes ``c`` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, c): # frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c) return c*pow(x, c-1)*np.exp(-pow(x, c)) def _logpdf(self, x, c): return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c) def _cdf(self, x, c): return -sc.expm1(-pow(x, c)) def _sf(self, x, c): return np.exp(-pow(x, c)) def _logsf(self, x, c): return -pow(x, c) def _ppf(self, q, c): return pow(-sc.log1p(-q), 1.0/c) def _munp(self, n, c): return sc.gamma(1.0+n*1.0/c) def _entropy(self, c): return -_EULER / c - np.log(c) + _EULER + 1 weibull_min = weibull_min_gen(a=0.0, name='weibull_min') class weibull_max_gen(rv_continuous): r"""Weibull maximum continuous random variable. %(before_notes)s See Also -------- weibull_min Notes ----- The probability density function for `weibull_max` is: .. math:: f(x, c) = c (-x)^{c-1} \exp(-(-x)^c) for :math:`x < 0`, :math:`c > 0`. `weibull_max` takes ``c`` as a shape parameter. 
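# Editor's sketch (not from the scipy source itself): weibull_max is the mirror
# image of weibull_min (compare the two densities given above), so its density
# at -x equals weibull_min's density at x.  c and x are arbitrary example values.
import numpy as np
from scipy.stats import weibull_min, weibull_max

c = 1.8
x = np.linspace(0.1, 3, 5)
print(np.allclose(weibull_max.pdf(-x, c), weibull_min.pdf(x, c)))  # True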
%(after_notes)s %(example)s """ def _pdf(self, x, c): # frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c) return c*pow(-x, c-1)*np.exp(-pow(-x, c)) def _logpdf(self, x, c): return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c) def _cdf(self, x, c): return np.exp(-pow(-x, c)) def _logcdf(self, x, c): return -pow(-x, c) def _sf(self, x, c): return -sc.expm1(-pow(-x, c)) def _ppf(self, q, c): return -pow(-np.log(q), 1.0/c) def _munp(self, n, c): val = sc.gamma(1.0+n*1.0/c) if int(n) % 2: sgn = -1 else: sgn = 1 return sgn * val def _entropy(self, c): return -_EULER / c - np.log(c) + _EULER + 1 weibull_max = weibull_max_gen(b=0.0, name='weibull_max') # Public methods to be deprecated in frechet_r and frechet_l: # ['__call__', 'cdf', 'entropy', 'expect', 'fit', 'fit_loc_scale', 'freeze', # 'interval', 'isf', 'logcdf', 'logpdf', 'logsf', 'mean', 'median', 'moment', # 'nnlf', 'pdf', 'ppf', 'rvs', 'sf', 'stats', 'std', 'var'] _frechet_r_deprec_msg = """\ The distribution `frechet_r` is a synonym for `weibull_min`; this historical usage is deprecated because of possible confusion with the (quite different) Frechet distribution. To preserve the existing behavior of the program, use `scipy.stats.weibull_min`. For the Frechet distribution (i.e. the Type II extreme value distribution), use `scipy.stats.invweibull`.""" class frechet_r_gen(weibull_min_gen): @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def __call__(self, *args, **kwargs): return weibull_min_gen.__call__(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def cdf(self, *args, **kwargs): return weibull_min_gen.cdf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def entropy(self, *args, **kwargs): return weibull_min_gen.entropy(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def expect(self, *args, **kwargs): return weibull_min_gen.expect(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def fit(self, *args, **kwargs): return weibull_min_gen.fit(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def fit_loc_scale(self, *args, **kwargs): return weibull_min_gen.fit_loc_scale(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def freeze(self, *args, **kwargs): return weibull_min_gen.freeze(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def interval(self, *args, **kwargs): return weibull_min_gen.interval(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def isf(self, *args, **kwargs): return weibull_min_gen.isf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def logcdf(self, *args, **kwargs): return weibull_min_gen.logcdf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def logpdf(self, *args, **kwargs): return weibull_min_gen.logpdf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def logsf(self, *args, **kwargs): return weibull_min_gen.logsf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def mean(self, *args, **kwargs): return weibull_min_gen.mean(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def median(self, *args, **kwargs): return weibull_min_gen.median(self, *args, **kwargs) 
@np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def moment(self, *args, **kwargs): return weibull_min_gen.moment(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def nnlf(self, *args, **kwargs): return weibull_min_gen.nnlf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def pdf(self, *args, **kwargs): return weibull_min_gen.pdf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def ppf(self, *args, **kwargs): return weibull_min_gen.ppf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def rvs(self, *args, **kwargs): return weibull_min_gen.rvs(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def sf(self, *args, **kwargs): return weibull_min_gen.sf(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def stats(self, *args, **kwargs): return weibull_min_gen.stats(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def std(self, *args, **kwargs): return weibull_min_gen.std(self, *args, **kwargs) @np.deprecate(old_name='frechet_r', message=_frechet_r_deprec_msg) def var(self, *args, **kwargs): return weibull_min_gen.var(self, *args, **kwargs) frechet_r = frechet_r_gen(a=0.0, name='frechet_r') _frechet_l_deprec_msg = """\ The distribution `frechet_l` is a synonym for `weibull_max`; this historical usage is deprecated because of possible confusion with the (quite different) Frechet distribution. To preserve the existing behavior of the program, use `scipy.stats.weibull_max`. For the Frechet distribution (i.e. the Type II extreme value distribution), use `scipy.stats.invweibull`.""" class frechet_l_gen(weibull_max_gen): @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def __call__(self, *args, **kwargs): return weibull_max_gen.__call__(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def cdf(self, *args, **kwargs): return weibull_max_gen.cdf(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def entropy(self, *args, **kwargs): return weibull_max_gen.entropy(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def expect(self, *args, **kwargs): return weibull_max_gen.expect(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def fit(self, *args, **kwargs): return weibull_max_gen.fit(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def fit_loc_scale(self, *args, **kwargs): return weibull_max_gen.fit_loc_scale(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def freeze(self, *args, **kwargs): return weibull_max_gen.freeze(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def interval(self, *args, **kwargs): return weibull_max_gen.interval(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def isf(self, *args, **kwargs): return weibull_max_gen.isf(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def logcdf(self, *args, **kwargs): return weibull_max_gen.logcdf(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def logpdf(self, *args, **kwargs): return weibull_max_gen.logpdf(self, *args, **kwargs) 
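# Editor's sketch (not from the scipy source itself): per the deprecation
# messages above, code that used the frechet_r / frechet_l synonyms should call
# weibull_min / weibull_max directly; the distributions are identical, only the
# deprecated names emit a warning.
from scipy.stats import weibull_min, weibull_max

print(weibull_min.cdf(1.0, 2.0))   # preferred spelling of frechet_r.cdf(1.0, 2.0)
print(weibull_max.cdf(-1.0, 2.0))  # preferred spelling of frechet_l.cdf(-1.0, 2.0)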
@np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def logsf(self, *args, **kwargs): return weibull_max_gen.logsf(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def mean(self, *args, **kwargs): return weibull_max_gen.mean(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def median(self, *args, **kwargs): return weibull_max_gen.median(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def moment(self, *args, **kwargs): return weibull_max_gen.moment(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def nnlf(self, *args, **kwargs): return weibull_max_gen.nnlf(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def pdf(self, *args, **kwargs): return weibull_max_gen.pdf(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def ppf(self, *args, **kwargs): return weibull_max_gen.ppf(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def rvs(self, *args, **kwargs): return weibull_max_gen.rvs(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def sf(self, *args, **kwargs): return weibull_max_gen.sf(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def stats(self, *args, **kwargs): return weibull_max_gen.stats(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def std(self, *args, **kwargs): return weibull_max_gen.std(self, *args, **kwargs) @np.deprecate(old_name='frechet_l', message=_frechet_l_deprec_msg) def var(self, *args, **kwargs): return weibull_max_gen.var(self, *args, **kwargs) frechet_l = frechet_l_gen(b=0.0, name='frechet_l') class genlogistic_gen(rv_continuous): r"""A generalized logistic continuous random variable. %(before_notes)s Notes ----- The probability density function for `genlogistic` is: .. math:: f(x, c) = c \frac{\exp(-x)} {(1 + \exp(-x))^{c+1}} for :math:`x > 0`, :math:`c > 0`. `genlogistic` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, c): # genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1) return np.exp(self._logpdf(x, c)) def _logpdf(self, x, c): return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x)) def _cdf(self, x, c): Cx = (1+np.exp(-x))**(-c) return Cx def _ppf(self, q, c): vals = -np.log(pow(q, -1.0/c)-1) return vals def _stats(self, c): mu = _EULER + sc.psi(c) mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c) g1 = -2*sc.zeta(3, c) + 2*_ZETA3 g1 /= np.power(mu2, 1.5) g2 = np.pi**4/15.0 + 6*sc.zeta(4, c) g2 /= mu2**2.0 return mu, mu2, g1, g2 genlogistic = genlogistic_gen(name='genlogistic') class genpareto_gen(rv_continuous): r"""A generalized Pareto continuous random variable. %(before_notes)s Notes ----- The probability density function for `genpareto` is: .. math:: f(x, c) = (1 + c x)^{-1 - 1/c} defined for :math:`x \ge 0` if :math:`c \ge 0`, and for :math:`0 \le x \le -1/c` if :math:`c < 0`. `genpareto` takes :math:`c` as a shape parameter. For ``c == 0``, `genpareto` reduces to the exponential distribution, `expon`: .. math:: f(x, c=0) = \exp(-x) For ``c == -1``, `genpareto` is uniform on ``[0, 1]``: .. math:: f(x, c=-1) = x %(after_notes)s %(example)s """ def _argcheck(self, c): c = np.asarray(c) self.b = _lazywhere(c < 0, (c,), lambda c: -1. 
/ c, np.inf) return True def _pdf(self, x, c): # genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c) return np.exp(self._logpdf(x, c)) def _logpdf(self, x, c): return _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: -sc.xlog1py(c + 1., c*x) / c, -x) def _cdf(self, x, c): return -sc.inv_boxcox1p(-x, -c) def _sf(self, x, c): return sc.inv_boxcox(-x, -c) def _logsf(self, x, c): return _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: -sc.log1p(c*x) / c, -x) def _ppf(self, q, c): return -sc.boxcox1p(-q, -c) def _isf(self, q, c): return -sc.boxcox(q, -c) def _munp(self, n, c): def __munp(n, c): val = 0.0 k = np.arange(0, n + 1) for ki, cnk in zip(k, sc.comb(n, k)): val = val + cnk * (-1) ** ki / (1.0 - c * ki) return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf) return _lazywhere(c != 0, (c,), lambda c: __munp(n, c), sc.gamma(n + 1)) def _entropy(self, c): return 1. + c genpareto = genpareto_gen(a=0.0, name='genpareto') class genexpon_gen(rv_continuous): r"""A generalized exponential continuous random variable. %(before_notes)s Notes ----- The probability density function for `genexpon` is: .. math:: f(x, a, b, c) = (a + b (1 - \exp(-c x))) \exp(-a x - b x + \frac{b}{c} (1-\exp(-c x))) for :math:`x \ge 0`, :math:`a, b, c > 0`. `genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters. %(after_notes)s References ---------- H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential Distribution", Journal of the American Statistical Association, 1993. N. Balakrishnan, "The Exponential Distribution: Theory, Methods and Applications", Asit P. Basu. %(example)s """ def _pdf(self, x, a, b, c): # genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \ # exp(-a*x - b*x + b/c * (1-exp(-c*x))) return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x + b*(-sc.expm1(-c*x))/c) def _cdf(self, x, a, b, c): return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c) def _logpdf(self, x, a, b, c): return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c genexpon = genexpon_gen(a=0.0, name='genexpon') class genextreme_gen(rv_continuous): r"""A generalized extreme value continuous random variable. %(before_notes)s See Also -------- gumbel_r Notes ----- For :math:`c=0`, `genextreme` is equal to `gumbel_r`. The probability density function for `genextreme` is: .. math:: f(x, c) = \begin{cases} \exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\ \exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for } x \le 1/c, c > 0 \end{cases} Note that several sources and software packages use the opposite convention for the sign of the shape parameter :math:`c`. `genextreme` takes :math:`c` as a shape parameter. 
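# Editor's sketch (not from the scipy source itself): the genextreme notes above
# state that c = 0 reduces to gumbel_r; a quick numerical confirmation on an
# arbitrary grid of points.
import numpy as np
from scipy.stats import genextreme, gumbel_r

x = np.linspace(-2, 5, 6)
print(np.allclose(genextreme.cdf(x, 0.0), gumbel_r.cdf(x)))  # True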
%(after_notes)s %(example)s """ def _argcheck(self, c): self.b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf) self.a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf) return np.where(abs(c) == np.inf, 0, 1) def _loglogcdf(self, x, c): return _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: sc.log1p(-c*x)/c, -x) def _pdf(self, x, c): # genextreme.pdf(x, c) = # exp(-exp(-x))*exp(-x), for c==0 # exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x \le 1/c, c > 0 return np.exp(self._logpdf(x, c)) def _logpdf(self, x, c): cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0) logex2 = sc.log1p(-cx) logpex2 = self._loglogcdf(x, c) pex2 = np.exp(logpex2) # Handle special cases np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0) logpdf = np.where((cx == 1) | (cx == -np.inf), -np.inf, -pex2+logpex2-logex2) np.putmask(logpdf, (c == 1) & (x == 1), 0.0) return logpdf def _logcdf(self, x, c): return -np.exp(self._loglogcdf(x, c)) def _cdf(self, x, c): return np.exp(self._logcdf(x, c)) def _sf(self, x, c): return -sc.expm1(self._logcdf(x, c)) def _ppf(self, q, c): x = -np.log(-np.log(q)) return _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: -sc.expm1(-c * x) / c, x) def _isf(self, q, c): x = -np.log(-sc.log1p(-q)) return _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: -sc.expm1(-c * x) / c, x) def _stats(self, c): g = lambda n: sc.gamma(n*c + 1) g1 = g(1) g2 = g(2) g3 = g(3) g4 = g(4) g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0) gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0, sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0) eps = 1e-14 gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c) m = np.where(c < -1.0, np.nan, -gamk) v = np.where(c < -0.5, np.nan, g1**2.0*gam2k) # skewness sk1 = _lazywhere(c >= -1./3, (c, g1, g2, g3, g2mg12), lambda c, g1, g2, g3, g2gm12: np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5, fillvalue=np.nan) sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1) # kurtosis ku1 = _lazywhere(c >= -1./4, (g1, g2, g3, g4, g2mg12), lambda g1, g2, g3, g4, g2mg12: (g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2, fillvalue=np.nan) ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0) return m, v, sk, ku def _fitstart(self, data): # This is better than the default shape of (1,). g = _skew(data) if g < 0: a = 0.5 else: a = -0.5 return super(genextreme_gen, self)._fitstart(data, args=(a,)) def _munp(self, n, c): k = np.arange(0, n+1) vals = 1.0/c**n * np.sum( sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1), axis=0) return np.where(c*n > -1, vals, np.inf) def _entropy(self, c): return _EULER*(1 - c) + 1 genextreme = genextreme_gen(name='genextreme') def _digammainv(y): # Inverse of the digamma function (real positive arguments only). # This function is used in the `fit` method of `gamma_gen`. # The function uses either optimize.fsolve or optimize.newton # to solve `sc.digamma(x) - y = 0`. There is probably room for # improvement, but currently it works over a wide range of y: # >>> y = 64*np.random.randn(1000000) # >>> y.min(), y.max() # (-311.43592651416662, 351.77388222276869) # x = [_digammainv(t) for t in y] # np.abs(sc.digamma(x) - y).max() # 1.1368683772161603e-13 # _em = 0.5772156649015328606065120 func = lambda x: sc.digamma(x) - y if y > -0.125: x0 = np.exp(y) + 0.5 if y < 10: # Some experimentation shows that newton reliably converges # must faster than fsolve in this y range. For larger y, # newton sometimes fails to converge. 
value = optimize.newton(func, x0, tol=1e-10) return value elif y > -3: x0 = np.exp(y/2.332) + 0.08661 else: x0 = 1.0 / (-y - _em) value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11, full_output=True) if ier != 1: raise RuntimeError("_digammainv: fsolve failed, y = %r" % y) return value[0] ## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition) ## gamma(a, loc, scale) with a an integer is the Erlang distribution ## gamma(1, loc, scale) is the Exponential distribution ## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom. class gamma_gen(rv_continuous): r"""A gamma continuous random variable. %(before_notes)s See Also -------- erlang, expon Notes ----- The probability density function for `gamma` is: .. math:: f(x, a) = \frac{x^{a-1} \exp(-x)}{\Gamma(a)} for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the gamma function. `gamma` has a shape parameter `a` which needs to be set explicitly. When :math:`a` is an integer, `gamma` reduces to the Erlang distribution, and when :math:`a=1` to the exponential distribution. %(after_notes)s %(example)s """ def _rvs(self, a): return self._random_state.standard_gamma(a, self._size) def _pdf(self, x, a): # gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a) return np.exp(self._logpdf(x, a)) def _logpdf(self, x, a): return sc.xlogy(a-1.0, x) - x - sc.gammaln(a) def _cdf(self, x, a): return sc.gammainc(a, x) def _sf(self, x, a): return sc.gammaincc(a, x) def _ppf(self, q, a): return sc.gammaincinv(a, q) def _stats(self, a): return a, a, 2.0/np.sqrt(a), 6.0/a def _entropy(self, a): return sc.psi(a)*(1-a) + a + sc.gammaln(a) def _fitstart(self, data): # The skewness of the gamma distribution is `4 / np.sqrt(a)`. # We invert that to estimate the shape `a` using the skewness # of the data. The formula is regularized with 1e-8 in the # denominator to allow for degenerate data where the skewness # is close to 0. a = 4 / (1e-8 + _skew(data)**2) return super(gamma_gen, self)._fitstart(data, args=(a,)) @extend_notes_in_docstring(rv_continuous, notes="""\ When the location is fixed by using the argument `floc`, this function uses explicit formulas or solves a simpler numerical problem than the full ML optimization problem. So in that case, the `optimizer`, `loc` and `scale` arguments are ignored.\n\n""") def fit(self, data, *args, **kwds): f0 = (kwds.get('f0', None) or kwds.get('fa', None) or kwds.get('fix_a', None)) floc = kwds.get('floc', None) fscale = kwds.get('fscale', None) if floc is None: # loc is not fixed. Use the default fit method. return super(gamma_gen, self).fit(data, *args, **kwds) # Special case: loc is fixed. if f0 is not None and fscale is not None: # This check is for consistency with `rv_continuous.fit`. # Without this check, this function would just return the # parameters that were given. raise ValueError("All parameters fixed. There is nothing to " "optimize.") # Fixed location is handled by shifting the data. data = np.asarray(data) if np.any(data <= floc): raise FitDataError("gamma", lower=floc, upper=np.inf) if floc != 0: # Don't do the subtraction in-place, because `data` might be a # view of the input array. data = data - floc xbar = data.mean() # Three cases to handle: # * shape and scale both free # * shape fixed, scale free # * shape free, scale fixed if fscale is None: # scale is free if f0 is not None: # shape is fixed a = f0 else: # shape and scale are both free. 
# The MLE for the shape parameter `a` is the solution to: # np.log(a) - sc.digamma(a) - np.log(xbar) + # np.log(data.mean) = 0 s = np.log(xbar) - np.log(data).mean() func = lambda a: np.log(a) - sc.digamma(a) - s aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s) xa = aest*(1-0.4) xb = aest*(1+0.4) a = optimize.brentq(func, xa, xb, disp=0) # The MLE for the scale parameter is just the data mean # divided by the shape parameter. scale = xbar / a else: # scale is fixed, shape is free # The MLE for the shape parameter `a` is the solution to: # sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0 c = np.log(data).mean() - np.log(fscale) a = _digammainv(c) scale = fscale return a, floc, scale gamma = gamma_gen(a=0.0, name='gamma') class erlang_gen(gamma_gen): """An Erlang continuous random variable. %(before_notes)s See Also -------- gamma Notes ----- The Erlang distribution is a special case of the Gamma distribution, with the shape parameter `a` an integer. Note that this restriction is not enforced by `erlang`. It will, however, generate a warning the first time a non-integer value is used for the shape parameter. Refer to `gamma` for examples. """ def _argcheck(self, a): allint = np.all(np.floor(a) == a) allpos = np.all(a > 0) if not allint: # An Erlang distribution shouldn't really have a non-integer # shape parameter, so warn the user. warnings.warn( 'The shape parameter of the erlang distribution ' 'has been given a non-integer value %r.' % (a,), RuntimeWarning) return allpos def _fitstart(self, data): # Override gamma_gen_fitstart so that an integer initial value is # used. (Also regularize the division, to avoid issues when # _skew(data) is 0 or close to 0.) a = int(4.0 / (1e-8 + _skew(data)**2)) return super(gamma_gen, self)._fitstart(data, args=(a,)) # Trivial override of the fit method, so we can monkey-patch its # docstring. def fit(self, data, *args, **kwds): return super(erlang_gen, self).fit(data, *args, **kwds) if fit.__doc__ is not None: fit.__doc__ = (rv_continuous.fit.__doc__ + """ Notes ----- The Erlang distribution is generally defined to have integer values for the shape parameter. This is not enforced by the `erlang` class. When fitting the distribution, it will generally return a non-integer value for the shape parameter. By using the keyword argument `f0=<integer>`, the fit method can be constrained to fit the data to a specific integer shape parameter. """) erlang = erlang_gen(a=0.0, name='erlang') class gengamma_gen(rv_continuous): r"""A generalized gamma continuous random variable. %(before_notes)s Notes ----- The probability density function for `gengamma` is: .. math:: f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\gamma(a)} for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`. `gengamma` takes :math:`a` and :math:`c` as shape parameters. 
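# Editor's sketch (not from the scipy source itself): erlang is documented above
# as the gamma distribution restricted to an integer shape parameter, so the two
# densities coincide for integer shapes.  The shape 3 and the x grid are arbitrary.
import numpy as np
from scipy.stats import erlang, gamma

x = np.linspace(0.1, 10, 5)
print(np.allclose(erlang.pdf(x, 3), gamma.pdf(x, 3)))  # True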
%(after_notes)s %(example)s """ def _argcheck(self, a, c): return (a > 0) & (c != 0) def _pdf(self, x, a, c): # gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a) return np.exp(self._logpdf(x, a, c)) def _logpdf(self, x, a, c): return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a) def _cdf(self, x, a, c): xc = x**c val1 = sc.gammainc(a, xc) val2 = sc.gammaincc(a, xc) return np.where(c > 0, val1, val2) def _sf(self, x, a, c): xc = x**c val1 = sc.gammainc(a, xc) val2 = sc.gammaincc(a, xc) return np.where(c > 0, val2, val1) def _ppf(self, q, a, c): val1 = sc.gammaincinv(a, q) val2 = sc.gammainccinv(a, q) return np.where(c > 0, val1, val2)**(1.0/c) def _isf(self, q, a, c): val1 = sc.gammaincinv(a, q) val2 = sc.gammainccinv(a, q) return np.where(c > 0, val2, val1)**(1.0/c) def _munp(self, n, a, c): # Pochhammer symbol: sc.pocha,n) = gamma(a+n)/gamma(a) return sc.poch(a, n*1.0/c) def _entropy(self, a, c): val = sc.psi(a) return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c)) gengamma = gengamma_gen(a=0.0, name='gengamma') class genhalflogistic_gen(rv_continuous): r"""A generalized half-logistic continuous random variable. %(before_notes)s Notes ----- The probability density function for `genhalflogistic` is: .. math:: f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2} for :math:`0 \le x \le 1/c`, and :math:`c > 0`. `genhalflogistic` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _argcheck(self, c): self.b = 1.0 / c return c > 0 def _pdf(self, x, c): # genhalflogistic.pdf(x, c) = # 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2 limit = 1.0/c tmp = np.asarray(1-c*x) tmp0 = tmp**(limit-1) tmp2 = tmp0*tmp return 2*tmp0 / (1+tmp2)**2 def _cdf(self, x, c): limit = 1.0/c tmp = np.asarray(1-c*x) tmp2 = tmp**(limit) return (1.0-tmp2) / (1+tmp2) def _ppf(self, q, c): return 1.0/c*(1-((1.0-q)/(1.0+q))**c) def _entropy(self, c): return 2 - (2*c+1)*np.log(2) genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic') class gompertz_gen(rv_continuous): r"""A Gompertz (or truncated Gumbel) continuous random variable. %(before_notes)s Notes ----- The probability density function for `gompertz` is: .. math:: f(x, c) = c \exp(x) \exp(-c (e^x-1)) for :math:`x \ge 0`, :math:`c > 0`. `gompertz` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, c): # gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1)) return np.exp(self._logpdf(x, c)) def _logpdf(self, x, c): return np.log(c) + x - c * sc.expm1(x) def _cdf(self, x, c): return -sc.expm1(-c * sc.expm1(x)) def _ppf(self, q, c): return sc.log1p(-1.0 / c * sc.log1p(-q)) def _entropy(self, c): return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c) gompertz = gompertz_gen(a=0.0, name='gompertz') class gumbel_r_gen(rv_continuous): r"""A right-skewed Gumbel continuous random variable. %(before_notes)s See Also -------- gumbel_l, gompertz, genextreme Notes ----- The probability density function for `gumbel_r` is: .. math:: f(x) = \exp(-(x + e^{-x})) The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett distribution. It is also related to the extreme value distribution, log-Weibull and Gompertz distributions. 
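
    In particular, `gumbel_r` coincides with `genextreme` when the latter's
    shape parameter is zero.  A short illustrative check (the grid of ``x``
    values below is arbitrary):

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.linspace(-2.0, 4.0, 5)
    >>> np.allclose(stats.gumbel_r.pdf(x), stats.genextreme.pdf(x, c=0))
    True
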
%(after_notes)s %(example)s """ def _pdf(self, x): # gumbel_r.pdf(x) = exp(-(x + exp(-x))) return np.exp(self._logpdf(x)) def _logpdf(self, x): return -x - np.exp(-x) def _cdf(self, x): return np.exp(-np.exp(-x)) def _logcdf(self, x): return -np.exp(-x) def _ppf(self, q): return -np.log(-np.log(q)) def _stats(self): return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5 def _entropy(self): # http://en.wikipedia.org/wiki/Gumbel_distribution return _EULER + 1. gumbel_r = gumbel_r_gen(name='gumbel_r') class gumbel_l_gen(rv_continuous): r"""A left-skewed Gumbel continuous random variable. %(before_notes)s See Also -------- gumbel_r, gompertz, genextreme Notes ----- The probability density function for `gumbel_l` is: .. math:: f(x) = \exp(x - e^x) The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett distribution. It is also related to the extreme value distribution, log-Weibull and Gompertz distributions. %(after_notes)s %(example)s """ def _pdf(self, x): # gumbel_l.pdf(x) = exp(x - exp(x)) return np.exp(self._logpdf(x)) def _logpdf(self, x): return x - np.exp(x) def _cdf(self, x): return -sc.expm1(-np.exp(x)) def _ppf(self, q): return np.log(-sc.log1p(-q)) def _logsf(self, x): return -np.exp(x) def _sf(self, x): return np.exp(-np.exp(x)) def _isf(self, x): return np.log(-np.log(x)) def _stats(self): return -_EULER, np.pi*np.pi/6.0, \ -12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5 def _entropy(self): return _EULER + 1. gumbel_l = gumbel_l_gen(name='gumbel_l') class halfcauchy_gen(rv_continuous): r"""A Half-Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `halfcauchy` is: .. math:: f(x) = \frac{2}{\pi (1 + x^2)} for :math:`x \ge 0`. %(after_notes)s %(example)s """ def _pdf(self, x): # halfcauchy.pdf(x) = 2 / (pi * (1 + x**2)) return 2.0/np.pi/(1.0+x*x) def _logpdf(self, x): return np.log(2.0/np.pi) - sc.log1p(x*x) def _cdf(self, x): return 2.0/np.pi*np.arctan(x) def _ppf(self, q): return np.tan(np.pi/2*q) def _stats(self): return np.inf, np.inf, np.nan, np.nan def _entropy(self): return np.log(2*np.pi) halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy') class halflogistic_gen(rv_continuous): r"""A half-logistic continuous random variable. %(before_notes)s Notes ----- The probability density function for `halflogistic` is: .. math:: f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 } = \frac{1}{2} sech(x/2)^2 for :math:`x \ge 0`. %(after_notes)s %(example)s """ def _pdf(self, x): # halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 # = 1/2 * sech(x/2)**2 return np.exp(self._logpdf(x)) def _logpdf(self, x): return np.log(2) - x - 2. * sc.log1p(np.exp(-x)) def _cdf(self, x): return np.tanh(x/2.0) def _ppf(self, q): return 2*np.arctanh(q) def _munp(self, n): if n == 1: return 2*np.log(2) if n == 2: return np.pi*np.pi/3.0 if n == 3: return 9*_ZETA3 if n == 4: return 7*np.pi**4 / 15.0 return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1) def _entropy(self): return 2-np.log(2) halflogistic = halflogistic_gen(a=0.0, name='halflogistic') class halfnorm_gen(rv_continuous): r"""A half-normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `halfnorm` is: .. math:: f(x) = \sqrt{2/\pi} e^{-\frac{x^2}{2}} for :math:`x > 0`. `halfnorm` is a special case of :math`\chi` with ``df == 1``. 
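
    This equivalence with `chi` can be checked numerically (the grid of
    ``x`` values below is arbitrary):

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.linspace(0.1, 3.0, 5)
    >>> np.allclose(stats.halfnorm.pdf(x), stats.chi.pdf(x, df=1))
    True
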
%(after_notes)s %(example)s """ def _rvs(self): return abs(self._random_state.standard_normal(size=self._size)) def _pdf(self, x): # halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2) return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0) def _logpdf(self, x): return 0.5 * np.log(2.0/np.pi) - x*x/2.0 def _cdf(self, x): return _norm_cdf(x)*2-1.0 def _ppf(self, q): return sc.ndtri((1+q)/2.0) def _stats(self): return (np.sqrt(2.0/np.pi), 1-2.0/np.pi, np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5, 8*(np.pi-3)/(np.pi-2)**2) def _entropy(self): return 0.5*np.log(np.pi/2.0)+0.5 halfnorm = halfnorm_gen(a=0.0, name='halfnorm') class hypsecant_gen(rv_continuous): r"""A hyperbolic secant continuous random variable. %(before_notes)s Notes ----- The probability density function for `hypsecant` is: .. math:: f(x) = \frac{1}{\pi} sech(x) %(after_notes)s %(example)s """ def _pdf(self, x): # hypsecant.pdf(x) = 1/pi * sech(x) return 1.0/(np.pi*np.cosh(x)) def _cdf(self, x): return 2.0/np.pi*np.arctan(np.exp(x)) def _ppf(self, q): return np.log(np.tan(np.pi*q/2.0)) def _stats(self): return 0, np.pi*np.pi/4, 0, 2 def _entropy(self): return np.log(2*np.pi) hypsecant = hypsecant_gen(name='hypsecant') class gausshyper_gen(rv_continuous): r"""A Gauss hypergeometric continuous random variable. %(before_notes)s Notes ----- The probability density function for `gausshyper` is: .. math:: f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c} for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, and :math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}` `gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape parameters. %(after_notes)s %(example)s """ def _argcheck(self, a, b, c, z): return (a > 0) & (b > 0) & (c == c) & (z == z) def _pdf(self, x, a, b, c, z): # gausshyper.pdf(x, a, b, c, z) = # C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c) Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z) return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c def _munp(self, n, a, b, c, z): fac = sc.beta(n+a, b) / sc.beta(a, b) num = sc.hyp2f1(c, a+n, a+b+n, -z) den = sc.hyp2f1(c, a, a+b, -z) return fac*num / den gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper') class invgamma_gen(rv_continuous): r"""An inverted gamma continuous random variable. %(before_notes)s Notes ----- The probability density function for `invgamma` is: .. math:: f(x, a) = \frac{x^{-a-1}}{\gamma(a)} \exp(-\frac{1}{x}) for :math:`x > 0`, :math:`a > 0`. `invgamma` takes :math:`a` as a shape parameter. `invgamma` is a special case of `gengamma` with ``c == -1``. %(after_notes)s %(example)s """ _support_mask = rv_continuous._open_support_mask def _pdf(self, x, a): # invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x) return np.exp(self._logpdf(x, a)) def _logpdf(self, x, a): return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x def _cdf(self, x, a): return sc.gammaincc(a, 1.0 / x) def _ppf(self, q, a): return 1.0 / sc.gammainccinv(a, q) def _sf(self, x, a): return sc.gammainc(a, 1.0 / x) def _isf(self, q, a): return 1.0 / sc.gammaincinv(a, q) def _stats(self, a, moments='mvsk'): m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf) m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.), np.inf) g1, g2 = None, None if 's' in moments: g1 = _lazywhere( a > 3, (a,), lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan) if 'k' in moments: g2 = _lazywhere( a > 4, (a,), lambda x: 6. * (5. * x - 11.) / (x - 3.) 
                / (x - 4.), np.nan)
        return m1, m2, g1, g2

    def _entropy(self, a):
        return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')


# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
    r"""An inverse Gaussian continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `invgauss` is:

    .. math::

        f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
                    \exp(-\frac{(x-\mu)^2}{2 x \mu^2})

    for :math:`x > 0`.

    `invgauss` takes :math:`\mu` as a shape parameter.

    %(after_notes)s

    When :math:`\mu` is too small, evaluating the cumulative distribution
    function will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.  NaNs are
    returned for :math:`\mu \le 0.0028`.

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _rvs(self, mu):
        return self._random_state.wald(mu, 1.0, size=self._size)

    def _pdf(self, x, mu):
        # invgauss.pdf(x, mu) =
        #                  1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
        return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)

    def _logpdf(self, x, mu):
        return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)

    def _cdf(self, x, mu):
        fac = np.sqrt(1.0/x)
        # Numerical accuracy for small `mu` is bad.  See #869.
        C1 = _norm_cdf(fac*(x-mu)/mu)
        C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
        return C1

    def _stats(self, mu):
        return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')


class norminvgauss_gen(rv_continuous):
    r"""A Normal Inverse Gaussian continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `norminvgauss` is:

    .. math::

        f(x; a, b) = (a \exp(\sqrt{a^2 - b^2} + b x)) /
                     (\pi \sqrt{1 + x^2} \, K_1(a * \sqrt{1 + x^2}))

    where `x` is a real number, the parameter `a` is the tail heaviness and
    `b` is the asymmetry parameter satisfying `a > 0` and `abs(b) <= a`.
    `K_1` is the modified Bessel function of second kind
    (`scipy.special.k1`).

    %(after_notes)s

    A normal inverse Gaussian random variable ``Y`` with parameters `a` and
    `b` can be expressed as ``Y = b * V + sqrt(V) * X``, where `X` is
    `norm(0,1)` and `V` is `invgauss(mu=1/sqrt(a**2 - b**2))`.  This
    representation is used to generate random variates.

    References
    ----------
    O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
    Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
    pp. 151-157, 1978.

    O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and
    Stochastic Volatility Modelling", Scandinavian Journal of Statistics,
    Vol. 24, pp. 1–13, 1997.
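
    As a quick consistency check of this parametrization (the numeric values
    of ``a`` and ``b`` below are arbitrary), the mean and variance should
    equal ``b / g`` and ``a**2 / g**3`` with ``g = sqrt(a**2 - b**2)``:

    >>> import numpy as np
    >>> from scipy import stats
    >>> a, b = 2.0, 0.5
    >>> g = np.sqrt(a**2 - b**2)
    >>> m, v = stats.norminvgauss.stats(a, b, moments='mv')
    >>> np.allclose([m, v], [b / g, a**2 / g**3])
    True
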
%(example)s """ _support_mask = rv_continuous._open_support_mask def _argcheck(self, a, b): return (a > 0) & (np.absolute(b) < a) def _pdf(self, x, a, b): gamma = np.sqrt(a**2 - b**2) fac1 = a / np.pi * np.exp(gamma) sq = np.hypot(1, x) # reduce overflows return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq def _rvs(self, a, b): # note: X = b * V + sqrt(V) * X is norminvgaus(a,b) if X is standard # normal and V is invgauss(mu=1/sqrt(a**2 - b**2)) gamma = np.sqrt(a**2 - b**2) sz, rndm = self._size, self._random_state ig = invgauss.rvs(mu=1/gamma, size=sz, random_state=rndm) return b * ig + np.sqrt(ig) * norm.rvs(size=sz, random_state=rndm) def _stats(self, a, b): gamma = np.sqrt(a**2 - b**2) mean = b / gamma variance = a**2 / gamma**3 skewness = 3.0 * b / (a * np.sqrt(gamma)) kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma return mean, variance, skewness, kurtosis norminvgauss = norminvgauss_gen(name="norminvgauss") class invweibull_gen(rv_continuous): r"""An inverted Weibull continuous random variable. This distribution is also known as the Fréchet distribution or the type II extreme value distribution. %(before_notes)s Notes ----- The probability density function for `invweibull` is: .. math:: f(x, c) = c x^{-c-1} \exp(-x^{-c}) for :math:`x > 0``, :math:`c > 0``. `invweibull` takes :math:`c`` as a shape parameter. %(after_notes)s References ---------- F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011. %(example)s """ _support_mask = rv_continuous._open_support_mask def _pdf(self, x, c): # invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c)) xc1 = np.power(x, -c - 1.0) xc2 = np.power(x, -c) xc2 = np.exp(-xc2) return c * xc1 * xc2 def _cdf(self, x, c): xc1 = np.power(x, -c) return np.exp(-xc1) def _ppf(self, q, c): return np.power(-np.log(q), -1.0/c) def _munp(self, n, c): return sc.gamma(1 - n / c) def _entropy(self, c): return 1+_EULER + _EULER / c - np.log(c) invweibull = invweibull_gen(a=0, name='invweibull') class johnsonsb_gen(rv_continuous): r"""A Johnson SB continuous random variable. %(before_notes)s See Also -------- johnsonsu Notes ----- The probability density function for `johnsonsb` is: .. math:: f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} ) for :math:`0 < x < 1` and :math:`a, b > 0`, and :math:`\phi` is the normal pdf. `johnsonsb` takes :math:`a` and :math:`b` as shape parameters. %(after_notes)s %(example)s """ _support_mask = rv_continuous._open_support_mask def _argcheck(self, a, b): return (b > 0) & (a == a) def _pdf(self, x, a, b): # johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x))) trm = _norm_pdf(a + b*np.log(x/(1.0-x))) return b*1.0/(x*(1-x))*trm def _cdf(self, x, a, b): return _norm_cdf(a + b*np.log(x/(1.0-x))) def _ppf(self, q, a, b): return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a))) johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb') class johnsonsu_gen(rv_continuous): r"""A Johnson SU continuous random variable. %(before_notes)s See Also -------- johnsonsb Notes ----- The probability density function for `johnsonsu` is: .. math:: f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}} \phi(a + b \log(x + \sqrt{x^2 + 1})) for all :math:`x, a, b > 0`, and :math:`\phi` is the normal pdf. `johnsonsu` takes :math:`a` and :math:`b` as shape parameters. 
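
    The quantile function has the closed form ``sinh((norm.ppf(q) - a) / b)``,
    which gives a quick way to check the implementation (the parameter values
    below are arbitrary):

    >>> import numpy as np
    >>> from scipy import stats
    >>> q = np.linspace(0.05, 0.95, 5)
    >>> np.allclose(stats.johnsonsu.ppf(q, a=1.2, b=2.0),
    ...             np.sinh((stats.norm.ppf(q) - 1.2) / 2.0))
    True
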
    %(after_notes)s

    %(example)s

    """
    def _argcheck(self, a, b):
        return (b > 0) & (a == a)

    def _pdf(self, x, a, b):
        # johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
        #                          phi(a + b * log(x + sqrt(x**2 + 1)))
        x2 = x*x
        trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
        return b*1.0/np.sqrt(x2+1.0)*trm

    def _cdf(self, x, a, b):
        return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))

    def _ppf(self, q, a, b):
        return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')


class laplace_gen(rv_continuous):
    r"""A Laplace continuous random variable.

    %(before_notes)s

    Notes
    -----
    The probability density function for `laplace` is:

    .. math::

        f(x) = \frac{1}{2} \exp(-|x|)

    %(after_notes)s

    %(example)s

    """
    def _rvs(self):
        return self._random_state.laplace(0, 1, size=self._size)

    def _pdf(self, x):
        # laplace.pdf(x) = 1/2 * exp(-abs(x))
        return 0.5*np.exp(-abs(x))

    def _cdf(self, x):
        return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))

    def _ppf(self, q):
        return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))

    def _stats(self):
        return 0, 2, 0, 3

    def _entropy(self):
        return np.log(2)+1
laplace = laplace_gen(name='laplace')


class levy_gen(rv_continuous):
    r"""A Levy continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy_stable, levy_l

    Notes
    -----
    The probability density function for `levy` is:

    .. math::

        f(x) = \frac{1}{x \sqrt{2\pi x}} \exp(-\frac{1}{2x})

    for :math:`x > 0`.

    This is the same as the Levy-stable distribution with :math:`a=1/2` and
    :math:`b=1`.

    %(after_notes)s

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x):
        # levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
        return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))

    def _cdf(self, x):
        # Equivalent to 2*norm.sf(np.sqrt(1/x))
        return sc.erfc(np.sqrt(0.5 / x))

    def _ppf(self, q):
        # Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
        val = -sc.ndtri(q/2)
        return 1.0 / (val * val)

    def _stats(self):
        return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")


class levy_l_gen(rv_continuous):
    r"""A left-skewed Levy continuous random variable.

    %(before_notes)s

    See Also
    --------
    levy, levy_stable

    Notes
    -----
    The probability density function for `levy_l` is:

    .. math::

        f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp(-\frac{1}{2 |x|})

    for :math:`x < 0`.

    This is the same as the Levy-stable distribution with :math:`a=1/2` and
    :math:`b=-1`.

    %(after_notes)s

    %(example)s

    """
    _support_mask = rv_continuous._open_support_mask

    def _pdf(self, x):
        # levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
        ax = abs(x)
        return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))

    def _cdf(self, x):
        ax = abs(x)
        return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1

    def _ppf(self, q):
        val = _norm_ppf((q + 1.0) / 2)
        return -1.0 / (val * val)

    def _stats(self):
        return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")


class levy_stable_gen(rv_continuous):
    r"""A Levy-stable continuous random variable.

%(before_notes)s See Also -------- levy, levy_l Notes ----- Levy-stable distribution (only random variates available -- ignore other docs) """ def _rvs(self, alpha, beta): def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W): return (2/np.pi*(np.pi/2 + bTH)*tanTH - beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH))) def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W): return (W/(cosTH/np.tan(aTH) + np.sin(TH)) * ((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha)) def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W): # alpha is not 1 and beta is not 0 val0 = beta*np.tan(np.pi*alpha/2) th0 = np.arctan(val0)/alpha val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH)) res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH - val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha) return res3 def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W): res = _lazywhere(beta == 0, (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W), beta0func, f2=otherwise) return res sz = self._size alpha = broadcast_to(alpha, sz) beta = broadcast_to(beta, sz) TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz, random_state=self._random_state) W = expon.rvs(size=sz, random_state=self._random_state) aTH = alpha*TH bTH = beta*TH cosTH = np.cos(TH) tanTH = np.tan(TH) res = _lazywhere(alpha == 1, (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W), alpha1func, f2=alphanot1func) return res def _argcheck(self, alpha, beta): return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1) def _pdf(self, x, alpha, beta): raise NotImplementedError levy_stable = levy_stable_gen(name='levy_stable') class logistic_gen(rv_continuous): r"""A logistic (or Sech-squared) continuous random variable. %(before_notes)s Notes ----- The probability density function for `logistic` is: .. math:: f(x) = \frac{\exp(-x)} {(1+exp(-x))^2} `logistic` is a special case of `genlogistic` with ``c == 1``. %(after_notes)s %(example)s """ def _rvs(self): return self._random_state.logistic(size=self._size) def _pdf(self, x): # logistic.pdf(x) = exp(-x) / (1+exp(-x))**2 return np.exp(self._logpdf(x)) def _logpdf(self, x): return -x - 2. * sc.log1p(np.exp(-x)) def _cdf(self, x): return sc.expit(x) def _ppf(self, q): return sc.logit(q) def _sf(self, x): return sc.expit(-x) def _isf(self, q): return -sc.logit(q) def _stats(self): return 0, np.pi*np.pi/3.0, 0, 6.0/5.0 def _entropy(self): # http://en.wikipedia.org/wiki/Logistic_distribution return 2.0 logistic = logistic_gen(name='logistic') class loggamma_gen(rv_continuous): r"""A log gamma continuous random variable. %(before_notes)s Notes ----- The probability density function for `loggamma` is: .. math:: f(x, c) = \frac{\exp(c x - \exp(x))} {\gamma(c)} for all :math:`x, c > 0`. `loggamma` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _rvs(self, c): return np.log(self._random_state.gamma(c, size=self._size)) def _pdf(self, x, c): # loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c) return np.exp(c*x-np.exp(x)-sc.gammaln(c)) def _cdf(self, x, c): return sc.gammainc(c, np.exp(x)) def _ppf(self, q, c): return np.log(sc.gammaincinv(c, q)) def _stats(self, c): # See, for example, "A Statistical Study of Log-Gamma Distribution", by # Ping Shing Chan (thesis, McMaster University, 1993). 
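        # The cumulant generating function of log(X) for X ~ Gamma(c, 1) is
        # gammaln(c + t) - gammaln(c), so the r-th cumulant of loggamma is
        # polygamma(r - 1, c); the expressions below follow from that
        # (note that digamma(c) == polygamma(0, c)).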
mean = sc.digamma(c) var = sc.polygamma(1, c) skewness = sc.polygamma(2, c) / np.power(var, 1.5) excess_kurtosis = sc.polygamma(3, c) / (var*var) return mean, var, skewness, excess_kurtosis loggamma = loggamma_gen(name='loggamma') class loglaplace_gen(rv_continuous): r"""A log-Laplace continuous random variable. %(before_notes)s Notes ----- The probability density function for `loglaplace` is: .. math:: f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\ \frac{c}{2} x^{-c-1} &\text{for } x \ge 1 \end{cases} for ``c > 0``. `loglaplace` takes ``c`` as a shape parameter. %(after_notes)s References ---------- T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model", The Mathematical Scientist, vol. 28, pp. 49-60, 2003. %(example)s """ def _pdf(self, x, c): # loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1 # = c / 2 * x**(-c-1), for x >= 1 cd2 = c/2.0 c = np.where(x < 1, c, -c) return cd2*x**(c-1) def _cdf(self, x, c): return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c)) def _ppf(self, q, c): return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c)) def _munp(self, n, c): return c**2 / (c**2 - n**2) def _entropy(self, c): return np.log(2.0/c) + 1.0 loglaplace = loglaplace_gen(a=0.0, name='loglaplace') def _lognorm_logpdf(x, s): return _lazywhere(x != 0, (x, s), lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)), -np.inf) class lognorm_gen(rv_continuous): r"""A lognormal continuous random variable. %(before_notes)s Notes ----- The probability density function for `lognorm` is: .. math:: f(x, s) = \frac{1}{s x \sqrt{2\pi}} \exp(-\frac{1}{2} (\frac{\log(x)}{s})^2) for ``x > 0``, ``s > 0``. `lognorm` takes ``s`` as a shape parameter. %(after_notes)s A common parametrization for a lognormal random variable ``Y`` is in terms of the mean, ``mu``, and standard deviation, ``sigma``, of the unique normally distributed random variable ``X`` such that exp(X) = Y. This parametrization corresponds to setting ``s = sigma`` and ``scale = exp(mu)``. %(example)s """ _support_mask = rv_continuous._open_support_mask def _rvs(self, s): return np.exp(s * self._random_state.standard_normal(self._size)) def _pdf(self, x, s): # lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2) return np.exp(self._logpdf(x, s)) def _logpdf(self, x, s): return _lognorm_logpdf(x, s) def _cdf(self, x, s): return _norm_cdf(np.log(x) / s) def _logcdf(self, x, s): return _norm_logcdf(np.log(x) / s) def _ppf(self, q, s): return np.exp(s * _norm_ppf(q)) def _sf(self, x, s): return _norm_sf(np.log(x) / s) def _logsf(self, x, s): return _norm_logsf(np.log(x) / s) def _stats(self, s): p = np.exp(s*s) mu = np.sqrt(p) mu2 = p*(p-1) g1 = np.sqrt((p-1))*(2+p) g2 = np.polyval([1, 2, 3, 0, -6.0], p) return mu, mu2, g1, g2 def _entropy(self, s): return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s)) @extend_notes_in_docstring(rv_continuous, notes="""\ When the location parameter is fixed by using the `floc` argument, this function uses explicit formulas for the maximum likelihood estimation of the log-normal shape and scale parameters, so the `optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""") def fit(self, data, *args, **kwds): floc = kwds.get('floc', None) if floc is None: # loc is not fixed. Use the default fit method. 
return super(lognorm_gen, self).fit(data, *args, **kwds) f0 = (kwds.get('f0', None) or kwds.get('fs', None) or kwds.get('fix_s', None)) fscale = kwds.get('fscale', None) if len(args) > 1: raise TypeError("Too many input arguments.") for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale', 'optimizer']: kwds.pop(name, None) if kwds: raise TypeError("Unknown arguments: %s." % kwds) # Special case: loc is fixed. Use the maximum likelihood formulas # instead of the numerical solver. if f0 is not None and fscale is not None: # This check is for consistency with `rv_continuous.fit`. raise ValueError("All parameters fixed. There is nothing to " "optimize.") data = np.asarray(data) floc = float(floc) if floc != 0: # Shifting the data by floc. Don't do the subtraction in-place, # because `data` might be a view of the input array. data = data - floc if np.any(data <= 0): raise FitDataError("lognorm", lower=floc, upper=np.inf) lndata = np.log(data) # Three cases to handle: # * shape and scale both free # * shape fixed, scale free # * shape free, scale fixed if fscale is None: # scale is free. scale = np.exp(lndata.mean()) if f0 is None: # shape is free. shape = lndata.std() else: # shape is fixed. shape = float(f0) else: # scale is fixed, shape is free scale = float(fscale) shape = np.sqrt(((lndata - np.log(scale))**2).mean()) return shape, floc, scale lognorm = lognorm_gen(a=0.0, name='lognorm') class gilbrat_gen(rv_continuous): r"""A Gilbrat continuous random variable. %(before_notes)s Notes ----- The probability density function for `gilbrat` is: .. math:: f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2) `gilbrat` is a special case of `lognorm` with ``s = 1``. %(after_notes)s %(example)s """ _support_mask = rv_continuous._open_support_mask def _rvs(self): return np.exp(self._random_state.standard_normal(self._size)) def _pdf(self, x): # gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2) return np.exp(self._logpdf(x)) def _logpdf(self, x): return _lognorm_logpdf(x, 1.0) def _cdf(self, x): return _norm_cdf(np.log(x)) def _ppf(self, q): return np.exp(_norm_ppf(q)) def _stats(self): p = np.e mu = np.sqrt(p) mu2 = p * (p - 1) g1 = np.sqrt((p - 1)) * (2 + p) g2 = np.polyval([1, 2, 3, 0, -6.0], p) return mu, mu2, g1, g2 def _entropy(self): return 0.5 * np.log(2 * np.pi) + 0.5 gilbrat = gilbrat_gen(a=0.0, name='gilbrat') class maxwell_gen(rv_continuous): r"""A Maxwell continuous random variable. %(before_notes)s Notes ----- A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``, and given ``scale = a``, where ``a`` is the parameter used in the Mathworld description [1]_. The probability density function for `maxwell` is: .. math:: f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2) for ``x > 0``. %(after_notes)s References ---------- .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html %(example)s """ def _rvs(self): return chi.rvs(3.0, size=self._size, random_state=self._random_state) def _pdf(self, x): # maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2) return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0) def _cdf(self, x): return sc.gammainc(1.5, x*x/2.0) def _ppf(self, q): return np.sqrt(2*sc.gammaincinv(1.5, q)) def _stats(self): val = 3*np.pi-8 return (2*np.sqrt(2.0/np.pi), 3-8/np.pi, np.sqrt(2)*(32-10*np.pi)/val**1.5, (-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0) def _entropy(self): return _EULER + 0.5*np.log(2*np.pi)-0.5 maxwell = maxwell_gen(a=0.0, name='maxwell') class mielke_gen(rv_continuous): r"""A Mielke's Beta-Kappa continuous random variable. 
%(before_notes)s Notes ----- The probability density function for `mielke` is: .. math:: f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}} for ``x > 0``. `mielke` takes ``k`` and ``s`` as shape parameters. %(after_notes)s %(example)s """ def _pdf(self, x, k, s): # mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s) return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s) def _cdf(self, x, k, s): return x**k / (1.0+x**s)**(k*1.0/s) def _ppf(self, q, k, s): qsk = pow(q, s*1.0/k) return pow(qsk/(1.0-qsk), 1.0/s) mielke = mielke_gen(a=0.0, name='mielke') class kappa4_gen(rv_continuous): r"""Kappa 4 parameter distribution. %(before_notes)s Notes ----- The probability density function for kappa4 is: .. math:: f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1} if :math:`h` and :math:`k` are not equal to 0. If :math:`h` or :math:`k` are zero then the pdf can be simplified: h = 0 and k != 0:: kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)* exp(-(1.0 - k*x)**(1.0/k)) h != 0 and k = 0:: kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0) h = 0 and k = 0:: kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x)) kappa4 takes :math:`h` and :math:`k` as shape parameters. The kappa4 distribution returns other distributions when certain :math:`h` and :math:`k` values are used. +------+-------------+----------------+------------------+ | h | k=0.0 | k=1.0 | -inf<=k<=inf | +======+=============+================+==================+ | -1.0 | Logistic | | Generalized | | | | | Logistic(1) | | | | | | | | logistic(x) | | | +------+-------------+----------------+------------------+ | 0.0 | Gumbel | Reverse | Generalized | | | | Exponential(2) | Extreme Value | | | | | | | | gumbel_r(x) | | genextreme(x, k) | +------+-------------+----------------+------------------+ | 1.0 | Exponential | Uniform | Generalized | | | | | Pareto | | | | | | | | expon(x) | uniform(x) | genpareto(x, -k) | +------+-------------+----------------+------------------+ (1) There are at least five generalized logistic distributions. Four are described here: https://en.wikipedia.org/wiki/Generalized_logistic_distribution The "fifth" one is the one kappa4 should match which currently isn't implemented in scipy: https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution http://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html (2) This distribution is currently not in scipy. References ---------- J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate Faculty of the Louisiana State University and Agricultural and Mechanical College, (August, 2004), http://digitalcommons.lsu.edu/cgi/viewcontent.cgi?article=4671&context=gradschool_dissertations J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res. Develop. 38 (3), 25 1-258 (1994). B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao Site in the Chi River Basin, Thailand", Journal of Water Resource and Protection, vol. 4, 866-869, (2012). http://file.scirp.org/pdf/JWARP20121000009_14676002.pdf C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March 2000). 
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf %(after_notes)s %(example)s """ def _argcheck(self, h, k): condlist = [np.logical_and(h > 0, k > 0), np.logical_and(h > 0, k == 0), np.logical_and(h > 0, k < 0), np.logical_and(h <= 0, k > 0), np.logical_and(h <= 0, k == 0), np.logical_and(h <= 0, k < 0)] def f0(h, k): return (1.0 - float_power(h, -k))/k def f1(h, k): return np.log(h) def f3(h, k): a = np.empty(np.shape(h)) a[:] = -np.inf return a def f5(h, k): return 1.0/k self.a = _lazyselect(condlist, [f0, f1, f0, f3, f3, f5], [h, k], default=np.nan) def f0(h, k): return 1.0/k def f1(h, k): a = np.empty(np.shape(h)) a[:] = np.inf return a self.b = _lazyselect(condlist, [f0, f1, f1, f0, f1, f1], [h, k], default=np.nan) return h == h def _pdf(self, x, h, k): # kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)* # (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1) return np.exp(self._logpdf(x, h, k)) def _logpdf(self, x, h, k): condlist = [np.logical_and(h != 0, k != 0), np.logical_and(h == 0, k != 0), np.logical_and(h != 0, k == 0), np.logical_and(h == 0, k == 0)] def f0(x, h, k): '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*( 1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0) logpdf = ... ''' return (sc.xlog1py(1.0/k - 1.0, -k*x) + sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k))) def f1(x, h, k): '''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-( 1.0 - k*x)**(1.0/k)) logpdf = ... ''' return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k) def f2(x, h, k): '''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0) logpdf = ... ''' return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x)) def f3(x, h, k): '''pdf = np.exp(-x-np.exp(-x)) logpdf = ... ''' return -x - np.exp(-x) return _lazyselect(condlist, [f0, f1, f2, f3], [x, h, k], default=np.nan) def _cdf(self, x, h, k): return np.exp(self._logcdf(x, h, k)) def _logcdf(self, x, h, k): condlist = [np.logical_and(h != 0, k != 0), np.logical_and(h == 0, k != 0), np.logical_and(h != 0, k == 0), np.logical_and(h == 0, k == 0)] def f0(x, h, k): '''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h) logcdf = ... ''' return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k)) def f1(x, h, k): '''cdf = np.exp(-(1.0 - k*x)**(1.0/k)) logcdf = ... ''' return -(1.0 - k*x)**(1.0/k) def f2(x, h, k): '''cdf = (1.0 - h*np.exp(-x))**(1.0/h) logcdf = ... ''' return (1.0/h)*sc.log1p(-h*np.exp(-x)) def f3(x, h, k): '''cdf = np.exp(-np.exp(-x)) logcdf = ... ''' return -np.exp(-x) return _lazyselect(condlist, [f0, f1, f2, f3], [x, h, k], default=np.nan) def _ppf(self, q, h, k): condlist = [np.logical_and(h != 0, k != 0), np.logical_and(h == 0, k != 0), np.logical_and(h != 0, k == 0), np.logical_and(h == 0, k == 0)] def f0(q, h, k): return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k) def f1(q, h, k): return 1.0/k*(1.0 - (-np.log(q))**k) def f2(q, h, k): '''ppf = -np.log((1.0 - (q**h))/h) ''' return -sc.log1p(-(q**h)) + np.log(h) def f3(q, h, k): return -np.log(-np.log(q)) return _lazyselect(condlist, [f0, f1, f2, f3], [q, h, k], default=np.nan) def _stats(self, h, k): if h >= 0 and k >= 0: maxr = 5 elif h < 0 and k >= 0: maxr = int(-1.0/h*k) elif k < 0: maxr = int(-1.0/k) else: maxr = 5 outputs = [None if r < maxr else np.nan for r in range(1, 5)] return outputs[:] kappa4 = kappa4_gen(name='kappa4') class kappa3_gen(rv_continuous): r"""Kappa 3 parameter distribution. %(before_notes)s Notes ----- The probability density function for `kappa` is: .. 
math:: f(x, a) = \begin{cases} a [a + x^a]^{-(a + 1)/a}, &\text{for } x > 0\\ 0.0, &\text{for } x \le 0 \end{cases} `kappa3` takes :math:`a` as a shape parameter and :math:`a > 0`. References ---------- P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum Likelihood and Likelihood Ratio Tests", Methods in Weather Research, 701-707, (September, 1973), http://docs.lib.noaa.gov/rescue/mwr/101/mwr-101-09-0701.pdf B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2, 415-419 (2012) http://file.scirp.org/pdf/OJS20120400011_95789012.pdf %(after_notes)s %(example)s """ def _argcheck(self, a): return a > 0 def _pdf(self, x, a): # kappa3.pdf(x, a) = # a*[a + x**a]**(-(a + 1)/a), for x > 0 # 0.0, for x <= 0 return a*(a + x**a)**(-1.0/a-1) def _cdf(self, x, a): return x*(a + x**a)**(-1.0/a) def _ppf(self, q, a): return (a/(q**-a - 1.0))**(1.0/a) def _stats(self, a): outputs = [None if i < a else np.nan for i in range(1, 5)] return outputs[:] kappa3 = kappa3_gen(a=0.0, name='kappa3') class moyal_gen(rv_continuous): r"""A Moyal continuous random variable. %(before_notes)s Notes ----- The probability density function for `moyal` is: .. math:: f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi} %(after_notes)s This distribution has utility in high-energy physics and radiation detection. It describes the energy loss of a charged relativistic particle due to ionization of the medium [1]_. It also provides an approximation for the Landau distribution. For an in depth description see [2]_. For additional description, see [3]_. References ---------- .. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations", The London, Edinburgh, and Dublin Philosophical Magazine and Journal of Science, vol 46, 263-280, (1955). https://doi.org/10.1080/14786440308521076 (gated) .. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution", International Journal of Research and Reviews in Applied Sciences, vol 10, 171-192, (2012). http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf .. [3] C. Walck, "Handbook on Statistical Distributions for Experimentalists; International Report SUF-PFY/96-01", Chapter 26, University of Stockholm: Stockholm, Sweden, (2007). www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf .. versionadded:: 1.1.0 %(example)s """ def _rvs(self): sz, rndm = self._size, self._random_state u1 = gamma.rvs(a = 0.5, scale = 2, size=sz, random_state=rndm) return -np.log(u1) def _pdf(self, x): return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi) def _cdf(self, x): return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2)) def _sf(self, x): return sc.erf(np.exp(-0.5 * x) / np.sqrt(2)) def _ppf(self, x): return -np.log(2 * sc.erfcinv(x)**2) def _stats(self): mu = np.log(2) + np.euler_gamma mu2 = np.pi**2 / 2 g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3 g2 = 4. 
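        # Numerically, g1 = 28*sqrt(2)*zeta(3)/pi**3 is roughly 1.535, and
        # the excess kurtosis g2 is exactly 4 (see the references in the
        # class docstring for derivations).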
return mu, mu2, g1, g2 def _munp(self, n): if n == 1.0: return np.log(2) + np.euler_gamma elif n == 2.0: return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2 elif n == 3.0: tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma) tmp2 = (np.log(2)+np.euler_gamma)**3 tmp3 = 14 * sc.zeta(3) return tmp1 + tmp2 + tmp3 elif n == 4.0: tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma) tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2 tmp3 = (np.log(2) + np.euler_gamma)**4 tmp4 = 7 * np.pi**4 / 4 return tmp1 + tmp2 + tmp3 + tmp4 else: # return generic for higher moments # return rv_continuous._mom1_sc(self, n, b) return self._mom1_sc(n) moyal = moyal_gen(name="moyal") class nakagami_gen(rv_continuous): r"""A Nakagami continuous random variable. %(before_notes)s Notes ----- The probability density function for `nakagami` is: .. math:: f(x, nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2) for ``x > 0``, ``nu > 0``. `nakagami` takes ``nu`` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, nu): # nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) * # x**(2*nu-1) * exp(-nu*x**2) return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x) def _cdf(self, x, nu): return sc.gammainc(nu, nu*x*x) def _ppf(self, q, nu): return np.sqrt(1.0/nu*sc.gammaincinv(nu, q)) def _stats(self, nu): mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu) mu2 = 1.0-mu*mu g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5) g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1 g2 /= nu*mu2**2.0 return mu, mu2, g1, g2 nakagami = nakagami_gen(a=0.0, name="nakagami") class ncx2_gen(rv_continuous): r"""A non-central chi-squared continuous random variable. %(before_notes)s Notes ----- The probability density function for `ncx2` is: .. math:: f(x, df, nc) = \exp(-\frac{nc+x}{2}) \frac{1}{2} (x/nc)^{(df-2)/4} I[(df-2)/2](\sqrt{nc x}) for :math:`x > 0`. `ncx2` takes ``df`` and ``nc`` as shape parameters. %(after_notes)s %(example)s """ def _rvs(self, df, nc): return self._random_state.noncentral_chisquare(df, nc, self._size) def _logpdf(self, x, df, nc): return _ncx2_log_pdf(x, df, nc) def _pdf(self, x, df, nc): # ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4) # * I[(df-2)/2](sqrt(nc*x)) return _ncx2_pdf(x, df, nc) def _cdf(self, x, df, nc): return _ncx2_cdf(x, df, nc) def _ppf(self, q, df, nc): return sc.chndtrix(q, df, nc) def _stats(self, df, nc): val = df + 2.0*nc return (df + nc, 2*val, np.sqrt(8)*(val+nc)/val**1.5, 12.0*(val+2*nc)/val**2.0) ncx2 = ncx2_gen(a=0.0, name='ncx2') class ncf_gen(rv_continuous): r"""A non-central F distribution continuous random variable. %(before_notes)s Notes ----- The probability density function for `ncf` is: .. math:: f(x, n_1, n_2, \lambda) = \exp(\frac{\lambda}{2} + \lambda n_1 \frac{x}{2(n_1 x+n_2)}) n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\ (n_2+n_1 x)^{-(n_1+n_2)/2} \gamma(n_1/2) \gamma(1+n_2/2) \\ \frac{L^{\frac{v_1}{2}-1}_{v_2/2} (-\lambda v_1 \frac{x}{2(v_1 x+v_2)})} {(B(v_1/2, v_2/2) \gamma(\frac{v_1+v_2}{2})} for :math:`n_1 > 1`, :math:`n_2, \lambda > 0`. Here :math:`n_1` is the degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in the denominator, :math:`\lambda` the non-centrality parameter, :math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a generalized Laguerre polynomial and :math:`B` is the beta function. `ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters. 
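
    A quick numerical check of the first moment against the closed form
    ``dfd / (dfd - 2) * (1 + nc / dfn)`` used in the internal moment
    calculation (the parameter values below are arbitrary, with ``dfd > 2``
    so that the mean is finite):

    >>> import numpy as np
    >>> from scipy import stats
    >>> dfn, dfd, nc = 5.0, 12.0, 1.5
    >>> np.allclose(stats.ncf.mean(dfn, dfd, nc),
    ...             dfd / (dfd - 2.0) * (1.0 + nc / dfn))
    True
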
%(after_notes)s %(example)s """ def _rvs(self, dfn, dfd, nc): return self._random_state.noncentral_f(dfn, dfd, nc, self._size) def _pdf_skip(self, x, dfn, dfd, nc): # ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) * # df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) * # (df2+df1*x)**(-(df1+df2)/2) * # gamma(df1/2)*gamma(1+df2/2) * # L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) / # (B(v1/2, v2/2) * gamma((v1+v2)/2)) n1, n2 = dfn, dfd term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.) term -= sc.gammaln((n1+n2)/2.0) Px = np.exp(term) Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1) Px *= (n2+n1*x)**(-(n1+n2)/2) Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1) Px /= sc.beta(n1/2, n2/2) # This function does not have a return. Drop it for now, the generic # function seems to work OK. def _cdf(self, x, dfn, dfd, nc): return sc.ncfdtr(dfn, dfd, nc, x) def _ppf(self, q, dfn, dfd, nc): return sc.ncfdtri(dfn, dfd, nc, q) def _munp(self, n, dfn, dfd, nc): val = (dfn * 1.0/dfd)**n term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5) val *= np.exp(-nc / 2.0+term) val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc) return val def _stats(self, dfn, dfd, nc): mu = np.where(dfd <= 2, np.inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn)) mu2 = np.where(dfd <= 4, np.inf, 2*(dfd*1.0/dfn)**2.0 * ((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / ((dfd-2.0)**2.0 * (dfd-4.0))) return mu, mu2, None, None ncf = ncf_gen(a=0.0, name='ncf') class t_gen(rv_continuous): r"""A Student's T continuous random variable. %(before_notes)s Notes ----- The probability density function for `t` is: .. math:: f(x, df) = \frac{\gamma((df+1)/2)} {\sqrt{\pi*df} \gamma(df/2) (1+x^2/df)^{(df+1)/2}} for ``df > 0``. `t` takes ``df`` as a shape parameter. %(after_notes)s %(example)s """ def _rvs(self, df): return self._random_state.standard_t(df, size=self._size) def _pdf(self, x, df): # gamma((df+1)/2) # t.pdf(x, df) = --------------------------------------------------- # sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2) r = np.asarray(df*1.0) Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2)) Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2) return Px def _logpdf(self, x, df): r = df*1.0 lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2) lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r) return lPx def _cdf(self, x, df): return sc.stdtr(df, x) def _sf(self, x, df): return sc.stdtr(df, -x) def _ppf(self, q, df): return sc.stdtrit(df, q) def _isf(self, q, df): return -sc.stdtrit(df, q) def _stats(self, df): mu2 = _lazywhere(df > 2, (df,), lambda df: df / (df-2.0), np.inf) g1 = np.where(df > 3, 0.0, np.nan) g2 = _lazywhere(df > 4, (df,), lambda df: 6.0 / (df-4.0), np.nan) return 0, mu2, g1, g2 t = t_gen(name='t') class nct_gen(rv_continuous): r"""A non-central Student's T continuous random variable. %(before_notes)s Notes ----- The probability density function for `nct` is: .. math:: f(x, df, nc) = \frac{df^{df/2} \gamma(df+1)}{2^{df} \exp(nc^2 / 2) (df+x^2)^{df/2} \gamma(df/2)} for ``df > 0``. `nct` takes ``df`` and ``nc`` as shape parameters. 
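
    The central case ``nc = 0`` reduces to the Student's t distribution; a
    short illustrative check (the grid of ``x`` values and the degrees of
    freedom below are arbitrary):

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.linspace(-3.0, 3.0, 5)
    >>> np.allclose(stats.nct.cdf(x, df=10, nc=0), stats.t.cdf(x, df=10))
    True
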
%(after_notes)s %(example)s """ def _argcheck(self, df, nc): return (df > 0) & (nc == nc) def _rvs(self, df, nc): sz, rndm = self._size, self._random_state n = norm.rvs(loc=nc, size=sz, random_state=rndm) c2 = chi2.rvs(df, size=sz, random_state=rndm) return n * np.sqrt(df) / np.sqrt(c2) def _pdf(self, x, df, nc): # nct.pdf(x, df, nc) = # df**(df/2) * gamma(df+1) # ---------------------------------------------------- # 2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2) n = df*1.0 nc = nc*1.0 x2 = x*x ncx2 = nc*nc*x2 fac1 = n + x2 trm1 = n/2.*np.log(n) + sc.gammaln(n+1) trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.) Px = np.exp(trm1) valF = ncx2 / (2*fac1) trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF) trm1 /= np.asarray(fac1*sc.gamma((n+1)/2)) trm2 = sc.hyp1f1((n+1)/2, 0.5, valF) trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1)) Px *= trm1+trm2 return Px def _cdf(self, x, df, nc): return sc.nctdtr(df, nc, x) def _ppf(self, q, df, nc): return sc.nctdtrit(df, nc, q) def _stats(self, df, nc, moments='mv'): # # See D. Hogben, R.S. Pinkham, and M.B. Wilk, # 'The moments of the non-central t-distribution' # Biometrika 48, p. 465 (2961). # e.g. http://www.jstor.org/stable/2332772 (gated) # mu, mu2, g1, g2 = None, None, None, None gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.) c11 = np.sqrt(df/2.) * gfac c20 = df / (df-2.) c22 = c20 - c11*c11 mu = np.where(df > 1, nc*c11, np.inf) mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf) if 's' in moments: c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11 c31t = 3.*df / (df-2.) / (df-3.) mu3 = (c33t*nc*nc + c31t) * c11*nc g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan) # kurtosis if 'k' in moments: c44 = df*df / (df-2.) / (df-4.) c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.) c44 -= 3.*c11**4 c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.) c42 *= 6.*df / (df-2.) c40 = 3.*df*df / (df-2.) / (df-4.) mu4 = c44 * nc**4 + c42*nc**2 + c40 g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan) return mu, mu2, g1, g2 nct = nct_gen(name="nct") class pareto_gen(rv_continuous): r"""A Pareto continuous random variable. %(before_notes)s Notes ----- The probability density function for `pareto` is: .. math:: f(x, b) = \frac{b}{x^{b+1}} for :math:`x \ge 1`, :math:`b > 0`. `pareto` takes :math:`b` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, b): # pareto.pdf(x, b) = b / x**(b+1) return b * x**(-b-1) def _cdf(self, x, b): return 1 - x**(-b) def _ppf(self, q, b): return pow(1-q, -1.0/b) def _sf(self, x, b): return x**(-b) def _stats(self, b, moments='mv'): mu, mu2, g1, g2 = None, None, None, None if 'm' in moments: mask = b > 1 bt = np.extract(mask, b) mu = valarray(np.shape(b), value=np.inf) np.place(mu, mask, bt / (bt-1.0)) if 'v' in moments: mask = b > 2 bt = np.extract(mask, b) mu2 = valarray(np.shape(b), value=np.inf) np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2) if 's' in moments: mask = b > 3 bt = np.extract(mask, b) g1 = valarray(np.shape(b), value=np.nan) vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt)) np.place(g1, mask, vals) if 'k' in moments: mask = b > 4 bt = np.extract(mask, b) g2 = valarray(np.shape(b), value=np.nan) vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) / np.polyval([1.0, -7.0, 12.0, 0.0], bt)) np.place(g2, mask, vals) return mu, mu2, g1, g2 def _entropy(self, c): return 1 + 1.0/c - np.log(c) pareto = pareto_gen(a=1.0, name="pareto") class lomax_gen(rv_continuous): r"""A Lomax (Pareto of the second kind) continuous random variable. 
%(before_notes)s Notes ----- The Lomax distribution is a special case of the Pareto distribution, with (loc=-1.0). The probability density function for `lomax` is: .. math:: f(x, c) = \frac{c}{(1+x)^{c+1}} for :math:`x \ge 0`, ``c > 0``. `lomax` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, c): # lomax.pdf(x, c) = c / (1+x)**(c+1) return c*1.0/(1.0+x)**(c+1.0) def _logpdf(self, x, c): return np.log(c) - (c+1)*sc.log1p(x) def _cdf(self, x, c): return -sc.expm1(-c*sc.log1p(x)) def _sf(self, x, c): return np.exp(-c*sc.log1p(x)) def _logsf(self, x, c): return -c*sc.log1p(x) def _ppf(self, q, c): return sc.expm1(-sc.log1p(-q)/c) def _stats(self, c): mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk') return mu, mu2, g1, g2 def _entropy(self, c): return 1+1.0/c-np.log(c) lomax = lomax_gen(a=0.0, name="lomax") class pearson3_gen(rv_continuous): r"""A pearson type III continuous random variable. %(before_notes)s Notes ----- The probability density function for `pearson3` is: .. math:: f(x, skew) = \frac{|\beta|}{\gamma(\alpha)} (\beta (x - \zeta))^{alpha - 1} \exp(-\beta (x - \zeta)) where: .. math:: \beta = \frac{2}{skew stddev} \alpha = (stddev \beta)^2 \zeta = loc - \frac{\alpha}{\beta} `pearson3` takes ``skew`` as a shape parameter. %(after_notes)s %(example)s References ---------- R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water Resources Research, Vol.27, 3149-3158 (1991). L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist., Vol.1, 191-198 (1930). "Using Modern Computing Tools to Fit the Pearson Type III Distribution to Aviation Loads Data", Office of Aviation Research (2003). """ def _preprocess(self, x, skew): # The real 'loc' and 'scale' are handled in the calling pdf(...). The # local variables 'loc' and 'scale' within pearson3._pdf are set to # the defaults just to keep them as part of the equations for # documentation. loc = 0.0 scale = 1.0 # If skew is small, return _norm_pdf. The divide between pearson3 # and norm was found by brute force and is approximately a skew of # 0.000016. No one, I hope, would actually use a skew value even # close to this small. norm2pearson_transition = 0.000016 ans, x, skew = np.broadcast_arrays([1.0], x, skew) ans = ans.copy() # mask is True where skew is small enough to use the normal approx. mask = np.absolute(skew) < norm2pearson_transition invmask = ~mask beta = 2.0 / (skew[invmask] * scale) alpha = (scale * beta)**2 zeta = loc - alpha / beta transx = beta * (x[invmask] - zeta) return ans, x, transx, mask, invmask, beta, alpha, zeta def _argcheck(self, skew): # The _argcheck function in rv_continuous only allows positive # arguments. The skew argument for pearson3 can be zero (which I want # to handle inside pearson3._pdf) or negative. So just return True # for all skew args. 
return np.ones(np.shape(skew), dtype=bool) def _stats(self, skew): _, _, _, _, _, beta, alpha, zeta = ( self._preprocess([1], skew)) m = zeta + alpha / beta v = alpha / (beta**2) s = 2.0 / (alpha**0.5) * np.sign(beta) k = 6.0 / alpha return m, v, s, k def _pdf(self, x, skew): # pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) * # (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta)) # Do the calculation in _logpdf since helps to limit # overflow/underflow problems ans = np.exp(self._logpdf(x, skew)) if ans.ndim == 0: if np.isnan(ans): return 0.0 return ans ans[np.isnan(ans)] = 0.0 return ans def _logpdf(self, x, skew): # PEARSON3 logpdf GAMMA logpdf # np.log(abs(beta)) # + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x) # - beta*(x - zeta) - x # - sc.gammalnalpha) - sc.gammalna) ans, x, transx, mask, invmask, beta, alpha, _ = ( self._preprocess(x, skew)) ans[mask] = np.log(_norm_pdf(x[mask])) ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha) return ans def _cdf(self, x, skew): ans, x, transx, mask, invmask, _, alpha, _ = ( self._preprocess(x, skew)) ans[mask] = _norm_cdf(x[mask]) ans[invmask] = gamma._cdf(transx, alpha) return ans def _rvs(self, skew): skew = broadcast_to(skew, self._size) ans, _, _, mask, invmask, beta, alpha, zeta = ( self._preprocess([0], skew)) nsmall = mask.sum() nbig = mask.size - nsmall ans[mask] = self._random_state.standard_normal(nsmall) ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta + zeta) if self._size == (): ans = ans[0] return ans def _ppf(self, q, skew): ans, q, _, mask, invmask, beta, alpha, zeta = ( self._preprocess(q, skew)) ans[mask] = _norm_ppf(q[mask]) ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta return ans pearson3 = pearson3_gen(name="pearson3") class powerlaw_gen(rv_continuous): r"""A power-function continuous random variable. %(before_notes)s Notes ----- The probability density function for `powerlaw` is: .. math:: f(x, a) = a x^{a-1} for :math:`0 \le x \le 1`, :math:`a > 0`. `powerlaw` takes :math:`a` as a shape parameter. %(after_notes)s `powerlaw` is a special case of `beta` with ``b == 1``. %(example)s """ def _pdf(self, x, a): # powerlaw.pdf(x, a) = a * x**(a-1) return a*x**(a-1.0) def _logpdf(self, x, a): return np.log(a) + sc.xlogy(a - 1, x) def _cdf(self, x, a): return x**(a*1.0) def _logcdf(self, x, a): return a*np.log(x) def _ppf(self, q, a): return pow(q, 1.0/a) def _stats(self, a): return (a / (a + 1.0), a / (a + 2.0) / (a + 1.0) ** 2, -2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a), 6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4))) def _entropy(self, a): return 1 - 1.0/a - np.log(a) powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw") class powerlognorm_gen(rv_continuous): r"""A power log-normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `powerlognorm` is: .. math:: f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s) (\Phi(-\log(x)/s))^{c-1} where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf, and :math:`x > 0`, :math:`s, c > 0`. `powerlognorm` takes :math:`c` and :math:`s` as shape parameters. 
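
    When ``c = 1``, `powerlognorm` reduces to `lognorm` with the same ``s``;
    a quick illustrative check (the values below are arbitrary):

    >>> import numpy as np
    >>> from scipy import stats
    >>> x = np.linspace(0.5, 4.0, 5)
    >>> np.allclose(stats.powerlognorm.pdf(x, c=1, s=0.8),
    ...             stats.lognorm.pdf(x, s=0.8))
    True
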
%(after_notes)s %(example)s """ _support_mask = rv_continuous._open_support_mask def _pdf(self, x, c, s): # powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) * # (Phi(-log(x)/s))**(c-1), return (c/(x*s) * _norm_pdf(np.log(x)/s) * pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0)) def _cdf(self, x, c, s): return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0) def _ppf(self, q, c, s): return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c))) powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm") class powernorm_gen(rv_continuous): r"""A power normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `powernorm` is: .. math:: f(x, c) = c \phi(x) (\Phi(-x))^{c-1} where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf, and :math:`x > 0`, :math:`c > 0`. `powernorm` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, c): # powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1) return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0)) def _logpdf(self, x, c): return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x) def _cdf(self, x, c): return 1.0-_norm_cdf(-x)**(c*1.0) def _ppf(self, q, c): return -_norm_ppf(pow(1.0 - q, 1.0 / c)) powernorm = powernorm_gen(name='powernorm') class rdist_gen(rv_continuous): r"""An R-distributed continuous random variable. %(before_notes)s Notes ----- The probability density function for `rdist` is: .. math:: f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)} for :math:`-1 \le x \le 1`, :math:`c > 0`. `rdist` takes :math:`c` as a shape parameter. This distribution includes the following distribution kernels as special cases:: c = 2: uniform c = 4: Epanechnikov (parabolic) c = 6: quartic (biweight) c = 8: triweight %(after_notes)s %(example)s """ def _pdf(self, x, c): # rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2) return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0) def _cdf(self, x, c): term1 = x / sc.beta(0.5, c / 2.0) res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2) # There's an issue with hyp2f1, it returns nans near x = +-1, c > 100. # Use the generic implementation in that case. See gh-1285 for # background. if np.any(np.isnan(res)): return rv_continuous._cdf(self, x, c) return res def _munp(self, n, c): numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0) return numerator / sc.beta(1. / 2, c / 2.) rdist = rdist_gen(a=-1.0, b=1.0, name="rdist") class rayleigh_gen(rv_continuous): r"""A Rayleigh continuous random variable. %(before_notes)s Notes ----- The probability density function for `rayleigh` is: .. math:: f(r) = r \exp(-r^2/2) for :math:`x \ge 0`. `rayleigh` is a special case of `chi` with ``df == 2``. %(after_notes)s %(example)s """ _support_mask = rv_continuous._open_support_mask def _rvs(self): return chi.rvs(2, size=self._size, random_state=self._random_state) def _pdf(self, r): # rayleigh.pdf(r) = r * exp(-r**2/2) return np.exp(self._logpdf(r)) def _logpdf(self, r): return np.log(r) - 0.5 * r * r def _cdf(self, r): return -sc.expm1(-0.5 * r**2) def _ppf(self, q): return np.sqrt(-2 * sc.log1p(-q)) def _sf(self, r): return np.exp(self._logsf(r)) def _logsf(self, r): return -0.5 * r * r def _isf(self, q): return np.sqrt(-2 * np.log(q)) def _stats(self): val = 4 - np.pi return (np.sqrt(np.pi/2), val/2, 2*(np.pi-3)*np.sqrt(np.pi)/val**1.5, 6*np.pi/val-16/val**2) def _entropy(self): return _EULER/2.0 + 1 - 0.5*np.log(2) rayleigh = rayleigh_gen(a=0.0, name="rayleigh") class reciprocal_gen(rv_continuous): r"""A reciprocal continuous random variable. 
%(before_notes)s Notes ----- The probability density function for `reciprocal` is: .. math:: f(x, a, b) = \frac{1}{x \log(b/a)} for :math:`a \le x \le b`, :math:`a, b > 0`. `reciprocal` takes :math:`a` and :math:`b` as shape parameters. %(after_notes)s %(example)s """ def _argcheck(self, a, b): self.a = a self.b = b self.d = np.log(b*1.0 / a) return (a > 0) & (b > 0) & (b > a) def _pdf(self, x, a, b): # reciprocal.pdf(x, a, b) = 1 / (x*log(b/a)) return 1.0 / (x * self.d) def _logpdf(self, x, a, b): return -np.log(x) - np.log(self.d) def _cdf(self, x, a, b): return (np.log(x)-np.log(a)) / self.d def _ppf(self, q, a, b): return a*pow(b*1.0/a, q) def _munp(self, n, a, b): return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n)) def _entropy(self, a, b): return 0.5*np.log(a*b)+np.log(np.log(b/a)) reciprocal = reciprocal_gen(name="reciprocal") class rice_gen(rv_continuous): r"""A Rice continuous random variable. %(before_notes)s Notes ----- The probability density function for `rice` is: .. math:: f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I[0](x b) for :math:`x > 0`, :math:`b > 0`. `rice` takes :math:`b` as a shape parameter. %(after_notes)s The Rice distribution describes the length, :math:`r`, of a 2-D vector with components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u, v` are independent Gaussian random variables with standard deviation :math:`s`. Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is ``rice.pdf(x, R/s, scale=s)``. %(example)s """ def _argcheck(self, b): return b >= 0 def _rvs(self, b): # http://en.wikipedia.org/wiki/Rice_distribution t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) + self._size) return np.sqrt((t*t).sum(axis=0)) def _cdf(self, x, b): return sc.chndtr(np.square(x), 2, np.square(b)) def _ppf(self, q, b): return np.sqrt(sc.chndtrix(q, 2, np.square(b))) def _pdf(self, x, b): # rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b) # # We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb. # The factor of np.exp(-xb) is then included in the i0e function # in place of the modified Bessel function, i0, improving # numerical stability for large values of xb. return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b) def _munp(self, n, b): nd2 = n/2.0 n1 = 1 + nd2 b2 = b*b/2.0 return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) * sc.hyp1f1(n1, 1, b2)) rice = rice_gen(a=0.0, name="rice") # FIXME: PPF does not work. class recipinvgauss_gen(rv_continuous): r"""A reciprocal inverse Gaussian continuous random variable. %(before_notes)s Notes ----- The probability density function for `recipinvgauss` is: .. math:: f(x, \mu) = \frac{1}{\sqrt{2\pi x}} \frac{\exp(-(1-\mu x)^2}{2x\mu^2)} for :math:`x \ge 0`. `recipinvgauss` takes :math:`\mu` as a shape parameter. %(after_notes)s %(example)s """ def _pdf(self, x, mu): # recipinvgauss.pdf(x, mu) = # 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2)) return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0)) def _logpdf(self, x, mu): return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x) def _cdf(self, x, mu): trm1 = 1.0/mu - x trm2 = 1.0/mu + x isqx = 1.0/np.sqrt(x) return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2) def _rvs(self, mu): return 1.0/self._random_state.wald(mu, 1.0, size=self._size) recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss') class semicircular_gen(rv_continuous): r"""A semicircular continuous random variable. %(before_notes)s Notes ----- The probability density function for `semicircular` is: .. 
math:: f(x) = \frac{2}{\pi} \sqrt{1-x^2} for :math:`-1 \le x \le 1`. %(after_notes)s %(example)s """ def _pdf(self, x): # semicircular.pdf(x) = 2/pi * sqrt(1-x**2) return 2.0/np.pi*np.sqrt(1-x*x) def _cdf(self, x): return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x)) def _stats(self): return 0, 0.25, 0, -1.0 def _entropy(self): return 0.64472988584940017414 semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular") class skew_norm_gen(rv_continuous): r"""A skew-normal random variable. %(before_notes)s Notes ----- The pdf is:: skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x) `skewnorm` takes :math:`a` as a skewness parameter When ``a = 0`` the distribution is identical to a normal distribution. rvs implements the method of [1]_. %(after_notes)s %(example)s References ---------- .. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602. http://azzalini.stat.unipd.it/SN/faq-r.html """ def _argcheck(self, a): return np.isfinite(a) def _pdf(self, x, a): return 2.*_norm_pdf(x)*_norm_cdf(a*x) def _cdf_single(self, x, *args): if x <= 0: cdf = integrate.quad(self._pdf, self.a, x, args=args)[0] else: t1 = integrate.quad(self._pdf, self.a, 0, args=args)[0] t2 = integrate.quad(self._pdf, 0, x, args=args)[0] cdf = t1 + t2 if cdf > 1: # Presumably numerical noise, e.g. 1.0000000000000002 cdf = 1.0 return cdf def _sf(self, x, a): return self._cdf(-x, -a) def _rvs(self, a): u0 = self._random_state.normal(size=self._size) v = self._random_state.normal(size=self._size) d = a/np.sqrt(1 + a**2) u1 = d*u0 + v*np.sqrt(1 - d**2) return np.where(u0 >= 0, u1, -u1) def _stats(self, a, moments='mvsk'): output = [None, None, None, None] const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2) if 'm' in moments: output[0] = const if 'v' in moments: output[1] = 1 - const**2 if 's' in moments: output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3 if 'k' in moments: output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2) return output skewnorm = skew_norm_gen(name='skewnorm') class trapz_gen(rv_continuous): r"""A trapezoidal continuous random variable. %(before_notes)s Notes ----- The trapezoidal distribution can be represented with an up-sloping line from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)`` and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``. `trapz` takes :math:`c` and :math:`d` as shape parameters. %(after_notes)s The standard form is in the range [0, 1] with c the mode. The location parameter shifts the start to `loc`. The scale parameter changes the width from 1 to `scale`. 
%(example)s """ def _argcheck(self, c, d): return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c) def _pdf(self, x, c, d): u = 2 / (d-c+1) return _lazyselect([x < c, (c <= x) & (x <= d), x > d], [lambda x, c, d, u: u * x / c, lambda x, c, d, u: u, lambda x, c, d, u: u * (1-x) / (1-d)], (x, c, d, u)) def _cdf(self, x, c, d): return _lazyselect([x < c, (c <= x) & (x <= d), x > d], [lambda x, c, d: x**2 / c / (d-c+1), lambda x, c, d: (c + 2 * (x-c)) / (d-c+1), lambda x, c, d: 1-((1-x) ** 2 / (d-c+1) / (1-d))], (x, c, d)) def _ppf(self, q, c, d): qc, qd = self._cdf(c, c, d), self._cdf(d, c, d) condlist = [q < qc, q <= qd, q > qd] choicelist = [np.sqrt(q * c * (1 + d - c)), 0.5 * q * (1 + d - c) + 0.5 * c, 1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))] return np.select(condlist, choicelist) trapz = trapz_gen(a=0.0, b=1.0, name="trapz") class triang_gen(rv_continuous): r"""A triangular continuous random variable. %(before_notes)s Notes ----- The triangular distribution can be represented with an up-sloping line from ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)`` to ``(loc+scale)``. `triang` takes :math:`c` as a shape parameter. %(after_notes)s The standard form is in the range [0, 1] with c the mode. The location parameter shifts the start to `loc`. The scale parameter changes the width from 1 to `scale`. %(example)s """ def _rvs(self, c): return self._random_state.triangular(0, c, 1, self._size) def _argcheck(self, c): return (c >= 0) & (c <= 1) def _pdf(self, x, c): # 0: edge case where c=0 # 1: generalised case for x < c, don't use x <= c, as it doesn't cope # with c = 0. # 2: generalised case for x >= c, but doesn't cope with c = 1 # 3: edge case where c=1 r = _lazyselect([c == 0, x < c, (x >= c) & (c != 1), c == 1], [lambda x, c: 2 - 2 * x, lambda x, c: 2 * x / c, lambda x, c: 2 * (1 - x) / (1 - c), lambda x, c: 2 * x], (x, c)) return r def _cdf(self, x, c): r = _lazyselect([c == 0, x < c, (x >= c) & (c != 1), c == 1], [lambda x, c: 2*x - x*x, lambda x, c: x * x / c, lambda x, c: (x*x - 2*x + c) / (c-1), lambda x, c: x * x], (x, c)) return r def _ppf(self, q, c): return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q))) def _stats(self, c): return ((c+1.0)/3.0, (1.0-c+c*c)/18, np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)), -3.0/5.0) def _entropy(self, c): return 0.5-np.log(2) triang = triang_gen(a=0.0, b=1.0, name="triang") class truncexpon_gen(rv_continuous): r"""A truncated exponential continuous random variable. %(before_notes)s Notes ----- The probability density function for `truncexpon` is: .. math:: f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)} for :math:`0 < x < b`. `truncexpon` takes :math:`b` as a shape parameter. 
%(after_notes)s %(example)s """ def _argcheck(self, b): self.b = b return b > 0 def _pdf(self, x, b): # truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b)) return np.exp(-x)/(-sc.expm1(-b)) def _logpdf(self, x, b): return -x - np.log(-sc.expm1(-b)) def _cdf(self, x, b): return sc.expm1(-x)/sc.expm1(-b) def _ppf(self, q, b): return -sc.log1p(q*sc.expm1(-b)) def _munp(self, n, b): # wrong answer with formula, same as in continuous.pdf # return sc.gamman+1)-sc.gammainc1+n, b) if n == 1: return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b)) elif n == 2: return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b)) else: # return generic for higher moments # return rv_continuous._mom1_sc(self, n, b) return self._mom1_sc(n, b) def _entropy(self, b): eB = np.exp(b) return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB) truncexpon = truncexpon_gen(a=0.0, name='truncexpon') class truncnorm_gen(rv_continuous): r"""A truncated normal continuous random variable. %(before_notes)s Notes ----- The standard form of this distribution is a standard normal truncated to the range [a, b] --- notice that a and b are defined over the domain of the standard normal. To convert clip values for a specific mean and standard deviation, use:: a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std `truncnorm` takes :math:`a` and :math:`b` as shape parameters. %(after_notes)s %(example)s """ def _argcheck(self, a, b): self.a = a self.b = b self._nb = _norm_cdf(b) self._na = _norm_cdf(a) self._sb = _norm_sf(b) self._sa = _norm_sf(a) self._delta = np.where(self.a > 0, -(self._sb - self._sa), self._nb - self._na) self._logdelta = np.log(self._delta) return a != b def _pdf(self, x, a, b): return _norm_pdf(x) / self._delta def _logpdf(self, x, a, b): return _norm_logpdf(x) - self._logdelta def _cdf(self, x, a, b): return (_norm_cdf(x) - self._na) / self._delta def _ppf(self, q, a, b): # XXX Use _lazywhere... ppf = np.where(self.a > 0, _norm_isf(q*self._sb + self._sa*(1.0-q)), _norm_ppf(q*self._nb + self._na*(1.0-q))) return ppf def _stats(self, a, b): nA, nB = self._na, self._nb d = nB - nA pA, pB = _norm_pdf(a), _norm_pdf(b) mu = (pA - pB) / d # correction sign mu2 = 1 + (a*pA - b*pB) / d - mu*mu return mu, mu2, None, None truncnorm = truncnorm_gen(name='truncnorm') # FIXME: RVS does not work. class tukeylambda_gen(rv_continuous): r"""A Tukey-Lamdba continuous random variable. %(before_notes)s Notes ----- A flexible distribution, able to represent and interpolate between the following distributions: - Cauchy (lam=-1) - logistic (lam=0.0) - approx Normal (lam=0.14) - u-shape (lam = 0.5) - uniform from -1 to 1 (lam = 1) `tukeylambda` takes ``lam`` as a shape parameter. %(after_notes)s %(example)s """ def _argcheck(self, lam): return np.ones(np.shape(lam), dtype=bool) def _pdf(self, x, lam): Fx = np.asarray(sc.tklmbda(x, lam)) Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0) Px = 1.0/np.asarray(Px) return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0) def _cdf(self, x, lam): return sc.tklmbda(x, lam) def _ppf(self, q, lam): return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam) def _stats(self, lam): return 0, _tlvar(lam), 0, _tlkurt(lam) def _entropy(self, lam): def integ(p): return np.log(pow(p, lam-1)+pow(1-p, lam-1)) return integrate.quad(integ, 0, 1)[0] tukeylambda = tukeylambda_gen(name='tukeylambda') class FitUniformFixedScaleDataError(FitDataError): def __init__(self, ptp, fscale): self.args = ( "Invalid values in `data`. 
Maximum likelihood estimation with " "the uniform distribution and fixed scale requires that " "data.ptp() <= fscale, but data.ptp() = %r and fscale = %r." % (ptp, fscale), ) class uniform_gen(rv_continuous): r"""A uniform continuous random variable. This distribution is constant between `loc` and ``loc + scale``. %(before_notes)s %(example)s """ def _rvs(self): return self._random_state.uniform(0.0, 1.0, self._size) def _pdf(self, x): return 1.0*(x == x) def _cdf(self, x): return x def _ppf(self, q): return q def _stats(self): return 0.5, 1.0/12, 0, -1.2 def _entropy(self): return 0.0 def fit(self, data, *args, **kwds): """ Maximum likelihood estimate for the location and scale parameters. `uniform.fit` uses only the following parameters. Because exact formulas are used, the parameters related to optimization that are available in the `fit` method of other distributions are ignored here. The only positional argument accepted is `data`. Parameters ---------- data : array_like Data to use in calculating the maximum likelihood estimate. floc : float, optional Hold the location parameter fixed to the specified value. fscale : float, optional Hold the scale parameter fixed to the specified value. Returns ------- loc, scale : float Maximum likelihood estimates for the location and scale. Notes ----- An error is raised if `floc` is given and any values in `data` are less than `floc`, or if `fscale` is given and `fscale` is less than ``data.max() - data.min()``. An error is also raised if both `floc` and `fscale` are given. Examples -------- >>> from scipy.stats import uniform We'll fit the uniform distribution to `x`: >>> x = np.array([2, 2.5, 3.1, 9.5, 13.0]) For a uniform distribution MLE, the location is the minimum of the data, and the scale is the maximum minus the minimum. >>> loc, scale = uniform.fit(x) >>> loc 2.0 >>> scale 11.0 If we know the data comes from a uniform distribution where the support starts at 0, we can use `floc=0`: >>> loc, scale = uniform.fit(x, floc=0) >>> loc 0.0 >>> scale 13.0 Alternatively, if we know the length of the support is 12, we can use `fscale=12`: >>> loc, scale = uniform.fit(x, fscale=12) >>> loc 1.5 >>> scale 12.0 In that last example, the support interval is [1.5, 13.5]. This solution is not unique. For example, the distribution with ``loc=2`` and ``scale=12`` has the same likelihood as the one above. When `fscale` is given and it is larger than ``data.max() - data.min()``, the parameters returned by the `fit` method center the support over the interval ``[data.min(), data.max()]``. """ if len(args) > 0: raise TypeError("Too many arguments.") floc = kwds.pop('floc', None) fscale = kwds.pop('fscale', None) # Ignore the optimizer-related keyword arguments, if given. kwds.pop('loc', None) kwds.pop('scale', None) kwds.pop('optimizer', None) if kwds: raise TypeError("Unknown arguments: %s." % kwds) if floc is not None and fscale is not None: # This check is for consistency with `rv_continuous.fit`. raise ValueError("All parameters fixed. There is nothing to " "optimize.") data = np.asarray(data) # MLE for the uniform distribution # -------------------------------- # The PDF is # # f(x, loc, scale) = {1/scale for loc <= x <= loc + scale # {0 otherwise} # # The likelihood function is # L(x, loc, scale) = (1/scale)**n # where n is len(x), assuming loc <= x <= loc + scale for all x. # The log-likelihood is # l(x, loc, scale) = -n*log(scale) # The log-likelihood is maximized by making scale as small as possible, # while keeping loc <= x <= loc + scale. 
So if neither loc nor scale # are fixed, the log-likelihood is maximized by choosing # loc = x.min() # scale = x.ptp() # If loc is fixed, it must be less than or equal to x.min(), and then # the scale is # scale = x.max() - loc # If scale is fixed, it must not be less than x.ptp(). If scale is # greater than x.ptp(), the solution is not unique. Note that the # likelihood does not depend on loc, except for the requirement that # loc <= x <= loc + scale. All choices of loc for which # x.max() - scale <= loc <= x.min() # have the same log-likelihood. In this case, we choose loc such that # the support is centered over the interval [data.min(), data.max()]: # loc = x.min() = 0.5*(scale - x.ptp()) if fscale is None: # scale is not fixed. if floc is None: # loc is not fixed, scale is not fixed. loc = data.min() scale = data.ptp() else: # loc is fixed, scale is not fixed. loc = floc scale = data.max() - loc if data.min() < loc: raise FitDataError("uniform", lower=loc, upper=loc + scale) else: # loc is not fixed, scale is fixed. ptp = data.ptp() if ptp > fscale: raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale) # If ptp < fscale, the ML estimate is not unique; see the comments # above. We choose the distribution for which the support is # centered over the interval [data.min(), data.max()]. loc = data.min() - 0.5*(fscale - ptp) scale = fscale # We expect the return values to be floating point, so ensure it # by explicitly converting to float. return float(loc), float(scale) uniform = uniform_gen(a=0.0, b=1.0, name='uniform') class vonmises_gen(rv_continuous): r"""A Von Mises continuous random variable. %(before_notes)s Notes ----- If `x` is not in range or `loc` is not in range it assumes they are angles and converts them to [-\pi, \pi] equivalents. The probability density function for `vonmises` is: .. math:: f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I[0](\kappa) } for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. `vonmises` takes :math:`\kappa` as a shape parameter. %(after_notes)s See Also -------- vonmises_line : The same distribution, defined on a [-\pi, \pi] segment of the real line. %(example)s """ def _rvs(self, kappa): return self._random_state.vonmises(0.0, kappa, size=self._size) def _pdf(self, x, kappa): # vonmises.pdf(x, \kappa) = exp(\kappa * cos(x)) / (2*pi*I[0](\kappa)) return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa)) def _cdf(self, x, kappa): return _stats.von_mises_cdf(kappa, x) def _stats_skip(self, kappa): return 0, None, 0, None def _entropy(self, kappa): return (-kappa * sc.i1(kappa) / sc.i0(kappa) + np.log(2 * np.pi * sc.i0(kappa))) vonmises = vonmises_gen(name='vonmises') vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line') class wald_gen(invgauss_gen): r"""A Wald continuous random variable. %(before_notes)s Notes ----- The probability density function for `wald` is: .. math:: f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x }) for :math:`x > 0`. `wald` is a special case of `invgauss` with ``mu == 1``. 
%(after_notes)s %(example)s """ _support_mask = rv_continuous._open_support_mask def _rvs(self): return self._random_state.wald(1.0, 1.0, size=self._size) def _pdf(self, x): # wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x)) return invgauss._pdf(x, 1.0) def _logpdf(self, x): return invgauss._logpdf(x, 1.0) def _cdf(self, x): return invgauss._cdf(x, 1.0) def _stats(self): return 1.0, 1.0, 3.0, 15.0 wald = wald_gen(a=0.0, name="wald") class wrapcauchy_gen(rv_continuous): r"""A wrapped Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `wrapcauchy` is: .. math:: f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))} for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`. `wrapcauchy` takes :math:`c` as a shape parameter. %(after_notes)s %(example)s """ def _argcheck(self, c): return (c > 0) & (c < 1) def _pdf(self, x, c): # wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x))) return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x))) def _cdf(self, x, c): output = np.zeros(x.shape, dtype=x.dtype) val = (1.0+c)/(1.0-c) c1 = x < np.pi c2 = 1-c1 xp = np.extract(c1, x) xn = np.extract(c2, x) if np.any(xn): valn = np.extract(c2, np.ones_like(x)*val) xn = 2*np.pi - xn yn = np.tan(xn/2.0) on = 1.0-1.0/np.pi*np.arctan(valn*yn) np.place(output, c2, on) if np.any(xp): valp = np.extract(c1, np.ones_like(x)*val) yp = np.tan(xp/2.0) op = 1.0/np.pi*np.arctan(valp*yp) np.place(output, c1, op) return output def _ppf(self, q, c): val = (1.0-c)/(1.0+c) rcq = 2*np.arctan(val*np.tan(np.pi*q)) rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q))) return np.where(q < 1.0/2, rcq, rcmq) def _entropy(self, c): return np.log(2*np.pi*(1-c*c)) wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy') class gennorm_gen(rv_continuous): r"""A generalized normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `gennorm` is [1]_:: beta gennorm.pdf(x, beta) = --------------- exp(-|x|**beta) 2 gamma(1/beta) `gennorm` takes :math:`\beta` as a shape parameter. For :math:`\beta = 1`, it is identical to a Laplace distribution. For ``\beta = 2``, it is identical to a normal distribution (with :math:`scale=1/\sqrt{2}`). See Also -------- laplace : Laplace distribution norm : normal distribution References ---------- .. [1] "Generalized normal distribution, Version 1", https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1 %(example)s """ def _pdf(self, x, beta): return np.exp(self._logpdf(x, beta)) def _logpdf(self, x, beta): return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta def _cdf(self, x, beta): c = 0.5 * np.sign(x) # evaluating (.5 + c) first prevents numerical cancellation return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta) def _ppf(self, x, beta): c = np.sign(x - 0.5) # evaluating (1. + c) first prevents numerical cancellation return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta) def _sf(self, x, beta): return self._cdf(-x, beta) def _isf(self, x, beta): return -self._ppf(x, beta) def _stats(self, beta): c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta]) return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3. def _entropy(self, beta): return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta) gennorm = gennorm_gen(name='gennorm') class halfgennorm_gen(rv_continuous): r"""The upper half of a generalized normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `halfgennorm` is: .. 
math:: f(x, \beta) = \frac{\beta}{\gamma(1/\beta)} \exp(-|x|^\beta) `gennorm` takes :math:`\beta` as a shape parameter. For :math:`\beta = 1`, it is identical to an exponential distribution. For :math:`\beta = 2`, it is identical to a half normal distribution (with :math:`scale=1/\sqrt{2}`). See Also -------- gennorm : generalized normal distribution expon : exponential distribution halfnorm : half normal distribution References ---------- .. [1] "Generalized normal distribution, Version 1", https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1 %(example)s """ def _pdf(self, x, beta): # beta # halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta) # gamma(1/beta) return np.exp(self._logpdf(x, beta)) def _logpdf(self, x, beta): return np.log(beta) - sc.gammaln(1.0/beta) - x**beta def _cdf(self, x, beta): return sc.gammainc(1.0/beta, x**beta) def _ppf(self, x, beta): return sc.gammaincinv(1.0/beta, x)**(1.0/beta) def _sf(self, x, beta): return sc.gammaincc(1.0/beta, x**beta) def _isf(self, x, beta): return sc.gammainccinv(1.0/beta, x)**(1.0/beta) def _entropy(self, beta): return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta) halfgennorm = halfgennorm_gen(a=0, name='halfgennorm') class crystalball_gen(rv_continuous): r""" Crystalball distribution %(before_notes)s Notes ----- The probability density function for `crystalball` is: .. math:: f(x, \beta, m) = \begin{cases} N \exp(-x^2 / 2), &\text{for } x > -\beta\\ N A (B - x)^{-m} &\text{for } x \le -\beta \end{cases} where :math:`A = (m / |beta|)**n * exp(-beta**2 / 2)`, :math:`B = m/|beta| - |beta|` and :math:`N` is a normalisation constant. `crystalball` takes :math:`\beta` and :math:`m` as shape parameters. :math:`\beta` defines the point where the pdf changes from a power-law to a gaussian distribution :math:`m` is power of the power-law tail. References ---------- .. [1] "Crystal Ball Function", https://en.wikipedia.org/wiki/Crystal_Ball_function %(after_notes)s .. versionadded:: 0.19.0 %(example)s """ def _pdf(self, x, beta, m): """ Return PDF of the crystalball function. -- | exp(-x**2 / 2), for x > -beta crystalball.pdf(x, beta, m) = N * | | A * (B - x)**(-m), for x <= -beta -- """ N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + _norm_pdf_C * _norm_cdf(beta)) rhs = lambda x, beta, m: np.exp(-x**2 / 2) lhs = lambda x, beta, m: (m/beta)**m * np.exp(-beta**2 / 2.0) * (m/beta - beta - x)**(-m) return N * _lazywhere(np.atleast_1d(x > -beta), (x, beta, m), f=rhs, f2=lhs) def _cdf(self, x, beta, m): """ Return CDF of the crystalball function """ N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + _norm_pdf_C * _norm_cdf(beta)) rhs = lambda x, beta, m: (m/beta) * np.exp(-beta**2 / 2.0) / (m-1) + _norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)) lhs = lambda x, beta, m: (m/beta)**m * np.exp(-beta**2 / 2.0) * (m/beta - beta - x)**(-m+1) / (m-1) return N * _lazywhere(np.atleast_1d(x > -beta), (x, beta, m), f=rhs, f2=lhs) def _munp(self, n, beta, m): """ Returns the n-th non-central moment of the crystalball function. """ N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) + _norm_pdf_C * _norm_cdf(beta)) def n_th_moment(n, beta, m): """ Returns n-th moment. 
Defined only if n+1 < m Function cannot broadcast due to the loop over n """ A = (m/beta)**m * np.exp(-beta**2 / 2.0) B = m/beta - beta rhs = 2**((n-1)/2.0) * sc.gamma((n+1)/2) * (1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)) lhs = np.zeros(rhs.shape) for k in range(n + 1): lhs += sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) * (m/beta)**(-m + k + 1) return A * lhs + rhs return N * _lazywhere(np.atleast_1d(n + 1 < m), (n, beta, m), np.vectorize(n_th_moment, otypes=[np.float]), np.inf) def _argcheck(self, beta, m): """ In HEP crystal-ball is also defined for m = 1 (see plot on wikipedia) But the function doesn't have a finite integral in this corner case, and isn't a PDF anymore (but can still be used on a finite range). Here we restrict the function to m > 1. In addition we restrict beta to be positive """ return (m > 1) & (beta > 0) crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function") def _argus_phi(chi): """ Utility function for the argus distribution used in the CDF and norm of the Argus Funktion """ return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5 class argus_gen(rv_continuous): r""" Argus distribution %(before_notes)s Notes ----- The probability density function for `argus` is: .. math:: f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2} \exp(- 0.5 \chi^2 (1 - x^2)) where: .. math:: \Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2 with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard normal distribution, respectively. `argus` takes :math:`\chi` as shape a parameter. References ---------- .. [1] "ARGUS distribution", https://en.wikipedia.org/wiki/ARGUS_distribution %(after_notes)s .. versionadded:: 0.19.0 %(example)s """ def _pdf(self, x, chi): """ Return PDF of the argus function argus.pdf(x, chi) = chi**3 / (sqrt(2*pi) * Psi(chi)) * x * sqrt(1-x**2) * exp(- 0.5 * chi**2 * (1 - x**2)) """ y = 1.0 - x**2 return chi**3 / (_norm_pdf_C * _argus_phi(chi)) * x * np.sqrt(y) * np.exp(-chi**2 * y / 2) def _cdf(self, x, chi): """ Return CDF of the argus function """ return 1.0 - self._sf(x, chi) def _sf(self, x, chi): """ Return survival function of the argus function """ return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi) argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0) class rv_histogram(rv_continuous): """ Generates a distribution given by a histogram. This is useful to generate a template distribution from a binned datasample. As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it a collection of generic methods (see `rv_continuous` for the full list), and implements them based on the properties of the provided binned datasample. Parameters ---------- histogram : tuple of array_like Tuple containing two array_like objects The first containing the content of n bins The second containing the (n+1) bin boundaries In particular the return value np.histogram is accepted Notes ----- There are no additional shape parameters except for the loc and scale. The pdf is defined as a stepwise function from the provided histogram The cdf is a linear interpolation of the pdf. .. 
versionadded:: 0.19.0 Examples -------- Create a scipy.stats distribution from a numpy histogram >>> import scipy.stats >>> import numpy as np >>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123) >>> hist = np.histogram(data, bins=100) >>> hist_dist = scipy.stats.rv_histogram(hist) Behaves like an ordinary scipy rv_continuous distribution >>> hist_dist.pdf(1.0) 0.20538577847618705 >>> hist_dist.cdf(2.0) 0.90818568543056499 PDF is zero above (below) the highest (lowest) bin of the histogram, defined by the max (min) of the original dataset >>> hist_dist.pdf(np.max(data)) 0.0 >>> hist_dist.cdf(np.max(data)) 1.0 >>> hist_dist.pdf(np.min(data)) 7.7591907244498314e-05 >>> hist_dist.cdf(np.min(data)) 0.0 PDF and CDF follow the histogram >>> import matplotlib.pyplot as plt >>> X = np.linspace(-5.0, 5.0, 100) >>> plt.title("PDF from Template") >>> plt.hist(data, density=True, bins=100) >>> plt.plot(X, hist_dist.pdf(X), label='PDF') >>> plt.plot(X, hist_dist.cdf(X), label='CDF') >>> plt.show() """ _support_mask = rv_continuous._support_mask def __init__(self, histogram, *args, **kwargs): """ Create a new distribution using the given histogram Parameters ---------- histogram : tuple of array_like Tuple containing two array_like objects The first containing the content of n bins The second containing the (n+1) bin boundaries In particular the return value np.histogram is accepted """ self._histogram = histogram if len(histogram) != 2: raise ValueError("Expected length 2 for parameter histogram") self._hpdf = np.asarray(histogram[0]) self._hbins = np.asarray(histogram[1]) if len(self._hpdf) + 1 != len(self._hbins): raise ValueError("Number of elements in histogram content " "and histogram boundaries do not match, " "expected n and n+1.") self._hbin_widths = self._hbins[1:] - self._hbins[:-1] self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths)) self._hcdf = np.cumsum(self._hpdf * self._hbin_widths) self._hpdf = np.hstack([0.0, self._hpdf, 0.0]) self._hcdf = np.hstack([0.0, self._hcdf]) # Set support kwargs['a'] = self._hbins[0] kwargs['b'] = self._hbins[-1] super(rv_histogram, self).__init__(*args, **kwargs) def _pdf(self, x): """ PDF of the histogram """ return self._hpdf[np.searchsorted(self._hbins, x, side='right')] def _cdf(self, x): """ CDF calculated from the histogram """ return np.interp(x, self._hbins, self._hcdf) def _ppf(self, x): """ Percentile function calculated from the histogram """ return np.interp(x, self._hcdf, self._hbins) def _munp(self, n): """Compute the n-th non-central moment.""" integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1) return np.sum(self._hpdf[1:-1] * integrals) def _entropy(self): """Compute entropy of distribution""" res = _lazywhere(self._hpdf[1:-1] > 0.0, (self._hpdf[1:-1],), np.log, 0.0) return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths) def _updated_ctor_param(self): """ Set the histogram as additional constructor argument """ dct = super(rv_histogram, self)._updated_ctor_param() dct['histogram'] = self._histogram return dct # Collect names of classes and objects in this module. pairs = list(globals().items()) _distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous) __all__ = _distn_names + _distn_gen_names + ['rv_histogram']
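The truncnorm docstring above gives the recipe for converting clip values on the data scale into the standardized a, b shape parameters. A minimal sketch of that recipe follows; the clip bounds, mean, and standard deviation are made-up illustration values, not taken from the source.

import numpy as np
from scipy.stats import truncnorm

# Hypothetical clipping bounds and moments, for illustration only.
myclip_a, myclip_b = 0.0, 10.0
my_mean, my_std = 5.0, 2.0

# Standardize the clip values exactly as the docstring prescribes.
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std

# loc/scale then shift the standardized distribution back to the data scale.
x = np.linspace(myclip_a, myclip_b, 5)
print(truncnorm.pdf(x, a, b, loc=my_mean, scale=my_std))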
186,898
26.912037
124
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/distributions.py
# # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # # NOTE: To look at history using `git blame`, use `git blame -M -C -C` # instead of `git blame -Lxxx,+x`. # from __future__ import division, print_function, absolute_import from ._distn_infrastructure import (entropy, rv_discrete, rv_continuous, rv_frozen) from . import _continuous_distns from . import _discrete_distns from ._continuous_distns import * from ._discrete_distns import * # For backwards compatibility e.g. pymc expects distributions.__all__. __all__ = ['entropy', 'rv_discrete', 'rv_continuous', 'rv_histogram'] # Add only the distribution names, not the *_gen names. __all__ += _continuous_distns._distn_names __all__ += _discrete_distns._distn_names
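Because this module only re-exports the distributions and rebuilds __all__ from the *_distn_names lists, a quick sanity check of what it exposes might look like the following sketch (assuming an ordinary scipy installation):

from scipy.stats import distributions

# Distribution instances are listed in __all__; the *_gen classes are not.
print('norm' in distributions.__all__)       # expected: True
print('norm_gen' in distributions.__all__)   # expected: False
print('rv_histogram' in distributions.__all__)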
819
31.8
72
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_binned_statistic.py
from __future__ import division, print_function, absolute_import import numpy as np from scipy._lib.six import callable, xrange from scipy._lib._numpy_compat import suppress_warnings from collections import namedtuple __all__ = ['binned_statistic', 'binned_statistic_2d', 'binned_statistic_dd'] BinnedStatisticResult = namedtuple('BinnedStatisticResult', ('statistic', 'bin_edges', 'binnumber')) def binned_statistic(x, values, statistic='mean', bins=10, range=None): """ Compute a binned statistic for one or more sets of data. This is a generalization of a histogram function. A histogram divides the space into bins, and returns the count of the number of points in each bin. This function allows the computation of the sum, mean, median, or other statistic of the values (or set of values) within each bin. Parameters ---------- x : (N,) array_like A sequence of values to be binned. values : (N,) array_like or list of (N,) array_like The data on which the statistic will be computed. This must be the same shape as `x`, or a set of sequences - each the same shape as `x`. If `values` is a set of sequences, the statistic will be computed on each independently. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. * 'min' : compute the minimum of values for points within each bin. Empty bins will be represented by NaN. * 'max' : compute the maximum of values for point within each bin. Empty bins will be represented by NaN. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. bins : int or sequence of scalars, optional If `bins` is an int, it defines the number of equal-width bins in the given range (10 by default). If `bins` is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. Values in `x` that are smaller than lowest bin edge are assigned to bin number 0, values beyond the highest bin are assigned to ``bins[-1]``. If the bin edges are specified, the number of bins will be, (nx = len(bins)-1). range : (float, float) or [(float, float)], optional The lower and upper range of the bins. If not provided, range is simply ``(x.min(), x.max())``. Values outside the range are ignored. Returns ------- statistic : array The values of the selected statistic in each bin. bin_edges : array of dtype float Return the bin edges ``(length(statistic)+1)``. binnumber: 1-D ndarray of ints Indices of the bins (corresponding to `bin_edges`) in which each value of `x` belongs. Same length as `values`. A binnumber of `i` means the corresponding value is between (bin_edges[i-1], bin_edges[i]). See Also -------- numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd Notes ----- All but the last (righthand-most) bin is half-open. 
In other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. .. versionadded:: 0.11.0 Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt First some basic examples: Create two evenly spaced bins in the range of the given sample, and sum the corresponding values in each of those bins: >>> values = [1.0, 1.0, 2.0, 1.5, 3.0] >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) (array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2])) Multiple arrays of values can also be passed. The statistic is calculated on each set independently: >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]] >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2) (array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2])) >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean', ... bins=3) (array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3])) As a second example, we now generate some random data of sailing boat speed as a function of wind speed, and then determine how fast our boat is for certain wind speeds: >>> windspeed = 8 * np.random.rand(500) >>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500) >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed, ... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7]) >>> plt.figure() >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data') >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5, ... label='binned statistic of data') >>> plt.legend() Now we can use ``binnumber`` to select all datapoints with a windspeed below 1: >>> low_boatspeed = boatspeed[binnumber == 0] As a final example, we will use ``bin_edges`` and ``binnumber`` to make a plot of a distribution that shows the mean and distribution around that mean per bin, on top of a regular histogram and the probability distribution function: >>> x = np.linspace(0, 5, num=500) >>> x_pdf = stats.maxwell.pdf(x) >>> samples = stats.maxwell.rvs(size=10000) >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf, ... statistic='mean', bins=25) >>> bin_width = (bin_edges[1] - bin_edges[0]) >>> bin_centers = bin_edges[1:] - bin_width/2 >>> plt.figure() >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled', ... alpha=0.2, label='histogram of data') >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf') >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2, ... label='binned statistic of data') >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5) >>> plt.legend(fontsize=10) >>> plt.show() """ try: N = len(bins) except TypeError: N = 1 if N != 1: bins = [np.asarray(bins, float)] if range is not None: if len(range) == 2: range = [range] medians, edges, binnumbers = binned_statistic_dd( [x], values, statistic, bins, range) return BinnedStatisticResult(medians, edges[0], binnumbers) BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult', ('statistic', 'x_edge', 'y_edge', 'binnumber')) def binned_statistic_2d(x, y, values, statistic='mean', bins=10, range=None, expand_binnumbers=False): """ Compute a bidimensional binned statistic for one or more sets of data. This is a generalization of a histogram2d function. A histogram divides the space into bins, and returns the count of the number of points in each bin. 
This function allows the computation of the sum, mean, median, or other statistic of the values (or set of values) within each bin. Parameters ---------- x : (N,) array_like A sequence of values to be binned along the first dimension. y : (N,) array_like A sequence of values to be binned along the second dimension. values : (N,) array_like or list of (N,) array_like The data on which the statistic will be computed. This must be the same shape as `x`, or a list of sequences - each with the same shape as `x`. If `values` is such a list, the statistic will be computed on each independently. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. * 'min' : compute the minimum of values for points within each bin. Empty bins will be represented by NaN. * 'max' : compute the maximum of values for point within each bin. Empty bins will be represented by NaN. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. bins : int or [int, int] or array_like or [array, array], optional The bin specification: * the number of bins for the two dimensions (nx = ny = bins), * the number of bins in each dimension (nx, ny = bins), * the bin edges for the two dimensions (x_edge = y_edge = bins), * the bin edges in each dimension (x_edge, y_edge = bins). If the bin edges are specified, the number of bins will be, (nx = len(x_edge)-1, ny = len(y_edge)-1). range : (2,2) array_like, optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be considered outliers and not tallied in the histogram. expand_binnumbers : bool, optional 'False' (default): the returned `binnumber` is a shape (N,) array of linearized bin indices. 'True': the returned `binnumber` is 'unraveled' into a shape (2,N) ndarray, where each row gives the bin numbers in the corresponding dimension. See the `binnumber` returned value, and the `Examples` section. .. versionadded:: 0.17.0 Returns ------- statistic : (nx, ny) ndarray The values of the selected statistic in each two-dimensional bin. x_edge : (nx + 1) ndarray The bin edges along the first dimension. y_edge : (ny + 1) ndarray The bin edges along the second dimension. binnumber : (N,) array of ints or (2,N) ndarray of ints This assigns to each element of `sample` an integer that represents the bin in which this observation falls. The representation depends on the `expand_binnumbers` argument. See `Notes` for details. See Also -------- numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd Notes ----- Binedges: All but the last (righthand-most) bin is half-open. In other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. 
The last bin, however, is ``[3, 4]``, which *includes* 4. `binnumber`: This returned argument assigns to each element of `sample` an integer that represents the bin in which it belongs. The representation depends on the `expand_binnumbers` argument. If 'False' (default): The returned `binnumber` is a shape (N,) array of linearized indices mapping each element of `sample` to its corresponding bin (using row-major ordering). If 'True': The returned `binnumber` is a shape (2,N) ndarray where each row indicates bin placements for each dimension respectively. In each dimension, a binnumber of `i` means the corresponding value is between (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'. .. versionadded:: 0.11.0 Examples -------- >>> from scipy import stats Calculate the counts with explicit bin-edges: >>> x = [0.1, 0.1, 0.1, 0.6] >>> y = [2.1, 2.6, 2.1, 2.1] >>> binx = [0.0, 0.5, 1.0] >>> biny = [2.0, 2.5, 3.0] >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny]) >>> ret.statistic array([[ 2., 1.], [ 1., 0.]]) The bin in which each sample is placed is given by the `binnumber` returned parameter. By default, these are the linearized bin indices: >>> ret.binnumber array([5, 6, 5, 9]) The bin indices can also be expanded into separate entries for each dimension using the `expand_binnumbers` parameter: >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny], ... expand_binnumbers=True) >>> ret.binnumber array([[1, 1, 1, 2], [1, 2, 1, 1]]) Which shows that the first three elements belong in the xbin 1, and the fourth into xbin 2; and so on for y. """ # This code is based on np.histogram2d try: N = len(bins) except TypeError: N = 1 if N != 1 and N != 2: xedges = yedges = np.asarray(bins, float) bins = [xedges, yedges] medians, edges, binnumbers = binned_statistic_dd( [x, y], values, statistic, bins, range, expand_binnumbers=expand_binnumbers) return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers) BinnedStatisticddResult = namedtuple('BinnedStatisticddResult', ('statistic', 'bin_edges', 'binnumber')) def binned_statistic_dd(sample, values, statistic='mean', bins=10, range=None, expand_binnumbers=False): """ Compute a multidimensional binned statistic for a set of data. This is a generalization of a histogramdd function. A histogram divides the space into bins, and returns the count of the number of points in each bin. This function allows the computation of the sum, mean, median, or other statistic of the values within each bin. Parameters ---------- sample : array_like Data to histogram passed as a sequence of D arrays of length N, or as an (N,D) array. values : (N,) array_like or list of (N,) array_like The data on which the statistic will be computed. This must be the same shape as `x`, or a list of sequences - each with the same shape as `x`. If `values` is such a list, the statistic will be computed on each independently. statistic : string or callable, optional The statistic to compute (default is 'mean'). The following statistics are available: * 'mean' : compute the mean of values for points within each bin. Empty bins will be represented by NaN. * 'median' : compute the median of values for points within each bin. Empty bins will be represented by NaN. * 'count' : compute the count of points within each bin. This is identical to an unweighted histogram. `values` array is not referenced. * 'sum' : compute the sum of values for points within each bin. This is identical to a weighted histogram. 
* 'min' : compute the minimum of values for points within each bin. Empty bins will be represented by NaN. * 'max' : compute the maximum of values for point within each bin. Empty bins will be represented by NaN. * function : a user-defined function which takes a 1D array of values, and outputs a single numerical statistic. This function will be called on the values in each bin. Empty bins will be represented by function([]), or NaN if this returns an error. bins : sequence or int, optional The bin specification must be in one of the following forms: * A sequence of arrays describing the bin edges along each dimension. * The number of bins for each dimension (nx, ny, ... = bins). * The number of bins for all dimensions (nx = ny = ... = bins). range : sequence, optional A sequence of lower and upper bin edges to be used if the edges are not given explicitly in `bins`. Defaults to the minimum and maximum values along each dimension. expand_binnumbers : bool, optional 'False' (default): the returned `binnumber` is a shape (N,) array of linearized bin indices. 'True': the returned `binnumber` is 'unraveled' into a shape (D,N) ndarray, where each row gives the bin numbers in the corresponding dimension. See the `binnumber` returned value, and the `Examples` section of `binned_statistic_2d`. .. versionadded:: 0.17.0 Returns ------- statistic : ndarray, shape(nx1, nx2, nx3,...) The values of the selected statistic in each two-dimensional bin. bin_edges : list of ndarrays A list of D arrays describing the (nxi + 1) bin edges for each dimension. binnumber : (N,) array of ints or (D,N) ndarray of ints This assigns to each element of `sample` an integer that represents the bin in which this observation falls. The representation depends on the `expand_binnumbers` argument. See `Notes` for details. See Also -------- numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d Notes ----- Binedges: All but the last (righthand-most) bin is half-open in each dimension. In other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes* 4. `binnumber`: This returned argument assigns to each element of `sample` an integer that represents the bin in which it belongs. The representation depends on the `expand_binnumbers` argument. If 'False' (default): The returned `binnumber` is a shape (N,) array of linearized indices mapping each element of `sample` to its corresponding bin (using row-major ordering). If 'True': The returned `binnumber` is a shape (D,N) ndarray where each row indicates bin placements for each dimension respectively. In each dimension, a binnumber of `i` means the corresponding value is between (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'. .. versionadded:: 0.11.0 """ known_stats = ['mean', 'median', 'count', 'sum', 'std','min','max'] if not callable(statistic) and statistic not in known_stats: raise ValueError('invalid statistic %r' % (statistic,)) # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`) # `Dlen` is the length of elements along each dimension. # This code is based on np.histogramdd try: # `sample` is an ND-array. Dlen, Ndim = sample.shape except (AttributeError, ValueError): # `sample` is a sequence of 1D arrays. 
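# np.atleast_2d stacks the D length-N input arrays into shape (D, N); the
# transpose restores the (N, D) layout assumed by the shape unpacking below.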
sample = np.atleast_2d(sample).T Dlen, Ndim = sample.shape # Store initial shape of `values` to preserve it in the output values = np.asarray(values) input_shape = list(values.shape) # Make sure that `values` is 2D to iterate over rows values = np.atleast_2d(values) Vdim, Vlen = values.shape # Make sure `values` match `sample` if(statistic != 'count' and Vlen != Dlen): raise AttributeError('The number of `values` elements must match the ' 'length of each `sample` dimension.') nbin = np.empty(Ndim, int) # Number of bins in each dimension edges = Ndim * [None] # Bin edges for each dim (will be 2D array) dedges = Ndim * [None] # Spacing between edges (will be 2D array) try: M = len(bins) if M != Ndim: raise AttributeError('The dimension of bins must be equal ' 'to the dimension of the sample x.') except TypeError: bins = Ndim * [bins] # Select range for each dimension # Used only if number of bins is given. if range is None: smin = np.atleast_1d(np.array(sample.min(axis=0), float)) smax = np.atleast_1d(np.array(sample.max(axis=0), float)) else: smin = np.zeros(Ndim) smax = np.zeros(Ndim) for i in xrange(Ndim): smin[i], smax[i] = range[i] # Make sure the bins have a finite width. for i in xrange(len(smin)): if smin[i] == smax[i]: smin[i] = smin[i] - .5 smax[i] = smax[i] + .5 # Create edge arrays for i in xrange(Ndim): if np.isscalar(bins[i]): nbin[i] = bins[i] + 2 # +2 for outlier bins edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1) else: edges[i] = np.asarray(bins[i], float) nbin[i] = len(edges[i]) + 1 # +1 for outlier bins dedges[i] = np.diff(edges[i]) nbin = np.asarray(nbin) # Compute the bin number each sample falls into, in each dimension sampBin = [ np.digitize(sample[:, i], edges[i]) for i in xrange(Ndim) ] # Using `digitize`, values that fall on an edge are put in the right bin. # For the rightmost bin, we want values equal to the right # edge to be counted in the last bin, and not as an outlier. for i in xrange(Ndim): # Find the rounding precision decimal = int(-np.log10(dedges[i].min())) + 6 # Find which points are on the rightmost edge. on_edge = np.where(np.around(sample[:, i], decimal) == np.around(edges[i][-1], decimal))[0] # Shift these points one bin to the left. sampBin[i][on_edge] -= 1 # Compute the sample indices in the flattened statistic matrix. 
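# np.ravel_multi_index collapses the per-dimension indices in sampBin into a
# single flat index over the nbin grid, one integer bin number per sample.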
binnumbers = np.ravel_multi_index(sampBin, nbin) result = np.empty([Vdim, nbin.prod()], float) if statistic == 'mean': result.fill(np.nan) flatcount = np.bincount(binnumbers, None) a = flatcount.nonzero() for vv in xrange(Vdim): flatsum = np.bincount(binnumbers, values[vv]) result[vv, a] = flatsum[a] / flatcount[a] elif statistic == 'std': result.fill(0) flatcount = np.bincount(binnumbers, None) a = flatcount.nonzero() for vv in xrange(Vdim): flatsum = np.bincount(binnumbers, values[vv]) flatsum2 = np.bincount(binnumbers, values[vv] ** 2) result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] - (flatsum[a] / flatcount[a]) ** 2) elif statistic == 'count': result.fill(0) flatcount = np.bincount(binnumbers, None) a = np.arange(len(flatcount)) result[:, a] = flatcount[np.newaxis, :] elif statistic == 'sum': result.fill(0) for vv in xrange(Vdim): flatsum = np.bincount(binnumbers, values[vv]) a = np.arange(len(flatsum)) result[vv, a] = flatsum elif statistic == 'median': result.fill(np.nan) for i in np.unique(binnumbers): for vv in xrange(Vdim): result[vv, i] = np.median(values[vv, binnumbers == i]) elif statistic == 'min': result.fill(np.nan) for i in np.unique(binnumbers): for vv in xrange(Vdim): result[vv, i] = np.min(values[vv, binnumbers == i]) elif statistic == 'max': result.fill(np.nan) for i in np.unique(binnumbers): for vv in xrange(Vdim): result[vv, i] = np.max(values[vv, binnumbers == i]) elif callable(statistic): with np.errstate(invalid='ignore'), suppress_warnings() as sup: sup.filter(RuntimeWarning) try: null = statistic([]) except: null = np.nan result.fill(null) for i in np.unique(binnumbers): for vv in xrange(Vdim): result[vv, i] = statistic(values[vv, binnumbers == i]) # Shape into a proper matrix result = result.reshape(np.append(Vdim, nbin)) # Remove outliers (indices 0 and -1 for each bin-dimension). core = [slice(None)] + Ndim * [slice(1, -1)] result = result[core] # Unravel binnumbers into an ndarray, each row the bins for each dimension if(expand_binnumbers and Ndim > 1): binnumbers = np.asarray(np.unravel_index(binnumbers, nbin)) if np.any(result.shape[1:] != nbin - 2): raise RuntimeError('Internal Shape Error') # Reshape to have output (`reulst`) match input (`values`) shape result = result.reshape(input_shape[:-1] + list(nbin-2)) return BinnedStatisticddResult(result, edges, binnumbers)
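The docstrings above describe the multidimensional interface and the expand_binnumbers option at length; a short usage sketch of binned_statistic_dd with made-up sample data could look like this:

import numpy as np
from scipy.stats import binned_statistic_dd

rng = np.random.RandomState(0)
sample = rng.uniform(size=(100, 2))    # 100 points in 2 dimensions
values = sample[:, 0] + sample[:, 1]   # one value attached to each point

# Mean of `values` on a 4x4 grid, with per-dimension bin indices returned.
stat, edges, binnumber = binned_statistic_dd(
    sample, values, statistic='mean', bins=[4, 4], expand_binnumbers=True)

print(stat.shape)        # (4, 4)
print(len(edges))        # 2, one edge array per dimension
print(binnumber.shape)   # (2, 100) because expand_binnumbers=True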
25,912
40.795161
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_distr_params.py
""" Sane parameters for stats.distributions. """ distcont = [ ['alpha', (3.5704770516650459,)], ['anglit', ()], ['arcsine', ()], ['argus', (1.0,)], ['beta', (2.3098496451481823, 0.62687954300963677)], ['betaprime', (5, 6)], ['bradford', (0.29891359763170633,)], ['burr', (10.5, 4.3)], ['burr12', (10, 4)], ['cauchy', ()], ['chi', (78,)], ['chi2', (55,)], ['cosine', ()], ['crystalball', (2.0, 3.0)], ['dgamma', (1.1023326088288166,)], ['dweibull', (2.0685080649914673,)], ['erlang', (10,)], ['expon', ()], ['exponnorm', (1.5,)], ['exponpow', (2.697119160358469,)], ['exponweib', (2.8923945291034436, 1.9505288745913174)], ['f', (29, 18)], ['fatiguelife', (29,)], # correction numargs = 1 ['fisk', (3.0857548622253179,)], ['foldcauchy', (4.7164673455831894,)], ['foldnorm', (1.9521253373555869,)], ['frechet_l', (3.6279911255583239,)], ['frechet_r', (1.8928171603534227,)], ['gamma', (1.9932305483800778,)], ['gausshyper', (13.763771604130699, 3.1189636648681431, 2.5145980350183019, 5.1811649903971615)], # veryslow ['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)], ['genextreme', (-0.1,)], ['gengamma', (4.4162385429431925, 3.1193091679242761)], ['gengamma', (4.4162385429431925, -3.1193091679242761)], ['genhalflogistic', (0.77274727809929322,)], ['genlogistic', (0.41192440799679475,)], ['gennorm', (1.2988442399460265,)], ['halfgennorm', (0.6748054997000371,)], ['genpareto', (0.1,)], # use case with finite moments ['gilbrat', ()], ['gompertz', (0.94743713075105251,)], ['gumbel_l', ()], ['gumbel_r', ()], ['halfcauchy', ()], ['halflogistic', ()], ['halfnorm', ()], ['hypsecant', ()], ['invgamma', (4.0668996136993067,)], ['invgauss', (0.14546264555347513,)], ['invweibull', (10.58,)], ['johnsonsb', (4.3172675099141058, 3.1837781130785063)], ['johnsonsu', (2.554395574161155, 2.2482281679651965)], ['kappa4', (0.0, 0.0)], ['kappa4', (-0.1, 0.1)], ['kappa4', (0.0, 0.1)], ['kappa4', (0.1, 0.0)], ['kappa3', (1.0,)], ['ksone', (1000,)], # replace 22 by 100 to avoid failing range, ticket 956 ['kstwobign', ()], ['laplace', ()], ['levy', ()], ['levy_l', ()], ['levy_stable', (0.35667405469844993, -0.67450531578494011)], # NotImplementedError # rvs not tested ['loggamma', (0.41411931826052117,)], ['logistic', ()], ['loglaplace', (3.2505926592051435,)], ['lognorm', (0.95368226960575331,)], ['lomax', (1.8771398388773268,)], ['maxwell', ()], ['mielke', (10.4, 3.6)], ['moyal', ()], ['nakagami', (4.9673794866666237,)], ['ncf', (27, 27, 0.41578441799226107)], ['nct', (14, 0.24045031331198066)], ['ncx2', (21, 1.0560465975116415)], ['norm', ()], ['norminvgauss', (1., 0.5)], ['pareto', (2.621716532144454,)], ['pearson3', (0.1,)], ['powerlaw', (1.6591133289905851,)], ['powerlognorm', (2.1413923530064087, 0.44639540782048337)], ['powernorm', (4.4453652254590779,)], ['rayleigh', ()], ['rdist', (0.9,)], # feels also slow ['recipinvgauss', (0.63004267809369119,)], ['reciprocal', (0.0062309367010521255, 1.0062309367010522)], ['rice', (0.7749725210111873,)], ['semicircular', ()], ['skewnorm', (4.0,)], ['t', (2.7433514990818093,)], ['trapz', (0.2, 0.8)], ['triang', (0.15785029824528218,)], ['truncexpon', (4.6907725456810478,)], ['truncnorm', (-1.0978730080013919, 2.7306754109031979)], ['truncnorm', (0.1, 2.)], ['tukeylambda', (3.1321477856738267,)], ['uniform', ()], ['vonmises', (3.9939042581071398,)], ['vonmises_line', (3.9939042581071398,)], ['wald', ()], ['weibull_max', (2.8687961709100187,)], ['weibull_min', (1.7866166930421596,)], ['wrapcauchy', (0.031071279018614728,)]] distdiscrete = [ ['bernoulli',(0.3,)], 
['binom', (5, 0.4)], ['boltzmann',(1.4, 19)], ['dlaplace', (0.8,)], # 0.5 ['geom', (0.5,)], ['hypergeom',(30, 12, 6)], ['hypergeom',(21,3,12)], # numpy.random (3,18,12) numpy ticket:921 ['hypergeom',(21,18,11)], # numpy.random (18,3,11) numpy ticket:921 ['logser', (0.6,)], # re-enabled, numpy ticket:921 ['nbinom', (5, 0.5)], ['nbinom', (0.4, 0.4)], # from tickets: 583 ['planck', (0.51,)], # 4.1 ['poisson', (0.6,)], ['randint', (7, 31)], ['skellam', (15, 8)], ['zipf', (6.5,)] ]
file_length: 4,586
avg_line_length: 33.75
max_line_length: 79
extension_type: py
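The `distcont` and `distdiscrete` tables in the file above pair each `scipy.stats` distribution name with a set of workable shape parameters. A minimal, hedged sketch of how such (name, parameters) pairs can be consumed — looking the name up on `scipy.stats` and freezing the distribution — follows; the `pairs` list copies three entries from the tables and the printed values are illustrative only.

# Hedged sketch: turn (name, shape-parameter) pairs like those in
# distcont/distdiscrete into frozen scipy.stats distributions.
import numpy as np
from scipy import stats

pairs = [
    ('alpha', (3.5704770516650459,)),  # continuous, one shape parameter
    ('norm', ()),                      # continuous, no shape parameters
    ('binom', (5, 0.4)),               # discrete, two shape parameters
]

np.random.seed(1234)
for name, shapes in pairs:
    dist = getattr(stats, name)(*shapes)  # frozen distribution
    sample = dist.rvs(size=3)             # random variates under those shapes
    print(name, float(dist.mean()), sample)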
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/setup.py
from __future__ import division, print_function, absolute_import from os.path import join def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('stats', parent_package, top_path) config.add_data_dir('tests') statlib_src = [join('statlib', '*.f')] config.add_library('statlib', sources=statlib_src) # add statlib module config.add_extension('statlib', sources=['statlib.pyf'], f2py_options=['--no-wrap-functions'], libraries=['statlib'], depends=statlib_src ) # add _stats module config.add_extension('_stats', sources=['_stats.c'], ) # add mvn module config.add_extension('mvn', sources=['mvn.pyf','mvndst.f'], ) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
file_length: 938
avg_line_length: 23.076923
max_line_length: 64
extension_type: py
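The setup.py above follows the standard numpy.distutils pattern: a configuration() function registers data directories, libraries and extension modules, and the result is handed to setup() via todict(). A stripped-down sketch of the same pattern for a hypothetical package — 'examplepkg' and '_fastmath.c' are placeholder names, not part of the original file — might look like this:

# Minimal numpy.distutils configuration sketch; 'examplepkg' and
# '_fastmath.c' are hypothetical names used only for illustration.
from __future__ import division, print_function, absolute_import


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('examplepkg', parent_package, top_path)
    config.add_data_dir('tests')                # ship the test suite
    config.add_extension('_fastmath',           # a C extension module
                         sources=['_fastmath.c'])
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())

Building in place with 'python setup.py build_ext --inplace' then makes examplepkg._fastmath importable alongside the pure-Python sources.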
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/kde.py
#------------------------------------------------------------------------------- # # Define classes for (uni/multi)-variate kernel density estimation. # # Currently, only Gaussian kernels are implemented. # # Written by: Robert Kern # # Date: 2004-08-09 # # Modified: 2005-02-10 by Robert Kern. # Contributed to Scipy # 2005-10-07 by Robert Kern. # Some fixes to match the new scipy_core # # Copyright 2004-2005 by Enthought, Inc. # #------------------------------------------------------------------------------- from __future__ import division, print_function, absolute_import # Standard library imports. import warnings # Scipy imports. from scipy._lib.six import callable, string_types from scipy import linalg, special from scipy.special import logsumexp from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \ ravel, power, atleast_1d, squeeze, sum, transpose import numpy as np from numpy.random import randint, multivariate_normal # Local imports. from . import mvn __all__ = ['gaussian_kde'] class gaussian_kde(object): """Representation of a kernel-density estimate using Gaussian kernels. Kernel density estimation is a way to estimate the probability density function (PDF) of a random variable in a non-parametric way. `gaussian_kde` works for both uni-variate and multi-variate data. It includes automatic bandwidth determination. The estimation works best for a unimodal distribution; bimodal or multi-modal distributions tend to be oversmoothed. Parameters ---------- dataset : array_like Datapoints to estimate from. In case of univariate data this is a 1-D array, otherwise a 2-D array with shape (# of dims, # of data). bw_method : str, scalar or callable, optional The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If a scalar, this will be used directly as `kde.factor`. If a callable, it should take a `gaussian_kde` instance as only parameter and return a scalar. If None (default), 'scott' is used. See Notes for more details. Attributes ---------- dataset : ndarray The dataset with which `gaussian_kde` was initialized. d : int Number of dimensions. n : int Number of datapoints. factor : float The bandwidth factor, obtained from `kde.covariance_factor`, with which the covariance matrix is multiplied. covariance : ndarray The covariance matrix of `dataset`, scaled by the calculated bandwidth (`kde.factor`). inv_cov : ndarray The inverse of `covariance`. Methods ------- evaluate __call__ integrate_gaussian integrate_box_1d integrate_box integrate_kde pdf logpdf resample set_bandwidth covariance_factor Notes ----- Bandwidth selection strongly influences the estimate obtained from the KDE (much more so than the actual shape of the kernel). Bandwidth selection can be done by a "rule of thumb", by cross-validation, by "plug-in methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde` uses a rule of thumb, the default is Scott's Rule. Scott's Rule [1]_, implemented as `scotts_factor`, is:: n**(-1./(d+4)), with ``n`` the number of data points and ``d`` the number of dimensions. Silverman's Rule [2]_, implemented as `silverman_factor`, is:: (n * (d + 2) / 4.)**(-1. / (d + 4)). Good general descriptions of kernel density estimation can be found in [1]_ and [2]_, the mathematics for this multi-dimensional implementation can be found in [1]_. References ---------- .. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and Visualization", John Wiley & Sons, New York, Chicester, 1992. .. 
[2] B.W. Silverman, "Density Estimation for Statistics and Data Analysis", Vol. 26, Monographs on Statistics and Applied Probability, Chapman and Hall, London, 1986. .. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993. .. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel conditional density estimation", Computational Statistics & Data Analysis, Vol. 36, pp. 279-298, 2001. Examples -------- Generate some random two-dimensional data: >>> from scipy import stats >>> def measure(n): ... "Measurement model, return two coupled measurements." ... m1 = np.random.normal(size=n) ... m2 = np.random.normal(scale=0.5, size=n) ... return m1+m2, m1-m2 >>> m1, m2 = measure(2000) >>> xmin = m1.min() >>> xmax = m1.max() >>> ymin = m2.min() >>> ymax = m2.max() Perform a kernel density estimate on the data: >>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j] >>> positions = np.vstack([X.ravel(), Y.ravel()]) >>> values = np.vstack([m1, m2]) >>> kernel = stats.gaussian_kde(values) >>> Z = np.reshape(kernel(positions).T, X.shape) Plot the results: >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots() >>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, ... extent=[xmin, xmax, ymin, ymax]) >>> ax.plot(m1, m2, 'k.', markersize=2) >>> ax.set_xlim([xmin, xmax]) >>> ax.set_ylim([ymin, ymax]) >>> plt.show() """ def __init__(self, dataset, bw_method=None): self.dataset = atleast_2d(dataset) if not self.dataset.size > 1: raise ValueError("`dataset` input should have multiple elements.") self.d, self.n = self.dataset.shape self.set_bandwidth(bw_method=bw_method) def evaluate(self, points): """Evaluate the estimated pdf on a set of points. Parameters ---------- points : (# of dimensions, # of points)-array Alternatively, a (# of dimensions,) vector can be passed in and treated as a single point. Returns ------- values : (# of points,)-array The values at each point. Raises ------ ValueError : if the dimensionality of the input points is different than the dimensionality of the KDE. """ points = atleast_2d(points) d, m = points.shape if d != self.d: if d == 1 and m == self.d: # points was passed in as a row vector points = reshape(points, (self.d, 1)) m = 1 else: msg = "points have dimension %s, dataset has dimension %s" % (d, self.d) raise ValueError(msg) result = zeros((m,), dtype=float) if m >= self.n: # there are more points than data, so loop over data for i in range(self.n): diff = self.dataset[:, i, newaxis] - points tdiff = dot(self.inv_cov, diff) energy = sum(diff*tdiff,axis=0) / 2.0 result = result + exp(-energy) else: # loop over points for i in range(m): diff = self.dataset - points[:, i, newaxis] tdiff = dot(self.inv_cov, diff) energy = sum(diff * tdiff, axis=0) / 2.0 result[i] = sum(exp(-energy), axis=0) result = result / self._norm_factor return result __call__ = evaluate def integrate_gaussian(self, mean, cov): """ Multiply estimated density by a multivariate Gaussian and integrate over the whole space. Parameters ---------- mean : aray_like A 1-D array, specifying the mean of the Gaussian. cov : array_like A 2-D array, specifying the covariance matrix of the Gaussian. Returns ------- result : scalar The value of the integral. Raises ------ ValueError If the mean or covariance of the input Gaussian differs from the KDE's dimensionality. 
""" mean = atleast_1d(squeeze(mean)) cov = atleast_2d(cov) if mean.shape != (self.d,): raise ValueError("mean does not have dimension %s" % self.d) if cov.shape != (self.d, self.d): raise ValueError("covariance does not have dimension %s" % self.d) # make mean a column vector mean = mean[:, newaxis] sum_cov = self.covariance + cov # This will raise LinAlgError if the new cov matrix is not s.p.d # cho_factor returns (ndarray, bool) where bool is a flag for whether # or not ndarray is upper or lower triangular sum_cov_chol = linalg.cho_factor(sum_cov) diff = self.dataset - mean tdiff = linalg.cho_solve(sum_cov_chol, diff) sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det energies = sum(diff * tdiff, axis=0) / 2.0 result = sum(exp(-energies), axis=0) / norm_const / self.n return result def integrate_box_1d(self, low, high): """ Computes the integral of a 1D pdf between two bounds. Parameters ---------- low : scalar Lower bound of integration. high : scalar Upper bound of integration. Returns ------- value : scalar The result of the integral. Raises ------ ValueError If the KDE is over more than one dimension. """ if self.d != 1: raise ValueError("integrate_box_1d() only handles 1D pdfs") stdev = ravel(sqrt(self.covariance))[0] normalized_low = ravel((low - self.dataset) / stdev) normalized_high = ravel((high - self.dataset) / stdev) value = np.mean(special.ndtr(normalized_high) - special.ndtr(normalized_low)) return value def integrate_box(self, low_bounds, high_bounds, maxpts=None): """Computes the integral of a pdf over a rectangular interval. Parameters ---------- low_bounds : array_like A 1-D array containing the lower bounds of integration. high_bounds : array_like A 1-D array containing the upper bounds of integration. maxpts : int, optional The maximum number of points to use for integration. Returns ------- value : scalar The result of the integral. """ if maxpts is not None: extra_kwds = {'maxpts': maxpts} else: extra_kwds = {} value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset, self.covariance, **extra_kwds) if inform: msg = ('An integral in mvn.mvnun requires more points than %s' % (self.d * 1000)) warnings.warn(msg) return value def integrate_kde(self, other): """ Computes the integral of the product of this kernel density estimate with another. Parameters ---------- other : gaussian_kde instance The other kde. Returns ------- value : scalar The result of the integral. Raises ------ ValueError If the KDEs have different dimensionality. """ if other.d != self.d: raise ValueError("KDEs are not the same dimensionality") # we want to iterate over the smallest number of points if other.n < self.n: small = other large = self else: small = self large = other sum_cov = small.covariance + large.covariance sum_cov_chol = linalg.cho_factor(sum_cov) result = 0.0 for i in range(small.n): mean = small.dataset[:, i, newaxis] diff = large.dataset - mean tdiff = linalg.cho_solve(sum_cov_chol, diff) energies = sum(diff * tdiff, axis=0) / 2.0 result += sum(exp(-energies), axis=0) sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det result /= norm_const * large.n * small.n return result def resample(self, size=None): """ Randomly sample a dataset from the estimated pdf. Parameters ---------- size : int, optional The number of samples to draw. If not provided, then the size is the same as the underlying dataset. 
Returns ------- resample : (self.d, `size`) ndarray The sampled dataset. """ if size is None: size = self.n norm = transpose(multivariate_normal(zeros((self.d,), float), self.covariance, size=size)) indices = randint(0, self.n, size=size) means = self.dataset[:, indices] return means + norm def scotts_factor(self): return power(self.n, -1./(self.d+4)) def silverman_factor(self): return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4)) # Default method to calculate bandwidth, can be overwritten by subclass covariance_factor = scotts_factor covariance_factor.__doc__ = """Computes the coefficient (`kde.factor`) that multiplies the data covariance matrix to obtain the kernel covariance matrix. The default is `scotts_factor`. A subclass can overwrite this method to provide a different method, or set it through a call to `kde.set_bandwidth`.""" def set_bandwidth(self, bw_method=None): """Compute the estimator bandwidth with given method. The new bandwidth calculated after a call to `set_bandwidth` is used for subsequent evaluations of the estimated density. Parameters ---------- bw_method : str, scalar or callable, optional The method used to calculate the estimator bandwidth. This can be 'scott', 'silverman', a scalar constant or a callable. If a scalar, this will be used directly as `kde.factor`. If a callable, it should take a `gaussian_kde` instance as only parameter and return a scalar. If None (default), nothing happens; the current `kde.covariance_factor` method is kept. Notes ----- .. versionadded:: 0.11 Examples -------- >>> import scipy.stats as stats >>> x1 = np.array([-7, -5, 1, 4, 5.]) >>> kde = stats.gaussian_kde(x1) >>> xs = np.linspace(-10, 10, num=50) >>> y1 = kde(xs) >>> kde.set_bandwidth(bw_method='silverman') >>> y2 = kde(xs) >>> kde.set_bandwidth(bw_method=kde.factor / 3.) >>> y3 = kde(xs) >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots() >>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo', ... label='Data points (rescaled)') >>> ax.plot(xs, y1, label='Scott (default)') >>> ax.plot(xs, y2, label='Silverman') >>> ax.plot(xs, y3, label='Const (1/3 * Silverman)') >>> ax.legend() >>> plt.show() """ if bw_method is None: pass elif bw_method == 'scott': self.covariance_factor = self.scotts_factor elif bw_method == 'silverman': self.covariance_factor = self.silverman_factor elif np.isscalar(bw_method) and not isinstance(bw_method, string_types): self._bw_method = 'use constant' self.covariance_factor = lambda: bw_method elif callable(bw_method): self._bw_method = bw_method self.covariance_factor = lambda: self._bw_method(self) else: msg = "`bw_method` should be 'scott', 'silverman', a scalar " \ "or a callable." raise ValueError(msg) self._compute_covariance() def _compute_covariance(self): """Computes the covariance matrix for each Gaussian kernel using covariance_factor(). """ self.factor = self.covariance_factor() # Cache covariance and inverse covariance of the data if not hasattr(self, '_data_inv_cov'): self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1, bias=False)) self._data_inv_cov = linalg.inv(self._data_covariance) self.covariance = self._data_covariance * self.factor**2 self.inv_cov = self._data_inv_cov / self.factor**2 self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n def pdf(self, x): """ Evaluate the estimated pdf on a provided set of points. Notes ----- This is an alias for `gaussian_kde.evaluate`. See the ``evaluate`` docstring for more details. 
""" return self.evaluate(x) def logpdf(self, x): """ Evaluate the log of the estimated pdf on a provided set of points. """ points = atleast_2d(x) d, m = points.shape if d != self.d: if d == 1 and m == self.d: # points was passed in as a row vector points = reshape(points, (self.d, 1)) m = 1 else: msg = "points have dimension %s, dataset has dimension %s" % (d, self.d) raise ValueError(msg) result = zeros((m,), dtype=float) if m >= self.n: # there are more points than data, so loop over data energy = zeros((self.n, m), dtype=float) for i in range(self.n): diff = self.dataset[:, i, newaxis] - points tdiff = dot(self.inv_cov, diff) energy[i] = sum(diff*tdiff,axis=0) / 2.0 result = logsumexp(-energy, b=1/self._norm_factor, axis=0) else: # loop over points for i in range(m): diff = self.dataset - points[:, i, newaxis] tdiff = dot(self.inv_cov, diff) energy = sum(diff * tdiff, axis=0) / 2.0 result[i] = logsumexp(-energy, b=1/self._norm_factor) return result
file_length: 18,766
avg_line_length: 32.215929
max_line_length: 80
extension_type: py
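Beyond the 2-D example in the gaussian_kde docstring, the methods defined above (evaluate, logpdf, integrate_box_1d, resample, set_bandwidth) compose naturally in one dimension. A short, hedged usage sketch — the sample data, seed and grid are arbitrary choices, not taken from the file:

# Hedged 1-D usage sketch for gaussian_kde; data, seed and grid are arbitrary.
import numpy as np
from scipy import stats

np.random.seed(0)
data = np.random.normal(loc=0.0, scale=1.0, size=500)

kde = stats.gaussian_kde(data)             # Scott's rule by default
grid = np.linspace(-4.0, 4.0, 9)
density = kde(grid)                        # __call__ is an alias for evaluate
log_density = kde.logpdf(grid)             # log of the same densities
mass = kde.integrate_box_1d(-1.0, 1.0)     # roughly 0.68 for N(0, 1)-like data
draws = kde.resample(size=10)              # shape (1, 10): (d, size)

kde.set_bandwidth(bw_method='silverman')   # switch the bandwidth rule
print(density.round(3), round(mass, 3), log_density.round(3), draws.shape)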
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/stats.py
# Copyright 2002 Gary Strangman. All rights reserved # Copyright 2002-2016 The SciPy Developers # # The original code from Gary Strangman was heavily adapted for # use in SciPy by Travis Oliphant. The original code came with the # following disclaimer: # # This software is provided "as-is". There are no expressed or implied # warranties of any kind, including, but not limited to, the warranties # of merchantability and fitness for a given application. In no event # shall Gary Strangman be liable for any direct, indirect, incidental, # special, exemplary or consequential damages (including, but not limited # to, loss of use, data or profits, or business interruption) however # caused and on any theory of liability, whether in contract, strict # liability or tort (including negligence or otherwise) arising in any way # out of the use of this software, even if advised of the possibility of # such damage. """ A collection of basic statistical functions for Python. The function names appear below. Some scalar functions defined here are also available in the scipy.special package where they work on arbitrary sized arrays. Disclaimers: The function list is obviously incomplete and, worse, the functions are not optimized. All functions have been tested (some more so than others), but they are far from bulletproof. Thus, as with any free software, no warranty or guarantee is expressed or implied. :-) A few extra functions that don't appear in the list below can be found by interested treasure-hunters. These functions don't necessarily have both list and array versions but were deemed useful. Central Tendency ---------------- .. autosummary:: :toctree: generated/ gmean hmean mode Moments ------- .. autosummary:: :toctree: generated/ moment variation skew kurtosis normaltest Altered Versions ---------------- .. autosummary:: :toctree: generated/ tmean tvar tstd tsem describe Frequency Stats --------------- .. autosummary:: :toctree: generated/ itemfreq scoreatpercentile percentileofscore cumfreq relfreq Variability ----------- .. autosummary:: :toctree: generated/ obrientransform sem zmap zscore iqr Trimming Functions ------------------ .. autosummary:: :toctree: generated/ trimboth trim1 Correlation Functions --------------------- .. autosummary:: :toctree: generated/ pearsonr fisher_exact spearmanr pointbiserialr kendalltau weightedtau linregress theilslopes Inferential Stats ----------------- .. autosummary:: :toctree: generated/ ttest_1samp ttest_ind ttest_ind_from_stats ttest_rel chisquare power_divergence ks_2samp mannwhitneyu ranksums wilcoxon kruskal friedmanchisquare combine_pvalues Statistical Distances --------------------- .. autosummary:: :toctree: generated/ wasserstein_distance energy_distance ANOVA Functions --------------- .. autosummary:: :toctree: generated/ f_oneway Support Functions ----------------- .. autosummary:: :toctree: generated/ rankdata References ---------- .. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. """ from __future__ import division, print_function, absolute_import import warnings import math from collections import namedtuple import numpy as np from numpy import array, asarray, ma, zeros from scipy._lib.six import callable, string_types from scipy._lib._version import NumpyVersion import scipy.special as special import scipy.linalg as linalg from . import distributions from . 
import mstats_basic from ._distn_infrastructure import _lazywhere from ._stats_mstats_common import _find_repeats, linregress, theilslopes from ._stats import _kendall_dis, _toint64, _weightedrankedtau __all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar', 'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation', 'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest', 'normaltest', 'jarque_bera', 'itemfreq', 'scoreatpercentile', 'percentileofscore', 'cumfreq', 'relfreq', 'obrientransform', 'sem', 'zmap', 'zscore', 'iqr', 'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway', 'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr', 'kendalltau', 'weightedtau', 'linregress', 'theilslopes', 'ttest_1samp', 'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest', 'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu', 'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare', 'rankdata', 'combine_pvalues', 'wasserstein_distance', 'energy_distance'] def _chk_asarray(a, axis): if axis is None: a = np.ravel(a) outaxis = 0 else: a = np.asarray(a) outaxis = axis if a.ndim == 0: a = np.atleast_1d(a) return a, outaxis def _chk2_asarray(a, b, axis): if axis is None: a = np.ravel(a) b = np.ravel(b) outaxis = 0 else: a = np.asarray(a) b = np.asarray(b) outaxis = axis if a.ndim == 0: a = np.atleast_1d(a) if b.ndim == 0: b = np.atleast_1d(b) return a, b, outaxis def _contains_nan(a, nan_policy='propagate'): policies = ['propagate', 'raise', 'omit'] if nan_policy not in policies: raise ValueError("nan_policy must be one of {%s}" % ', '.join("'%s'" % s for s in policies)) try: # Calling np.sum to avoid creating a huge array into memory # e.g. np.isnan(a).any() with np.errstate(invalid='ignore'): contains_nan = np.isnan(np.sum(a)) except TypeError: # If the check cannot be properly performed we fallback to omitting # nan values and raising a warning. This can happen when attempting to # sum things that are not numbers (e.g. as in the function `mode`). contains_nan = False nan_policy = 'omit' warnings.warn("The input array could not be properly checked for nan " "values. nan values will be ignored.", RuntimeWarning) if contains_nan and nan_policy == 'raise': raise ValueError("The input contains nan values") return (contains_nan, nan_policy) def gmean(a, axis=0, dtype=None): """ Compute the geometric mean along the specified axis. Return the geometric average of the array elements. That is: n-th root of (x1 * x2 * ... * xn) Parameters ---------- a : array_like Input array or object that can be converted to an array. axis : int or None, optional Axis along which the geometric mean is computed. Default is 0. If None, compute over the whole array `a`. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. Returns ------- gmean : ndarray see dtype parameter above See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average hmean : Harmonic mean Notes ----- The geometric average is computed over a single dimension of the input array, axis=0 by default, or all values in the array if axis=None. float64 intermediate and return values are used for integer inputs. 
Use masked arrays to ignore any non-finite values in the input or that arise in the calculations such as Not a Number and infinity because masked arrays automatically mask any non-finite values. Examples -------- >>> from scipy.stats import gmean >>> gmean([1, 4]) 2.0 >>> gmean([1, 2, 3, 4, 5, 6, 7]) 3.3800151591412964 """ if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it log_a = np.log(np.array(a, dtype=dtype)) elif dtype: # Must change the default dtype allowing array type if isinstance(a, np.ma.MaskedArray): log_a = np.log(np.ma.asarray(a, dtype=dtype)) else: log_a = np.log(np.asarray(a, dtype=dtype)) else: log_a = np.log(a) return np.exp(log_a.mean(axis=axis)) def hmean(a, axis=0, dtype=None): """ Calculate the harmonic mean along the specified axis. That is: n / (1/x1 + 1/x2 + ... + 1/xn) Parameters ---------- a : array_like Input array, masked array or object that can be converted to an array. axis : int or None, optional Axis along which the harmonic mean is computed. Default is 0. If None, compute over the whole array `a`. dtype : dtype, optional Type of the returned array and of the accumulator in which the elements are summed. If `dtype` is not specified, it defaults to the dtype of `a`, unless `a` has an integer `dtype` with a precision less than that of the default platform integer. In that case, the default platform integer is used. Returns ------- hmean : ndarray see `dtype` parameter above See Also -------- numpy.mean : Arithmetic average numpy.average : Weighted average gmean : Geometric mean Notes ----- The harmonic mean is computed over a single dimension of the input array, axis=0 by default, or all values in the array if axis=None. float64 intermediate and return values are used for integer inputs. Use masked arrays to ignore any non-finite values in the input or that arise in the calculations such as Not a Number and infinity. Examples -------- >>> from scipy.stats import hmean >>> hmean([1, 4]) 1.6000000000000001 >>> hmean([1, 2, 3, 4, 5, 6, 7]) 2.6997245179063363 """ if not isinstance(a, np.ndarray): a = np.array(a, dtype=dtype) if np.all(a > 0): # Harmonic mean only defined if greater than zero if isinstance(a, np.ma.MaskedArray): size = a.count(axis) else: if axis is None: a = a.ravel() size = a.shape[0] else: size = a.shape[axis] return size / np.sum(1.0 / a, axis=axis, dtype=dtype) else: raise ValueError("Harmonic mean only defined if all elements greater " "than zero") ModeResult = namedtuple('ModeResult', ('mode', 'count')) def mode(a, axis=0, nan_policy='propagate'): """ Return an array of the modal (most common) value in the passed array. If there is more than one such value, only the smallest is returned. The bin-count for the modal bins is also returned. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- mode : ndarray Array of modal values. count : ndarray Array of counts for each mode. Examples -------- >>> a = np.array([[6, 8, 3, 0], ... [3, 2, 1, 7], ... [8, 1, 8, 4], ... [5, 3, 0, 5], ... 
[4, 7, 5, 9]]) >>> from scipy import stats >>> stats.mode(a) (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]])) To get mode of whole array, specify ``axis=None``: >>> stats.mode(a, axis=None) (array([3]), array([3])) """ a, axis = _chk_asarray(a, axis) if a.size == 0: return ModeResult(np.array([]), np.array([])) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.mode(a, axis) scores = np.unique(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape, dtype=a.dtype) oldcounts = np.zeros(testshape, dtype=int) for score in scores: template = (a == score) counts = np.expand_dims(np.sum(template, axis), axis) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return ModeResult(mostfrequent, oldcounts) def _mask_to_limits(a, limits, inclusive): """Mask an array for values outside of given limits. This is primarily a utility function. Parameters ---------- a : array limits : (float or None, float or None) A tuple consisting of the (lower limit, upper limit). Values in the input array less than the lower limit or greater than the upper limit will be masked out. None implies no limit. inclusive : (bool, bool) A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to lower or upper are allowed. Returns ------- A MaskedArray. Raises ------ A ValueError if there are no values within the given limits. """ lower_limit, upper_limit = limits lower_include, upper_include = inclusive am = ma.MaskedArray(a) if lower_limit is not None: if lower_include: am = ma.masked_less(am, lower_limit) else: am = ma.masked_less_equal(am, lower_limit) if upper_limit is not None: if upper_include: am = ma.masked_greater(am, upper_limit) else: am = ma.masked_greater_equal(am, upper_limit) if am.count() == 0: raise ValueError("No array values within given limits") return am def tmean(a, limits=None, inclusive=(True, True), axis=None): """ Compute the trimmed mean. This function finds the arithmetic mean of given values, ignoring values outside the given `limits`. Parameters ---------- a : array_like Array of values. limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None (default), then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to compute test. Default is None. Returns ------- tmean : float See also -------- trim_mean : returns mean after trimming a proportion from both tails. Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmean(x) 9.5 >>> stats.tmean(x, (3,17)) 10.0 """ a = asarray(a) if limits is None: return np.mean(a, None) am = _mask_to_limits(a.ravel(), limits, inclusive) return am.mean(axis=axis) def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1): """ Compute the trimmed variance. This function computes the sample variance of an array of values, while ignoring values which are outside of given `limits`. Parameters ---------- a : array_like Array of values. 
limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tvar : float Trimmed variance. Notes ----- `tvar` computes the unbiased sample variance, i.e. it uses a correction factor ``n / (n - 1)``. Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tvar(x) 35.0 >>> stats.tvar(x, (3,17)) 20.0 """ a = asarray(a) a = a.astype(float).ravel() if limits is None: n = len(a) return a.var() * n / (n - 1.) am = _mask_to_limits(a, limits, inclusive) return np.ma.var(am, ddof=ddof, axis=axis) def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'): """ Compute the trimmed minimum. This function finds the miminum value of an array `a` along the specified axis, but only considering values greater than a specified lower limit. Parameters ---------- a : array_like array of values lowerlimit : None or float, optional Values in the input array less than the given limit will be ignored. When lowerlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. inclusive : {True, False}, optional This flag determines whether values exactly equal to the lower limit are included. The default value is True. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- tmin : float, int or ndarray Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmin(x) 0 >>> stats.tmin(x, 13) 13 >>> stats.tmin(x, 13, inclusive=False) 14 """ a, axis = _chk_asarray(a, axis) am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False)) contains_nan, nan_policy = _contains_nan(am, nan_policy) if contains_nan and nan_policy == 'omit': am = ma.masked_invalid(am) res = ma.minimum.reduce(am, axis).data if res.ndim == 0: return res[()] return res def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'): """ Compute the trimmed maximum. This function computes the maximum value of an array along a given axis, while ignoring values larger than a specified upper limit. Parameters ---------- a : array_like array of values upperlimit : None or float, optional Values in the input array greater than the given limit will be ignored. When upperlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. inclusive : {True, False}, optional This flag determines whether values exactly equal to the upper limit are included. The default value is True. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 
'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- tmax : float, int or ndarray Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmax(x) 19 >>> stats.tmax(x, 13) 13 >>> stats.tmax(x, 13, inclusive=False) 12 """ a, axis = _chk_asarray(a, axis) am = _mask_to_limits(a, (None, upperlimit), (False, inclusive)) contains_nan, nan_policy = _contains_nan(am, nan_policy) if contains_nan and nan_policy == 'omit': am = ma.masked_invalid(am) res = ma.maximum.reduce(am, axis).data if res.ndim == 0: return res[()] return res def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1): """ Compute the trimmed sample standard deviation. This function finds the sample standard deviation of given values, ignoring values outside the given `limits`. Parameters ---------- a : array_like array of values limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tstd : float Notes ----- `tstd` computes the unbiased sample standard deviation, i.e. it uses a correction factor ``n / (n - 1)``. Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tstd(x) 5.9160797830996161 >>> stats.tstd(x, (3,17)) 4.4721359549995796 """ return np.sqrt(tvar(a, limits, inclusive, axis, ddof)) def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): """ Compute the trimmed standard error of the mean. This function finds the standard error of the mean for given values, ignoring values outside the given `limits`. Parameters ---------- a : array_like array of values limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tsem : float Notes ----- `tsem` uses unbiased sample standard deviation, i.e. it uses a correction factor ``n / (n - 1)``. 
Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.tsem(x) 1.3228756555322954 >>> stats.tsem(x, (3,17)) 1.1547005383792515 """ a = np.asarray(a).ravel() if limits is None: return a.std(ddof=ddof) / np.sqrt(a.size) am = _mask_to_limits(a, limits, inclusive) sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis)) return sd / np.sqrt(am.count()) ##################################### # MOMENTS # ##################################### def moment(a, moment=1, axis=0, nan_policy='propagate'): r""" Calculate the nth moment about the mean for a sample. A moment is a specific quantitative measure of the shape of a set of points. It is often used to calculate coefficients of skewness and kurtosis due to its close relationship with them. Parameters ---------- a : array_like data moment : int or array_like of ints, optional order of central moment that is returned. Default is 1. axis : int or None, optional Axis along which the central moment is computed. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- n-th central moment : ndarray or float The appropriate moment along the given axis or over all values if axis is None. The denominator for the moment calculation is the number of observations, no degrees of freedom correction is done. See also -------- kurtosis, skew, describe Notes ----- The k-th central moment of a data sample is: .. math:: m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k Where n is the number of samples and x-bar is the mean. This function uses exponentiation by squares [1]_ for efficiency. References ---------- .. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms Examples -------- >>> from scipy.stats import moment >>> moment([1, 2, 3, 4, 5], moment=1) 0.0 >>> moment([1, 2, 3, 4, 5], moment=2) 2.0 """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.moment(a, moment, axis) if a.size == 0: # empty array, return nan(s) with shape matching `moment` if np.isscalar(moment): return np.nan else: return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan # for array_like moment input, return a value for each. if not np.isscalar(moment): mmnt = [_moment(a, i, axis) for i in moment] return np.array(mmnt) else: return _moment(a, moment, axis) def _moment(a, moment, axis): if np.abs(moment - np.round(moment)) > 0: raise ValueError("All moment parameters must be integers") if moment == 0: # When moment equals 0, the result is 1, by definition. shape = list(a.shape) del shape[axis] if shape: # return an actual array of the appropriate shape return np.ones(shape, dtype=float) else: # the input was 1D, so return a scalar instead of a rank-0 array return 1.0 elif moment == 1: # By definition the first moment about the mean is 0. 
shape = list(a.shape) del shape[axis] if shape: # return an actual array of the appropriate shape return np.zeros(shape, dtype=float) else: # the input was 1D, so return a scalar instead of a rank-0 array return np.float64(0.0) else: # Exponentiation by squares: form exponent sequence n_list = [moment] current_n = moment while current_n > 2: if current_n % 2: current_n = (current_n - 1) / 2 else: current_n /= 2 n_list.append(current_n) # Starting point for exponentiation by squares a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis) if n_list[-1] == 1: s = a_zero_mean.copy() else: s = a_zero_mean**2 # Perform multiplications for n in n_list[-2::-1]: s = s**2 if n % 2: s *= a_zero_mean return np.mean(s, axis) def variation(a, axis=0, nan_policy='propagate'): """ Compute the coefficient of variation, the ratio of the biased standard deviation to the mean. Parameters ---------- a : array_like Input array. axis : int or None, optional Axis along which to calculate the coefficient of variation. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- variation : ndarray The calculated variation along the requested axis. References ---------- .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. Examples -------- >>> from scipy.stats import variation >>> variation([1, 2, 3, 4, 5]) 0.47140452079103173 """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.variation(a, axis) return a.std(axis) / a.mean(axis) def skew(a, axis=0, bias=True, nan_policy='propagate'): """ Compute the skewness of a data set. For normally distributed data, the skewness should be about 0. For unimodal continuous distributions, a skewness value > 0 means that there is more weight in the right tail of the distribution. The function `skewtest` can be used to determine if the skewness value is close enough to 0, statistically speaking. Parameters ---------- a : ndarray data axis : int or None, optional Axis along which skewness is calculated. Default is 0. If None, compute over the whole array `a`. bias : bool, optional If False, then the calculations are corrected for statistical bias. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- skewness : ndarray The skewness of values along an axis, returning 0 where all values are equal. References ---------- .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. Section 2.2.24.1 Examples -------- >>> from scipy.stats import skew >>> skew([1, 2, 3, 4, 5]) 0.0 >>> skew([2, 8, 0, 4, 1, 9, 9, 0]) 0.2650554122698573 """ a, axis = _chk_asarray(a, axis) n = a.shape[axis] contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.skew(a, axis, bias) m2 = moment(a, 2, axis) m3 = moment(a, 3, axis) zero = (m2 == 0) vals = _lazywhere(~zero, (m2, m3), lambda m2, m3: m3 / m2**1.5, 0.) 
if not bias: can_correct = (n > 2) & (m2 > 0) if can_correct.any(): m2 = np.extract(can_correct, m2) m3 = np.extract(can_correct, m3) nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5 np.place(vals, can_correct, nval) if vals.ndim == 0: return vals.item() return vals def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'): """ Compute the kurtosis (Fisher or Pearson) of a dataset. Kurtosis is the fourth central moment divided by the square of the variance. If Fisher's definition is used, then 3.0 is subtracted from the result to give 0.0 for a normal distribution. If bias is False then the kurtosis is calculated using k statistics to eliminate bias coming from biased moment estimators Use `kurtosistest` to see if result is close enough to normal. Parameters ---------- a : array data for which the kurtosis is calculated axis : int or None, optional Axis along which the kurtosis is calculated. Default is 0. If None, compute over the whole array `a`. fisher : bool, optional If True, Fisher's definition is used (normal ==> 0.0). If False, Pearson's definition is used (normal ==> 3.0). bias : bool, optional If False, then the calculations are corrected for statistical bias. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- kurtosis : array The kurtosis of values along an axis. If all values are equal, return -3 for Fisher's definition and 0 for Pearson's definition. References ---------- .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. Examples -------- >>> from scipy.stats import kurtosis >>> kurtosis([1, 2, 3, 4, 5]) -1.3 """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.kurtosis(a, axis, fisher, bias) n = a.shape[axis] m2 = moment(a, 2, axis) m4 = moment(a, 4, axis) zero = (m2 == 0) olderr = np.seterr(all='ignore') try: vals = np.where(zero, 0, m4 / m2**2.0) finally: np.seterr(**olderr) if not bias: can_correct = (n > 3) & (m2 > 0) if can_correct.any(): m2 = np.extract(can_correct, m2) m4 = np.extract(can_correct, m4) nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0) np.place(vals, can_correct, nval + 3.0) if vals.ndim == 0: vals = vals.item() # array scalar if fisher: return vals - 3 else: return vals DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean', 'variance', 'skewness', 'kurtosis')) def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'): """ Compute several descriptive statistics of the passed array. Parameters ---------- a : array_like Input data. axis : int or None, optional Axis along which statistics are calculated. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees of freedom (only for variance). Default is 1. bias : bool, optional If False, then the skewness and kurtosis calculations are corrected for statistical bias. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- nobs : int or ndarray of ints Number of observations (length of data along `axis`). 
When 'omit' is chosen as nan_policy, each column is counted separately. minmax: tuple of ndarrays or floats Minimum and maximum value of data array. mean : ndarray or float Arithmetic mean of data along axis. variance : ndarray or float Unbiased variance of the data along axis, denominator is number of observations minus one. skewness : ndarray or float Skewness, based on moment calculations with denominator equal to the number of observations, i.e. no degrees of freedom correction. kurtosis : ndarray or float Kurtosis (Fisher). The kurtosis is normalized so that it is zero for the normal distribution. No degrees of freedom are used. See Also -------- skew, kurtosis Examples -------- >>> from scipy import stats >>> a = np.arange(10) >>> stats.describe(a) DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.166666666666666, skewness=0.0, kurtosis=-1.2242424242424244) >>> b = [[1, 2], [3, 4]] >>> stats.describe(b) DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])), mean=array([2., 3.]), variance=array([2., 2.]), skewness=array([0., 0.]), kurtosis=array([-2., -2.])) """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.describe(a, axis, ddof, bias) if a.size == 0: raise ValueError("The input must not be empty.") n = a.shape[axis] mm = (np.min(a, axis=axis), np.max(a, axis=axis)) m = np.mean(a, axis=axis) v = np.var(a, axis=axis, ddof=ddof) sk = skew(a, axis, bias=bias) kurt = kurtosis(a, axis, bias=bias) return DescribeResult(n, mm, m, v, sk, kurt) ##################################### # NORMALITY TESTS # ##################################### SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue')) def skewtest(a, axis=0, nan_policy='propagate'): """ Test whether the skew is different from the normal distribution. This function tests the null hypothesis that the skewness of the population that the sample was drawn from is the same as that of a corresponding normal distribution. Parameters ---------- a : array The data to be tested axis : int or None, optional Axis along which statistics are calculated. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float The computed z-score for this test. pvalue : float a 2-sided p-value for the hypothesis test Notes ----- The sample size must be at least 8. References ---------- .. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr., "A suggestion for using powerful and informative tests of normality", American Statistician 44, pp. 316-321, 1990. 
Examples -------- >>> from scipy.stats import skewtest >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8]) SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897) >>> skewtest([2, 8, 0, 4, 1, 9, 9, 0]) SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459) >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000]) SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133) >>> skewtest([100, 100, 100, 100, 100, 100, 100, 101]) SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634) """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.skewtest(a, axis) if axis is None: a = np.ravel(a) axis = 0 b2 = skew(a, axis) n = float(a.shape[axis]) if n < 8: raise ValueError( "skewtest is not valid with less than 8 samples; %i samples" " were given." % int(n)) y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2))) beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) / ((n-2.0) * (n+5) * (n+7) * (n+9))) W2 = -1 + math.sqrt(2 * (beta2 - 1)) delta = 1 / math.sqrt(0.5 * math.log(W2)) alpha = math.sqrt(2.0 / (W2 - 1)) y = np.where(y == 0, 1, y) Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1)) return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue')) def kurtosistest(a, axis=0, nan_policy='propagate'): """ Test whether a dataset has normal kurtosis. This function tests the null hypothesis that the kurtosis of the population from which the sample was drawn is that of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``. Parameters ---------- a : array array of the sample data axis : int or None, optional Axis along which to compute test. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float The computed z-score for this test. pvalue : float The 2-sided p-value for the hypothesis test Notes ----- Valid only for n>20. The Z-score is set to 0 for bad entries. This function uses the method described in [1]_. References ---------- .. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983. Examples -------- >>> from scipy.stats import kurtosistest >>> kurtosistest(list(range(20))) KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348) >>> np.random.seed(28041990) >>> s = np.random.normal(0, 1, 1000) >>> kurtosistest(s) KurtosistestResult(statistic=1.2317590987707365, pvalue=0.21803908613450895) """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.kurtosistest(a, axis) n = float(a.shape[axis]) if n < 5: raise ValueError( "kurtosistest requires at least 5 observations; %i observations" " were given." % int(n)) if n < 20: warnings.warn("kurtosistest only valid for n>=20 ... continuing " "anyway, n=%i" % int(n)) b2 = kurtosis(a, axis, fisher=False) E = 3.0*(n-1) / (n+1) varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1 x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4 # [1]_ Eq. 
2: sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) / (n*(n-2)*(n-3))) # [1]_ Eq. 3: A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) term1 = 1 - 2/(9.0*A) denom = 1 + x*np.sqrt(2/(A-4.0)) denom = np.where(denom < 0, 99, denom) term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0)) Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5 Z = np.where(denom == 99, 0, Z) if Z.ndim == 0: Z = Z[()] # zprob uses upper tail, so Z needs to be positive return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue')) def normaltest(a, axis=0, nan_policy='propagate'): """ Test whether a sample differs from a normal distribution. This function tests the null hypothesis that a sample comes from a normal distribution. It is based on D'Agostino and Pearson's [1]_, [2]_ test that combines skew and kurtosis to produce an omnibus test of normality. Parameters ---------- a : array_like The array containing the sample to be tested. axis : int or None, optional Axis along which to compute test. Default is 0. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float or array ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and ``k`` is the z-score returned by `kurtosistest`. pvalue : float or array A 2-sided chi squared probability for the hypothesis test. References ---------- .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for moderate and large sample size", Biometrika, 58, 341-348 .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from normality", Biometrika, 60, 613-622 Examples -------- >>> from scipy import stats >>> pts = 1000 >>> np.random.seed(28041990) >>> a = np.random.normal(0, 1, size=pts) >>> b = np.random.normal(2, 1, size=pts) >>> x = np.concatenate((a, b)) >>> k2, p = stats.normaltest(x) >>> alpha = 1e-3 >>> print("p = {:g}".format(p)) p = 3.27207e-11 >>> if p < alpha: # null hypothesis: x comes from a normal distribution ... print("The null hypothesis can be rejected") ... else: ... print("The null hypothesis cannot be rejected") The null hypothesis can be rejected """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.normaltest(a, axis) s, _ = skewtest(a, axis) k, _ = kurtosistest(a, axis) k2 = s*s + k*k return NormaltestResult(k2, distributions.chi2.sf(k2, 2)) def jarque_bera(x): """ Perform the Jarque-Bera goodness of fit test on sample data. The Jarque-Bera test tests whether the sample data has the skewness and kurtosis matching a normal distribution. Note that this test only works for a large enough number of data samples (>2000) as the test statistic asymptotically has a Chi-squared distribution with 2 degrees of freedom. Parameters ---------- x : array_like Observations of a random variable. Returns ------- jb_value : float The test statistic. p : float The p-value for the hypothesis test. References ---------- .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality, homoscedasticity and serial independence of regression residuals", 6 Econometric Letters 255-259. 
Examples -------- >>> from scipy import stats >>> np.random.seed(987654321) >>> x = np.random.normal(0, 1, 100000) >>> y = np.random.rayleigh(1, 100000) >>> stats.jarque_bera(x) (4.7165707989581342, 0.09458225503041906) >>> stats.jarque_bera(y) (6713.7098548143422, 0.0) """ x = np.asarray(x) n = float(x.size) if n == 0: raise ValueError('At least one observation is required.') mu = x.mean() diffx = x - mu skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.) kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2 jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4) p = 1 - distributions.chi2.cdf(jb_value, 2) return jb_value, p ##################################### # FREQUENCY FUNCTIONS # ##################################### @np.deprecate(message="`itemfreq` is deprecated and will be removed in a " "future version. Use instead `np.unique(..., return_counts=True)`") def itemfreq(a): """ Return a 2-D array of item frequencies. Parameters ---------- a : (N,) array_like Input array. Returns ------- itemfreq : (K, 2) ndarray A 2-D frequency table. Column 1 contains sorted, unique values from `a`, column 2 contains their respective counts. Examples -------- >>> from scipy import stats >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4]) >>> stats.itemfreq(a) array([[ 0., 2.], [ 1., 4.], [ 2., 2.], [ 4., 1.], [ 5., 1.]]) >>> np.bincount(a) array([2, 4, 2, 0, 1, 1]) >>> stats.itemfreq(a/10.) array([[ 0. , 2. ], [ 0.1, 4. ], [ 0.2, 2. ], [ 0.4, 1. ], [ 0.5, 1. ]]) """ items, inv = np.unique(a, return_inverse=True) freq = np.bincount(inv) return np.array([items, freq]).T def scoreatpercentile(a, per, limit=(), interpolation_method='fraction', axis=None): """ Calculate the score at a given percentile of the input sequence. For example, the score at `per=50` is the median. If the desired quantile lies between two data points, we interpolate between them, according to the value of `interpolation`. If the parameter `limit` is provided, it should be a tuple (lower, upper) of two values. Parameters ---------- a : array_like A 1-D array of values from which to extract score. per : array_like Percentile(s) at which to extract score. Values should be in range [0,100]. limit : tuple, optional Tuple of two scalars, the lower and upper limits within which to compute the percentile. Values of `a` outside this (closed) interval will be ignored. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j` - fraction: ``i + (j - i) * fraction`` where ``fraction`` is the fractional part of the index surrounded by ``i`` and ``j``. - lower: ``i``. - higher: ``j``. axis : int, optional Axis along which the percentiles are computed. Default is None. If None, compute over the whole array `a`. Returns ------- score : float or ndarray Score at percentile(s). See Also -------- percentileofscore, numpy.percentile Notes ----- This function will become obsolete in the future. For Numpy 1.9 and higher, `numpy.percentile` provides all the functionality that `scoreatpercentile` provides. And it's significantly faster. Therefore it's recommended to use `numpy.percentile` for users that have numpy >= 1.9. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 """ # adapted from NumPy's percentile function. When we require numpy >= 1.8, # the implementation of this function can be replaced by np.percentile. 
a = np.asarray(a) if a.size == 0: # empty array, return nan(s) with shape matching `per` if np.isscalar(per): return np.nan else: return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan if limit: a = a[(limit[0] <= a) & (a <= limit[1])] sorted = np.sort(a, axis=axis) if axis is None: axis = 0 return _compute_qth_percentile(sorted, per, interpolation_method, axis) # handle sequence of per's without calling sort multiple times def _compute_qth_percentile(sorted, per, interpolation_method, axis): if not np.isscalar(per): score = [_compute_qth_percentile(sorted, i, interpolation_method, axis) for i in per] return np.array(score) if (per < 0) or (per > 100): raise ValueError("percentile must be in the range [0, 100]") indexer = [slice(None)] * sorted.ndim idx = per / 100. * (sorted.shape[axis] - 1) if int(idx) != idx: # round fractional indices according to interpolation method if interpolation_method == 'lower': idx = int(np.floor(idx)) elif interpolation_method == 'higher': idx = int(np.ceil(idx)) elif interpolation_method == 'fraction': pass # keep idx as fraction and interpolate else: raise ValueError("interpolation_method can only be 'fraction', " "'lower' or 'higher'") i = int(idx) if i == idx: indexer[axis] = slice(i, i + 1) weights = array(1) sumval = 1.0 else: indexer[axis] = slice(i, i + 2) j = i + 1 weights = array([(j - idx), (idx - i)], float) wshape = [1] * sorted.ndim wshape[axis] = 2 weights.shape = wshape sumval = weights.sum() # Use np.add.reduce (== np.sum but a little faster) to coerce data type return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval def percentileofscore(a, score, kind='rank'): """ The percentile rank of a score relative to a list of scores. A `percentileofscore` of, for example, 80% means that 80% of the scores in `a` are below the given score. In the case of gaps or ties, the exact definition depends on the optional keyword, `kind`. Parameters ---------- a : array_like Array of scores to which `score` is compared. score : int or float Score that is compared to the elements in `a`. kind : {'rank', 'weak', 'strict', 'mean'}, optional This optional parameter specifies the interpretation of the resulting score: - "rank": Average percentage ranking of score. In case of multiple matches, average the percentage rankings of all matching scores. - "weak": This kind corresponds to the definition of a cumulative distribution function. A percentileofscore of 80% means that 80% of values are less than or equal to the provided score. - "strict": Similar to "weak", except that only values that are strictly less than the given score are counted. - "mean": The average of the "weak" and "strict" scores, often used in testing. See http://en.wikipedia.org/wiki/Percentile_rank Returns ------- pcos : float Percentile-position of score (0-100) relative to `a`. 
See Also -------- numpy.percentile Examples -------- Three-quarters of the given values lie below a given score: >>> from scipy import stats >>> stats.percentileofscore([1, 2, 3, 4], 3) 75.0 With multiple matches, note how the scores of the two matches, 0.6 and 0.8 respectively, are averaged: >>> stats.percentileofscore([1, 2, 3, 3, 4], 3) 70.0 Only 2/5 values are strictly less than 3: >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict') 40.0 But 4/5 values are less than or equal to 3: >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak') 80.0 The average between the weak and the strict scores is >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean') 60.0 """ if np.isnan(score): return np.nan a = np.asarray(a) n = len(a) if n == 0: return 100.0 if kind == 'rank': left = np.count_nonzero(a < score) right = np.count_nonzero(a <= score) pct = (right + left + (1 if right > left else 0)) * 50.0/n return pct elif kind == 'strict': return np.count_nonzero(a < score) / float(n) * 100 elif kind == 'weak': return np.count_nonzero(a <= score) / float(n) * 100 elif kind == 'mean': pct = (np.count_nonzero(a < score) + np.count_nonzero(a <= score)) / float(n) * 50 return pct else: raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'") HistogramResult = namedtuple('HistogramResult', ('count', 'lowerlimit', 'binsize', 'extrapoints')) def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False): """ Separate the range into several bins and return the number of instances in each bin. Parameters ---------- a : array_like Array of scores which will be put into bins. numbins : int, optional The number of bins to use for the histogram. Default is 10. defaultlimits : tuple (lower, upper), optional The lower and upper values for the range of the histogram. If no value is given, a range slightly larger than the range of the values in a is used. Specifically ``(a.min() - s, a.max() + s)``, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. weights : array_like, optional The weights for each value in `a`. Default is None, which gives each value a weight of 1.0 printextras : bool, optional If True, if there are extra points (i.e. the points that fall outside the bin limits) a warning is raised saying how many of those points there are. Default is False. Returns ------- count : ndarray Number of points (or sum of weights) in each bin. lowerlimit : float Lowest value of histogram, the lower limit of the first bin. binsize : float The size of the bins (all bins have the same size). extrapoints : int The number of points outside the range of the histogram. See Also -------- numpy.histogram Notes ----- This histogram is based on numpy's histogram but has a larger range by default if default limits is not set. """ a = np.ravel(a) if defaultlimits is None: if a.size == 0: # handle empty arrays. Undetermined range, so use 0-1. defaultlimits = (0, 1) else: # no range given, so use values in `a` data_min = a.min() data_max = a.max() # Have bins extend past min and max values slightly s = (data_max - data_min) / (2. 
* (numbins - 1.)) defaultlimits = (data_min - s, data_max + s) # use numpy's histogram method to compute bins hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits, weights=weights) # hist are not always floats, convert to keep with old output hist = np.array(hist, dtype=float) # fixed width for bins is assumed, as numpy's histogram gives # fixed width bins for int values for 'bins' binsize = bin_edges[1] - bin_edges[0] # calculate number of extra points extrapoints = len([v for v in a if defaultlimits[0] > v or v > defaultlimits[1]]) if extrapoints > 0 and printextras: warnings.warn("Points outside given histogram range = %s" % extrapoints) return HistogramResult(hist, defaultlimits[0], binsize, extrapoints) CumfreqResult = namedtuple('CumfreqResult', ('cumcount', 'lowerlimit', 'binsize', 'extrapoints')) def cumfreq(a, numbins=10, defaultreallimits=None, weights=None): """ Return a cumulative frequency histogram, using the histogram function. A cumulative histogram is a mapping that counts the cumulative number of observations in all of the bins up to the specified bin. Parameters ---------- a : array_like Input array. numbins : int, optional The number of bins to use for the histogram. Default is 10. defaultreallimits : tuple (lower, upper), optional The lower and upper values for the range of the histogram. If no value is given, a range slightly larger than the range of the values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. weights : array_like, optional The weights for each value in `a`. Default is None, which gives each value a weight of 1.0 Returns ------- cumcount : ndarray Binned values of cumulative frequency. lowerlimit : float Lower real limit binsize : float Width of each bin. extrapoints : int Extra points. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> x = [1, 4, 2, 1, 3, 1] >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) >>> res.cumcount array([ 1., 2., 3., 3.]) >>> res.extrapoints 3 Create a normal distribution with 1000 random values >>> rng = np.random.RandomState(seed=12345) >>> samples = stats.norm.rvs(size=1000, random_state=rng) Calculate cumulative frequencies >>> res = stats.cumfreq(samples, numbins=25) Calculate space of values for x >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size, ... res.cumcount.size) Plot histogram and cumulative histogram >>> fig = plt.figure(figsize=(10, 4)) >>> ax1 = fig.add_subplot(1, 2, 1) >>> ax2 = fig.add_subplot(1, 2, 2) >>> ax1.hist(samples, bins=25) >>> ax1.set_title('Histogram') >>> ax2.bar(x, res.cumcount, width=res.binsize) >>> ax2.set_title('Cumulative histogram') >>> ax2.set_xlim([x.min(), x.max()]) >>> plt.show() """ h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) cumhist = np.cumsum(h * 1, axis=0) return CumfreqResult(cumhist, l, b, e) RelfreqResult = namedtuple('RelfreqResult', ('frequency', 'lowerlimit', 'binsize', 'extrapoints')) def relfreq(a, numbins=10, defaultreallimits=None, weights=None): """ Return a relative frequency histogram, using the histogram function. A relative frequency histogram is a mapping of the number of observations in each of the bins relative to the total of observations. Parameters ---------- a : array_like Input array. numbins : int, optional The number of bins to use for the histogram. Default is 10. defaultreallimits : tuple (lower, upper), optional The lower and upper values for the range of the histogram. 
If no value is given, a range slightly larger than the range of the values in a is used. Specifically ``(a.min() - s, a.max() + s)``, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``. weights : array_like, optional The weights for each value in `a`. Default is None, which gives each value a weight of 1.0 Returns ------- frequency : ndarray Binned values of relative frequency. lowerlimit : float Lower real limit binsize : float Width of each bin. extrapoints : int Extra points. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> a = np.array([2, 4, 1, 2, 3, 2]) >>> res = stats.relfreq(a, numbins=4) >>> res.frequency array([ 0.16666667, 0.5 , 0.16666667, 0.16666667]) >>> np.sum(res.frequency) # relative frequencies should add up to 1 1.0 Create a normal distribution with 1000 random values >>> rng = np.random.RandomState(seed=12345) >>> samples = stats.norm.rvs(size=1000, random_state=rng) Calculate relative frequencies >>> res = stats.relfreq(samples, numbins=25) Calculate space of values for x >>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size, ... res.frequency.size) Plot relative frequency histogram >>> fig = plt.figure(figsize=(5, 4)) >>> ax = fig.add_subplot(1, 1, 1) >>> ax.bar(x, res.frequency, width=res.binsize) >>> ax.set_title('Relative frequency histogram') >>> ax.set_xlim([x.min(), x.max()]) >>> plt.show() """ a = np.asanyarray(a) h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights) h = h / float(a.shape[0]) return RelfreqResult(h, l, b, e) ##################################### # VARIABILITY FUNCTIONS # ##################################### def obrientransform(*args): """ Compute the O'Brien transform on input data (any number of arrays). Used to test for homogeneity of variance prior to running one-way stats. Each array in ``*args`` is one level of a factor. If `f_oneway` is run on the transformed data and found significant, the variances are unequal. From Maxwell and Delaney [1]_, p.112. Parameters ---------- args : tuple of array_like Any number of arrays. Returns ------- obrientransform : ndarray Transformed data for use in an ANOVA. The first dimension of the result corresponds to the sequence of transformed arrays. If the arrays given are all 1-D of the same length, the return value is a 2-D array; otherwise it is a 1-D array of type object, with each element being an ndarray. References ---------- .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990. Examples -------- We'll test the following data sets for differences in their variance. >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10] >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15] Apply the O'Brien transform to the data. >>> from scipy.stats import obrientransform >>> tx, ty = obrientransform(x, y) Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the transformed data. >>> from scipy.stats import f_oneway >>> F, p = f_oneway(tx, ty) >>> p 0.1314139477040335 If we require that ``p < 0.05`` for significance, we cannot conclude that the variances are different. """ TINY = np.sqrt(np.finfo(float).eps) # `arrays` will hold the transformed arguments. arrays = [] for arg in args: a = np.asarray(arg) n = len(a) mu = np.mean(a) sq = (a - mu)**2 sumsq = sq.sum() # The O'Brien transform. t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2)) # Check that the mean of the transformed data is equal to the # original variance. 
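        # The transform is constructed so that, algebraically, the mean of
        # the transformed scores t equals the sample variance of the original
        # data; a discrepancy larger than floating-point noise therefore
        # signals a numerical problem rather than a statistical one.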
var = sumsq / (n - 1) if abs(var - np.mean(t)) > TINY: raise ValueError('Lack of convergence in obrientransform.') arrays.append(t) return np.array(arrays) def sem(a, axis=0, ddof=1, nan_policy='propagate'): """ Calculate the standard error of the mean (or standard error of measurement) of the values in the input array. Parameters ---------- a : array_like An array containing the values for which the standard error is returned. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Delta degrees-of-freedom. How many degrees of freedom to adjust for bias in limited samples relative to the population estimate of variance. Defaults to 1. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- s : ndarray or float The standard error of the mean in the sample(s), along the input axis. Notes ----- The default value for `ddof` is different to the default (0) used by other ddof containing routines, such as np.std and np.nanstd. Examples -------- Find standard error along the first axis: >>> from scipy import stats >>> a = np.arange(20).reshape(5,4) >>> stats.sem(a) array([ 2.8284, 2.8284, 2.8284, 2.8284]) Find standard error across the whole array, using n degrees of freedom: >>> stats.sem(a, axis=None, ddof=0) 1.2893796958227628 """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.sem(a, axis, ddof) n = a.shape[axis] s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n) return s def zscore(a, axis=0, ddof=0): """ Calculate the z score of each value in the sample, relative to the sample mean and standard deviation. Parameters ---------- a : array_like An array like object containing the sample data. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. ddof : int, optional Degrees of freedom correction in the calculation of the standard deviation. Default is 0. Returns ------- zscore : array_like The z-scores, standardized by mean and standard deviation of input array `a`. Notes ----- This function preserves ndarray subclasses, and works also with matrices and masked arrays (it uses `asanyarray` instead of `asarray` for parameters). Examples -------- >>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, ... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508]) >>> from scipy import stats >>> stats.zscore(a) array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786, 0.6748, -1.1488, -1.3324]) Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``) to calculate the standard deviation: >>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608], ... [ 0.7149, 0.0775, 0.6072, 0.9656], ... [ 0.6341, 0.1403, 0.9759, 0.4064], ... [ 0.5918, 0.6948, 0.904 , 0.3721], ... 
[ 0.0921, 0.2481, 0.1188, 0.1366]]) >>> stats.zscore(b, axis=1, ddof=1) array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358], [ 0.33048416, -1.37380874, 0.04251374, 1.00081084], [ 0.26796377, -1.12598418, 1.23283094, -0.37481053], [-0.22095197, 0.24468594, 1.19042819, -1.21416216], [-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]]) """ a = np.asanyarray(a) mns = a.mean(axis=axis) sstd = a.std(axis=axis, ddof=ddof) if axis and mns.ndim < a.ndim: return ((a - np.expand_dims(mns, axis=axis)) / np.expand_dims(sstd, axis=axis)) else: return (a - mns) / sstd def zmap(scores, compare, axis=0, ddof=0): """ Calculate the relative z-scores. Return an array of z-scores, i.e., scores that are standardized to zero mean and unit variance, where mean and variance are calculated from the comparison array. Parameters ---------- scores : array_like The input for which z-scores are calculated. compare : array_like The input from which the mean and standard deviation of the normalization are taken; assumed to have the same dimension as `scores`. axis : int or None, optional Axis over which mean and variance of `compare` are calculated. Default is 0. If None, compute over the whole array `scores`. ddof : int, optional Degrees of freedom correction in the calculation of the standard deviation. Default is 0. Returns ------- zscore : array_like Z-scores, in the same shape as `scores`. Notes ----- This function preserves ndarray subclasses, and works also with matrices and masked arrays (it uses `asanyarray` instead of `asarray` for parameters). Examples -------- >>> from scipy.stats import zmap >>> a = [0.5, 2.0, 2.5, 3] >>> b = [0, 1, 2, 3, 4] >>> zmap(a, b) array([-1.06066017, 0. , 0.35355339, 0.70710678]) """ scores, compare = map(np.asanyarray, [scores, compare]) mns = compare.mean(axis=axis) sstd = compare.std(axis=axis, ddof=ddof) if axis and mns.ndim < compare.ndim: return ((scores - np.expand_dims(mns, axis=axis)) / np.expand_dims(sstd, axis=axis)) else: return (scores - mns) / sstd # Private dictionary initialized only once at module level # See https://en.wikipedia.org/wiki/Robust_measures_of_scale _scale_conversions = {'raw': 1.0, 'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)} def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate', interpolation='linear', keepdims=False): """ Compute the interquartile range of the data along the specified axis. The interquartile range (IQR) is the difference between the 75th and 25th percentile of the data. It is a measure of the dispersion similar to standard deviation or variance, but is much more robust against outliers [2]_. The ``rng`` parameter allows this function to compute other percentile ranges than the actual IQR. For example, setting ``rng=(0, 100)`` is equivalent to `numpy.ptp`. The IQR of an empty array is `np.nan`. .. versionadded:: 0.18.0 Parameters ---------- x : array_like Input array or object that can be converted to an array. axis : int or sequence of int, optional Axis along which the range is computed. The default is to compute the IQR for the entire array. rng : Two-element sequence containing floats in range of [0,100] optional Percentiles over which to compute the range. Each must be between 0 and 100, inclusive. The default is the true IQR: `(25, 75)`. The order of the elements is not important. scale : scalar or str, optional The numerical value of scale will be divided out of the final result. The following string values are recognized: 'raw' : No scaling, just return the raw IQR. 
'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`. The default is 'raw'. Array-like scale is also allowed, as long as it broadcasts correctly to the output such that ``out / scale`` is a valid operation. The output dimensions depend on the input array, `x`, the `axis` argument, and the `keepdims` flag. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional Specifies the interpolation method to use when the percentile boundaries lie between two data points `i` and `j`: * 'linear' : `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * 'lower' : `i`. * 'higher' : `j`. * 'nearest' : `i` or `j` whichever is nearest. * 'midpoint' : `(i + j) / 2`. Default is 'linear'. keepdims : bool, optional If this is set to `True`, the reduced axes are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array `x`. Returns ------- iqr : scalar or ndarray If ``axis=None``, a scalar is returned. If the input contains integers or floats of smaller precision than ``np.float64``, then the output data-type is ``np.float64``. Otherwise, the output data-type is the same as that of the input. See Also -------- numpy.std, numpy.var Examples -------- >>> from scipy.stats import iqr >>> x = np.array([[10, 7, 4], [3, 2, 1]]) >>> x array([[10, 7, 4], [ 3, 2, 1]]) >>> iqr(x) 4.0 >>> iqr(x, axis=0) array([ 3.5, 2.5, 1.5]) >>> iqr(x, axis=1) array([ 3., 1.]) >>> iqr(x, axis=1, keepdims=True) array([[ 3.], [ 1.]]) Notes ----- This function is heavily dependent on the version of `numpy` that is installed. Versions greater than 1.11.0b3 are highly recommended, as they include a number of enhancements and fixes to `numpy.percentile` and `numpy.nanpercentile` that affect the operation of this function. The following modifications apply: Below 1.10.0 : `nan_policy` is poorly defined. The default behavior of `numpy.percentile` is used for 'propagate'. This is a hybrid of 'omit' and 'propagate' that mostly yields a skewed version of 'omit' since NaNs are sorted to the end of the data. A warning is raised if there are NaNs in the data. Below 1.9.0: `numpy.nanpercentile` does not exist. This means that `numpy.percentile` is used regardless of `nan_policy` and a warning is issued. See previous item for a description of the behavior. Below 1.9.0: `keepdims` and `interpolation` are not supported. The keywords get ignored with a warning if supplied with non-default values. However, multiple axes are still supported. References ---------- .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale .. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile """ x = asarray(x) # This check prevents percentile from raising an error later. Also, it is # consistent with `np.var` and `np.std`. 
if not x.size: return np.nan # An error may be raised here, so fail-fast, before doing lengthy # computations, even though `scale` is not used until later if isinstance(scale, string_types): scale_key = scale.lower() if scale_key not in _scale_conversions: raise ValueError("{0} not a valid scale for `iqr`".format(scale)) scale = _scale_conversions[scale_key] # Select the percentile function to use based on nans and policy contains_nan, nan_policy = _contains_nan(x, nan_policy) if contains_nan and nan_policy == 'omit': percentile_func = _iqr_nanpercentile else: percentile_func = _iqr_percentile if len(rng) != 2: raise TypeError("quantile range must be two element sequence") rng = sorted(rng) pct = percentile_func(x, rng, axis=axis, interpolation=interpolation, keepdims=keepdims, contains_nan=contains_nan) out = np.subtract(pct[1], pct[0]) if scale != 1.0: out /= scale return out def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False): """ Private wrapper that works around older versions of `numpy`. While this function is pretty much necessary for the moment, it should be removed as soon as the minimum supported numpy version allows. """ if contains_nan and NumpyVersion(np.__version__) < '1.10.0a': # I see no way to avoid the version check to ensure that the corrected # NaN behavior has been implemented except to call `percentile` on a # small array. msg = "Keyword nan_policy='propagate' not correctly supported for " \ "numpy versions < 1.10.x. The default behavior of " \ "`numpy.percentile` will be used." warnings.warn(msg, RuntimeWarning) try: # For older versions of numpy, there are two things that can cause a # problem here: missing keywords and non-scalar axis. The former can be # partially handled with a warning, the latter can be handled fully by # hacking in an implementation similar to numpy's function for # providing multi-axis functionality # (`numpy.lib.function_base._ureduce` for the curious). result = np.percentile(x, q, axis=axis, keepdims=keepdims, interpolation=interpolation) except TypeError: if interpolation != 'linear' or keepdims: # At time or writing, this means np.__version__ < 1.9.0 warnings.warn("Keywords interpolation and keepdims not supported " "for your version of numpy", RuntimeWarning) try: # Special processing if axis is an iterable original_size = len(axis) except TypeError: # Axis is a scalar at this point pass else: axis = np.unique(np.asarray(axis) % x.ndim) if original_size > axis.size: # mimic numpy if axes are duplicated raise ValueError("duplicate value in axis") if axis.size == x.ndim: # axis includes all axes: revert to None axis = None elif axis.size == 1: # no rolling necessary axis = axis[0] else: # roll multiple axes to the end and flatten that part out for ax in axis[::-1]: x = np.rollaxis(x, ax, x.ndim) x = x.reshape(x.shape[:-axis.size] + (np.prod(x.shape[-axis.size:]),)) axis = -1 result = np.percentile(x, q, axis=axis) return result def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False): """ Private wrapper that works around the following: 1. A bug in `np.nanpercentile` that was around until numpy version 1.11.0. 2. A bug in `np.percentile` NaN handling that was fixed in numpy version 1.10.0. 3. The non-existence of `np.nanpercentile` before numpy version 1.9.0. While this function is pretty much necessary for the moment, it should be removed as soon as the minimum supported numpy version allows. 
""" if hasattr(np, 'nanpercentile'): # At time or writing, this means np.__version__ < 1.9.0 result = np.nanpercentile(x, q, axis=axis, interpolation=interpolation, keepdims=keepdims) # If non-scalar result and nanpercentile does not do proper axis roll. # I see no way of avoiding the version test since dimensions may just # happen to match in the data. if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a': axis = np.asarray(axis) if axis.size == 1: # If only one axis specified, reduction happens along that dimension if axis.ndim == 0: axis = axis[None] result = np.rollaxis(result, axis[0]) else: # If multiple axes, reduced dimeision is last result = np.rollaxis(result, -1) else: msg = "Keyword nan_policy='omit' not correctly supported for numpy " \ "versions < 1.9.x. The default behavior of numpy.percentile " \ "will be used." warnings.warn(msg, RuntimeWarning) result = _iqr_percentile(x, q, axis=axis) return result ##################################### # TRIMMING FUNCTIONS # ##################################### SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper')) def sigmaclip(a, low=4., high=4.): """ Iterative sigma-clipping of array elements. Starting from the full sample, all elements outside the critical range are removed, i.e. all elements of the input array `c` that satisfy either of the following conditions :: c < mean(c) - std(c)*low c > mean(c) + std(c)*high The iteration continues with the updated sample until no elements are outside the (updated) range. Parameters ---------- a : array_like Data array, will be raveled if not 1-D. low : float, optional Lower bound factor of sigma clipping. Default is 4. high : float, optional Upper bound factor of sigma clipping. Default is 4. Returns ------- clipped : ndarray Input array with clipped elements removed. lower : float Lower threshold value use for clipping. upper : float Upper threshold value use for clipping. Examples -------- >>> from scipy.stats import sigmaclip >>> a = np.concatenate((np.linspace(9.5, 10.5, 31), ... np.linspace(0, 20, 5))) >>> fact = 1.5 >>> c, low, upp = sigmaclip(a, fact, fact) >>> c array([ 9.96666667, 10. , 10.03333333, 10. ]) >>> c.var(), c.std() (0.00055555555555555165, 0.023570226039551501) >>> low, c.mean() - fact*c.std(), c.min() (9.9646446609406727, 9.9646446609406727, 9.9666666666666668) >>> upp, c.mean() + fact*c.std(), c.max() (10.035355339059327, 10.035355339059327, 10.033333333333333) >>> a = np.concatenate((np.linspace(9.5, 10.5, 11), ... np.linspace(-100, -50, 3))) >>> c, low, upp = sigmaclip(a, 1.8, 1.8) >>> (c == np.linspace(9.5, 10.5, 11)).all() True """ c = np.asarray(a).ravel() delta = 1 while delta: c_std = c.std() c_mean = c.mean() size = c.size critlower = c_mean - c_std * low critupper = c_mean + c_std * high c = c[(c >= critlower) & (c <= critupper)] delta = size - c.size return SigmaclipResult(c, critlower, critupper) def trimboth(a, proportiontocut, axis=0): """ Slices off a proportion of items from both ends of an array. Slices off the passed proportion of items from both ends of the passed array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and** rightmost 10% of scores). The trimmed values are the lowest and highest ones. Slices off less if proportion results in a non-integer slice index (i.e., conservatively slices off`proportiontocut`). Parameters ---------- a : array_like Data to trim. proportiontocut : float Proportion (in range 0-1) of total data set to trim of each end. 
axis : int or None, optional Axis along which to trim data. Default is 0. If None, compute over the whole array `a`. Returns ------- out : ndarray Trimmed version of array `a`. The order of the trimmed content is undefined. See Also -------- trim_mean Examples -------- >>> from scipy import stats >>> a = np.arange(20) >>> b = stats.trimboth(a, 0.1) >>> b.shape (16,) """ a = np.asarray(a) if a.size == 0: return a if axis is None: a = a.ravel() axis = 0 nobs = a.shape[axis] lowercut = int(proportiontocut * nobs) uppercut = nobs - lowercut if (lowercut >= uppercut): raise ValueError("Proportion too big.") atmp = np.partition(a, (lowercut, uppercut - 1), axis) sl = [slice(None)] * atmp.ndim sl[axis] = slice(lowercut, uppercut) return atmp[sl] def trim1(a, proportiontocut, tail='right', axis=0): """ Slices off a proportion from ONE end of the passed array distribution. If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost' 10% of scores. The lowest or highest values are trimmed (depending on the tail). Slices off less if proportion results in a non-integer slice index (i.e., conservatively slices off `proportiontocut` ). Parameters ---------- a : array_like Input array proportiontocut : float Fraction to cut off of 'left' or 'right' of distribution tail : {'left', 'right'}, optional Defaults to 'right'. axis : int or None, optional Axis along which to trim data. Default is 0. If None, compute over the whole array `a`. Returns ------- trim1 : ndarray Trimmed version of array `a`. The order of the trimmed content is undefined. """ a = np.asarray(a) if axis is None: a = a.ravel() axis = 0 nobs = a.shape[axis] # avoid possible corner case if proportiontocut >= 1: return [] if tail.lower() == 'right': lowercut = 0 uppercut = nobs - int(proportiontocut * nobs) elif tail.lower() == 'left': lowercut = int(proportiontocut * nobs) uppercut = nobs atmp = np.partition(a, (lowercut, uppercut - 1), axis) return atmp[lowercut:uppercut] def trim_mean(a, proportiontocut, axis=0): """ Return mean of array after trimming distribution from both tails. If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of scores. The input is sorted before slicing. Slices off less if proportion results in a non-integer slice index (i.e., conservatively slices off `proportiontocut` ). Parameters ---------- a : array_like Input array proportiontocut : float Fraction to cut off of both tails of the distribution axis : int or None, optional Axis along which the trimmed means are computed. Default is 0. If None, compute over the whole array `a`. Returns ------- trim_mean : ndarray Mean of trimmed array. See Also -------- trimboth tmean : compute the trimmed mean ignoring values outside given `limits`. 
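    Notes
    -----
    A small worked example of the slicing rule used below: with 20
    observations and ``proportiontocut=0.1``, ``int(0.1 * 20) = 2`` values
    are dropped from each end, and the mean of the remaining 16 values is
    returned.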
Examples -------- >>> from scipy import stats >>> x = np.arange(20) >>> stats.trim_mean(x, 0.1) 9.5 >>> x2 = x.reshape(5, 4) >>> x2 array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15], [16, 17, 18, 19]]) >>> stats.trim_mean(x2, 0.25) array([ 8., 9., 10., 11.]) >>> stats.trim_mean(x2, 0.25, axis=1) array([ 1.5, 5.5, 9.5, 13.5, 17.5]) """ a = np.asarray(a) if a.size == 0: return np.nan if axis is None: a = a.ravel() axis = 0 nobs = a.shape[axis] lowercut = int(proportiontocut * nobs) uppercut = nobs - lowercut if (lowercut > uppercut): raise ValueError("Proportion too big.") atmp = np.partition(a, (lowercut, uppercut - 1), axis) sl = [slice(None)] * atmp.ndim sl[axis] = slice(lowercut, uppercut) return np.mean(atmp[sl], axis=axis) F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue')) def f_oneway(*args): """ Performs a 1-way ANOVA. The one-way ANOVA tests the null hypothesis that two or more groups have the same population mean. The test is applied to samples from two or more groups, possibly with differing sizes. Parameters ---------- sample1, sample2, ... : array_like The sample measurements for each group. Returns ------- statistic : float The computed F-value of the test. pvalue : float The associated p-value from the F-distribution. Notes ----- The ANOVA test has important assumptions that must be satisfied in order for the associated p-value to be valid. 1. The samples are independent. 2. Each sample is from a normally distributed population. 3. The population standard deviations of the groups are all equal. This property is known as homoscedasticity. If these assumptions are not true for a given set of data, it may still be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although with some loss of power. The algorithm is from Heiman[2], pp.394-7. References ---------- .. [1] Lowry, Richard. "Concepts and Applications of Inferential Statistics". Chapter 14. http://faculty.vassar.edu/lowry/ch14pt1.html .. [2] Heiman, G.W. Research Methods in Statistics. 2002. .. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA. http://www.biostathandbook.com/onewayanova.html Examples -------- >>> import scipy.stats as stats [3]_ Here are some data on a shell measurement (the length of the anterior adductor muscle scar, standardized by dividing by length) in the mussel Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon; Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a much larger data set used in McDonald et al. (1991). >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735, ... 0.0659, 0.0923, 0.0836] >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835, ... 0.0725] >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105] >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764, ... 0.0689] >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045] >>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne) (7.1210194716424473, 0.00028122423145345439) """ args = [np.asarray(arg, dtype=float) for arg in args] # ANOVA on N groups, each in its own array num_groups = len(args) alldata = np.concatenate(args) bign = len(alldata) # Determine the mean of the data, and subtract that from all inputs to a # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariance # to a shift in location, and centering all data around zero vastly # improves numerical stability. 
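    # In outline, the computation below is the classical one-way ANOVA
    # decomposition: the total sum of squares (sstot) is split into a
    # between-groups part (ssbn, with dfbn = num_groups - 1 degrees of
    # freedom) and a within-groups part (sswn, with dfwn = bign - num_groups),
    # and F = (ssbn / dfbn) / (sswn / dfwn), with the p-value taken from the
    # F distribution via special.fdtrc.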
offset = alldata.mean() alldata -= offset sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign)) ssbn = 0 for a in args: ssbn += _square_of_sums(a - offset) / float(len(a)) # Naming: variables ending in bn/b are for "between treatments", wn/w are # for "within treatments" ssbn -= (_square_of_sums(alldata) / float(bign)) sswn = sstot - ssbn dfbn = num_groups - 1 dfwn = bign - num_groups msb = ssbn / float(dfbn) msw = sswn / float(dfwn) f = msb / msw prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf return F_onewayResult(f, prob) def pearsonr(x, y): r""" Calculate a Pearson correlation coefficient and the p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. Strictly speaking, Pearson's correlation requires that each dataset be normally distributed, and not necessarily zero-mean. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. Parameters ---------- x : (N,) array_like Input y : (N,) array_like Input Returns ------- r : float Pearson's correlation coefficient p-value : float 2-tailed p-value Notes ----- The correlation coefficient is calculated as follows: .. math:: r_{pb} = \frac{\sum (x - m_x) (y - m_y) }{\sqrt{\sum (x - m_x)^2 (y - m_y)^2}} where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is the mean of the vector :math:`y`. References ---------- http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation Examples -------- >>> from scipy import stats >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) >>> b = np.arange(7) >>> stats.pearsonr(a, b) (0.8660254037844386, 0.011724811003954654) >>> stats.pearsonr([1,2,3,4,5], [5,6,7,8,7]) (0.83205029433784372, 0.080509573298498519) """ # x and y should have same length. x = np.asarray(x) y = np.asarray(y) n = len(x) mx = x.mean() my = y.mean() xm, ym = x - mx, y - my r_num = np.add.reduce(xm * ym) r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym)) r = r_num / r_den # Presumably, if abs(r) > 1, then it is only some small artifact of # floating point arithmetic. r = max(min(r, 1.0), -1.0) df = n - 2 if abs(r) == 1.0: prob = 0.0 else: t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r))) prob = _betai(0.5*df, 0.5, df/(df+t_squared)) return r, prob def fisher_exact(table, alternative='two-sided'): """Performs a Fisher exact test on a 2x2 contingency table. Parameters ---------- table : array_like of ints A 2x2 contingency table. Elements should be non-negative integers. alternative : {'two-sided', 'less', 'greater'}, optional Which alternative hypothesis to the null hypothesis the test uses. Default is 'two-sided'. Returns ------- oddsratio : float This is prior odds ratio and not a posterior estimate. p_value : float P-value, the probability of obtaining a distribution at least as extreme as the one that was actually observed, assuming that the null hypothesis is true. See Also -------- chi2_contingency : Chi-square test of independence of variables in a contingency table. 
Notes ----- The calculated odds ratio is different from the one R uses. This scipy implementation returns the (more common) "unconditional Maximum Likelihood Estimate", while R uses the "conditional Maximum Likelihood Estimate". For tables with large numbers, the (inexact) chi-square test implemented in the function `chi2_contingency` can also be used. Examples -------- Say we spend a few days counting whales and sharks in the Atlantic and Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the Indian ocean 2 whales and 5 sharks. Then our contingency table is:: Atlantic Indian whales 8 2 sharks 1 5 We use this table to find the p-value: >>> import scipy.stats as stats >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]]) >>> pvalue 0.0349... The probability that we would observe this or an even more imbalanced ratio by chance is about 3.5%. A commonly used significance level is 5%--if we adopt that, we can therefore conclude that our observed imbalance is statistically significant; whales prefer the Atlantic while sharks prefer the Indian ocean. """ hypergeom = distributions.hypergeom c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm if not c.shape == (2, 2): raise ValueError("The input `table` must be of shape (2, 2).") if np.any(c < 0): raise ValueError("All values in `table` must be nonnegative.") if 0 in c.sum(axis=0) or 0 in c.sum(axis=1): # If both values in a row or column are zero, the p-value is 1 and # the odds ratio is NaN. return np.nan, 1.0 if c[1, 0] > 0 and c[0, 1] > 0: oddsratio = c[0, 0] * c[1, 1] / float(c[1, 0] * c[0, 1]) else: oddsratio = np.inf n1 = c[0, 0] + c[0, 1] n2 = c[1, 0] + c[1, 1] n = c[0, 0] + c[1, 0] def binary_search(n, n1, n2, side): """Binary search for where to begin lower/upper halves in two-sided test. """ if side == "upper": minval = mode maxval = n else: minval = 0 maxval = mode guess = -1 while maxval - minval > 1: if maxval == minval + 1 and guess == minval: guess = maxval else: guess = (maxval + minval) // 2 pguess = hypergeom.pmf(guess, n1 + n2, n1, n) if side == "upper": ng = guess - 1 else: ng = guess + 1 if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n): break elif pguess < pexact: maxval = guess else: minval = guess if guess == -1: guess = minval if side == "upper": while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon: guess -= 1 while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon: guess += 1 else: while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon: guess += 1 while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon: guess -= 1 return guess if alternative == 'less': pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) elif alternative == 'greater': # Same formula as the 'less' case, but with the second column. pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1]) elif alternative == 'two-sided': mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2)) pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n) pmode = hypergeom.pmf(mode, n1 + n2, n1, n) epsilon = 1 - 1e-4 if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon: return oddsratio, 1. 
elif c[0, 0] < mode: plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n) if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon: return oddsratio, plower guess = binary_search(n, n1, n2, "upper") pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n) else: pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n) if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon: return oddsratio, pupper guess = binary_search(n, n1, n2, "lower") pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n) else: msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}" raise ValueError(msg) if pvalue > 1.0: pvalue = 1.0 return oddsratio, pvalue SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue')) def spearmanr(a, b=None, axis=0, nan_policy='propagate'): """ Calculate a Spearman rank-order correlation coefficient and the p-value to test for non-correlation. The Spearman correlation is a nonparametric measure of the monotonicity of the relationship between two datasets. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact monotonic relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. Parameters ---------- a, b : 1D or 2D array_like, b is optional One or two 1-D or 2-D arrays containing multiple variables and observations. When these are 1-D, each represents a vector of observations of a single variable. For the behavior in the 2-D case, see under ``axis``, below. Both arrays need to have the same length in the ``axis`` dimension. axis : int or None, optional If axis=0 (default), then each column represents a variable, with observations in the rows. If axis=1, the relationship is transposed: each row represents a variable, while the columns contain observations. If axis=None, then both arrays will be raveled. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- correlation : float or ndarray (2-D square) Spearman correlation matrix or correlation coefficient (if only 2 variables are given as parameters. Correlation matrix is square with length equal to total number of variables (columns or rows) in a and b combined. pvalue : float The two-sided p-value for a hypothesis test whose null hypothesis is that two sets of data are uncorrelated, has same dimension as rho. Notes ----- Changes in scipy 0.8.0: rewrite to add tie-handling, and axis. References ---------- .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability and Statistics Tables and Formulae. Chapman & Hall: New York. 2000. 
Section 14.7 Examples -------- >>> from scipy import stats >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7]) (0.82078268166812329, 0.088587005313543798) >>> np.random.seed(1234321) >>> x2n = np.random.randn(100, 2) >>> y2n = np.random.randn(100, 2) >>> stats.spearmanr(x2n) (0.059969996999699973, 0.55338590803773591) >>> stats.spearmanr(x2n[:,0], x2n[:,1]) (0.059969996999699973, 0.55338590803773591) >>> rho, pval = stats.spearmanr(x2n, y2n) >>> rho array([[ 1. , 0.05997 , 0.18569457, 0.06258626], [ 0.05997 , 1. , 0.110003 , 0.02534653], [ 0.18569457, 0.110003 , 1. , 0.03488749], [ 0.06258626, 0.02534653, 0.03488749, 1. ]]) >>> pval array([[ 0. , 0.55338591, 0.06435364, 0.53617935], [ 0.55338591, 0. , 0.27592895, 0.80234077], [ 0.06435364, 0.27592895, 0. , 0.73039992], [ 0.53617935, 0.80234077, 0.73039992, 0. ]]) >>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1) >>> rho array([[ 1. , 0.05997 , 0.18569457, 0.06258626], [ 0.05997 , 1. , 0.110003 , 0.02534653], [ 0.18569457, 0.110003 , 1. , 0.03488749], [ 0.06258626, 0.02534653, 0.03488749, 1. ]]) >>> stats.spearmanr(x2n, y2n, axis=None) (0.10816770419260482, 0.1273562188027364) >>> stats.spearmanr(x2n.ravel(), y2n.ravel()) (0.10816770419260482, 0.1273562188027364) >>> xint = np.random.randint(10, size=(100, 2)) >>> stats.spearmanr(xint) (0.052760927029710199, 0.60213045837062351) """ a, axisout = _chk_asarray(a, axis) a_contains_nan, nan_policy = _contains_nan(a, nan_policy) if a_contains_nan: a = ma.masked_invalid(a) if a.size <= 1: return SpearmanrResult(np.nan, np.nan) ar = np.apply_along_axis(rankdata, axisout, a) br = None if b is not None: b, axisout = _chk_asarray(b, axis) b_contains_nan, nan_policy = _contains_nan(b, nan_policy) if a_contains_nan or b_contains_nan: b = ma.masked_invalid(b) if nan_policy == 'propagate': rho, pval = mstats_basic.spearmanr(a, b, use_ties=True) return SpearmanrResult(rho * np.nan, pval * np.nan) if nan_policy == 'omit': return mstats_basic.spearmanr(a, b, use_ties=True) br = np.apply_along_axis(rankdata, axisout, b) n = a.shape[axisout] rs = np.corrcoef(ar, br, rowvar=axisout) olderr = np.seterr(divide='ignore') # rs can have elements equal to 1 try: # clip the small negative values possibly caused by rounding # errors before taking the square root t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0)) finally: np.seterr(**olderr) prob = 2 * distributions.t.sf(np.abs(t), n-2) if rs.shape == (2, 2): return SpearmanrResult(rs[1, 0], prob[1, 0]) else: return SpearmanrResult(rs, prob) PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation', 'pvalue')) def pointbiserialr(x, y): r""" Calculate a point biserial correlation coefficient and its p-value. The point biserial correlation is used to measure the relationship between a binary variable, x, and a continuous variable, y. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply a determinative relationship. This function uses a shortcut formula but produces the same result as `pearsonr`. Parameters ---------- x : array_like of bools Input array. y : array_like Input array. Returns ------- correlation : float R value pvalue : float 2-tailed p-value Notes ----- `pointbiserialr` uses a t-test with ``n-1`` degrees of freedom. It is equivalent to `pearsonr.` The value of the point-biserial correlation can be calculated from: .. 
math:: r_{pb} = \frac{\overline{Y_{1}} - \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{1} N_{2}}{N (N - 1))}} Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}` are number of observations coded 0 and 1 respectively; :math:`N` is the total number of observations and :math:`s_{y}` is the standard deviation of all the metric observations. A value of :math:`r_{pb}` that is significantly different from zero is completely equivalent to a significant difference in means between the two groups. Thus, an independent groups t Test with :math:`N-2` degrees of freedom may be used to test whether :math:`r_{pb}` is nonzero. The relation between the t-statistic for comparing two independent groups and :math:`r_{pb}` is given by: .. math:: t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}} References ---------- .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math. Statist., Vol. 20, no.1, pp. 125-126, 1949. .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25, np. 3, pp. 603-607, 1954. .. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full Examples -------- >>> from scipy import stats >>> a = np.array([0, 0, 0, 1, 1, 1, 1]) >>> b = np.arange(7) >>> stats.pointbiserialr(a, b) (0.8660254037844386, 0.011724811003954652) >>> stats.pearsonr(a, b) (0.86602540378443871, 0.011724811003954626) >>> np.corrcoef(a, b) array([[ 1. , 0.8660254], [ 0.8660254, 1. ]]) """ rpb, prob = pearsonr(x, y) return PointbiserialrResult(rpb, prob) KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue')) def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate'): """ Calculate Kendall's tau, a correlation measure for ordinal data. Kendall's tau is a measure of the correspondence between two rankings. Values close to 1 indicate strong agreement, values close to -1 indicate strong disagreement. This is the 1945 "tau-b" version of Kendall's tau [2]_, which can account for ties and which reduces to the 1938 "tau-a" version [1]_ in absence of ties. Parameters ---------- x, y : array_like Arrays of rankings, of the same shape. If arrays are not 1-D, they will be flattened to 1-D. initial_lexsort : bool, optional Unused (deprecated). nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Note that if the input contains nan 'omit' delegates to mstats_basic.kendalltau(), which has a different implementation. Returns ------- correlation : float The tau statistic. pvalue : float The two-sided p-value for a hypothesis test whose null hypothesis is an absence of association, tau = 0. See also -------- spearmanr : Calculates a Spearman rank-order correlation coefficient. theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). weightedtau : Computes a weighted version of Kendall's tau. Notes ----- The definition of Kendall's tau that is used is [2]_:: tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U)) where P is the number of concordant pairs, Q the number of discordant pairs, T the number of ties only in `x`, and U the number of ties only in `y`. If a tie occurs for the same pair in both `x` and `y`, it is not added to either T or U. References ---------- .. [1] Maurice G. 
Kendall, "A New Measure of Rank Correlation", Biometrika Vol. 30, No. 1/2, pp. 81-93, 1938. .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems", Biometrika Vol. 33, No. 3, pp. 239-251. 1945. .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John Wiley & Sons, 1967. .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency tables", Software: Practice and Experience, Vol. 24, No. 3, pp. 327-336, 1994. Examples -------- >>> from scipy import stats >>> x1 = [12, 2, 1, 12, 2] >>> x2 = [1, 4, 7, 1, 0] >>> tau, p_value = stats.kendalltau(x1, x2) >>> tau -0.47140452079103173 >>> p_value 0.2827454599327748 """ x = np.asarray(x).ravel() y = np.asarray(y).ravel() if x.size != y.size: raise ValueError("All inputs to `kendalltau` must be of the same size, " "found x-size %s and y-size %s" % (x.size, y.size)) elif not x.size or not y.size: return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty # check both x and y cnx, npx = _contains_nan(x, nan_policy) cny, npy = _contains_nan(y, nan_policy) contains_nan = cnx or cny if npx == 'omit' or npy == 'omit': nan_policy = 'omit' if contains_nan and nan_policy == 'propagate': return KendalltauResult(np.nan, np.nan) elif contains_nan and nan_policy == 'omit': x = ma.masked_invalid(x) y = ma.masked_invalid(y) return mstats_basic.kendalltau(x, y) if initial_lexsort is not None: # deprecate to drop! warnings.warn('"initial_lexsort" is gone!') def count_rank_tie(ranks): cnt = np.bincount(ranks).astype('int64', copy=False) cnt = cnt[cnt > 1] return ((cnt * (cnt - 1) // 2).sum(), (cnt * (cnt - 1.) * (cnt - 2)).sum(), (cnt * (cnt - 1.) * (2*cnt + 5)).sum()) size = x.size perm = np.argsort(y) # sort on y and convert y to dense ranks x, y = x[perm], y[perm] y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp) # stable sort on x and convert x to dense ranks perm = np.argsort(x, kind='mergesort') x, y = x[perm], y[perm] x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp) dis = _kendall_dis(x, y) # discordant pairs obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True] cnt = np.diff(np.where(obs)[0]).astype('int64', copy=False) ntie = (cnt * (cnt - 1) // 2).sum() # joint ties xtie, x0, x1 = count_rank_tie(x) # ties in x, stats ytie, y0, y1 = count_rank_tie(y) # ties in y, stats tot = (size * (size - 1)) // 2 if xtie == tot or ytie == tot: return KendalltauResult(np.nan, np.nan) # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie # = con + dis + xtie + ytie - ntie con_minus_dis = tot - xtie - ytie + ntie - 2 * dis tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie) # Limit range to fix computational errors tau = min(1., max(-1., tau)) # con_minus_dis is approx normally distributed with this variance [3]_ var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + ( 2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. * size * (size - 1) * (size - 2)) pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2)) # Limit range to fix computational errors return KendalltauResult(min(1., max(-1., tau)), pvalue) WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue')) def weightedtau(x, y, rank=True, weigher=None, additive=True): r""" Compute a weighted version of Kendall's :math:`\tau`. The weighted :math:`\tau` is a weighted version of Kendall's :math:`\tau` in which exchanges of high weight are more influential than exchanges of low weight. 
The default parameters compute the additive hyperbolic version of the index, :math:`\tau_\mathrm h`, which has been shown to provide the best balance between important and unimportant elements [1]_. The weighting is defined by means of a rank array, which assigns a nonnegative rank to each element, and a weigher function, which assigns a weight based from the rank to each element. The weight of an exchange is then the sum or the product of the weights of the ranks of the exchanged elements. The default parameters compute :math:`\tau_\mathrm h`: an exchange between elements with rank :math:`r` and :math:`s` (starting from zero) has weight :math:`1/(r+1) + 1/(s+1)`. Specifying a rank array is meaningful only if you have in mind an external criterion of importance. If, as it usually happens, you do not have in mind a specific rank, the weighted :math:`\tau` is defined by averaging the values obtained using the decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the behavior with default parameters. Note that if you are computing the weighted :math:`\tau` on arrays of ranks, rather than of scores (i.e., a larger value implies a lower rank) you must negate the ranks, so that elements of higher rank are associated with a larger value. Parameters ---------- x, y : array_like Arrays of scores, of the same shape. If arrays are not 1-D, they will be flattened to 1-D. rank: array_like of ints or bool, optional A nonnegative rank assigned to each element. If it is None, the decreasing lexicographical rank by (`x`, `y`) will be used: elements of higher rank will be those with larger `x`-values, using `y`-values to break ties (in particular, swapping `x` and `y` will give a different result). If it is False, the element indices will be used directly as ranks. The default is True, in which case this function returns the average of the values obtained using the decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`). weigher : callable, optional The weigher function. Must map nonnegative integers (zero representing the most important element) to a nonnegative weight. The default, None, provides hyperbolic weighing, that is, rank :math:`r` is mapped to weight :math:`1/(r+1)`. additive : bool, optional If True, the weight of an exchange is computed by adding the weights of the ranks of the exchanged elements; otherwise, the weights are multiplied. The default is True. Returns ------- correlation : float The weighted :math:`\tau` correlation index. pvalue : float Presently ``np.nan``, as the null statistics is unknown (even in the additive hyperbolic case). See also -------- kendalltau : Calculates Kendall's tau. spearmanr : Calculates a Spearman rank-order correlation coefficient. theilslopes : Computes the Theil-Sen estimator for a set of points (x, y). Notes ----- This function uses an :math:`O(n \log n)`, mergesort-based algorithm [1]_ that is a weighted extension of Knight's algorithm for Kendall's :math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_ between rankings without ties (i.e., permutations) by setting `additive` and `rank` to False, as the definition given in [1]_ is a generalization of Shieh's. NaNs are considered the smallest possible score. .. versionadded:: 0.19.0 References ---------- .. [1] Sebastiano Vigna, "A weighted correlation index for rankings with ties", Proceedings of the 24th international conference on World Wide Web, pp. 1166-1176, ACM, 2015. .. [2] W.R. 
Knight, "A Computer Method for Calculating Kendall's Tau with Ungrouped Data", Journal of the American Statistical Association, Vol. 61, No. 314, Part 1, pp. 436-439, 1966. .. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics & Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998. Examples -------- >>> from scipy import stats >>> x = [12, 2, 1, 12, 2] >>> y = [1, 4, 7, 1, 0] >>> tau, p_value = stats.weightedtau(x, y) >>> tau -0.56694968153682723 >>> p_value nan >>> tau, p_value = stats.weightedtau(x, y, additive=False) >>> tau -0.62205716951801038 NaNs are considered the smallest possible score: >>> x = [12, 2, 1, 12, 2] >>> y = [1, 4, 7, 1, np.nan] >>> tau, _ = stats.weightedtau(x, y) >>> tau -0.56694968153682723 This is exactly Kendall's tau: >>> x = [12, 2, 1, 12, 2] >>> y = [1, 4, 7, 1, 0] >>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1) >>> tau -0.47140452079103173 >>> x = [12, 2, 1, 12, 2] >>> y = [1, 4, 7, 1, 0] >>> stats.weightedtau(x, y, rank=None) WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan) >>> stats.weightedtau(y, x, rank=None) WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan) """ x = np.asarray(x).ravel() y = np.asarray(y).ravel() if x.size != y.size: raise ValueError("All inputs to `weightedtau` must be of the same size, " "found x-size %s and y-size %s" % (x.size, y.size)) if not x.size: return WeightedTauResult(np.nan, np.nan) # Return NaN if arrays are empty # If there are NaNs we apply _toint64() if np.isnan(np.sum(x)): x = _toint64(x) if np.isnan(np.sum(x)): y = _toint64(y) # Reduce to ranks unsupported types if x.dtype != y.dtype: if x.dtype != np.int64: x = _toint64(x) if y.dtype != np.int64: y = _toint64(y) else: if x.dtype not in (np.int32, np.int64, np.float32, np.float64): x = _toint64(x) y = _toint64(y) if rank is True: return WeightedTauResult(( _weightedrankedtau(x, y, None, weigher, additive) + _weightedrankedtau(y, x, None, weigher, additive) ) / 2, np.nan) if rank is False: rank = np.arange(x.size, dtype=np.intp) elif rank is not None: rank = np.asarray(rank).ravel() if rank.size != x.size: raise ValueError("All inputs to `weightedtau` must be of the same size, " "found x-size %s and rank-size %s" % (x.size, rank.size)) return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan) ##################################### # INFERENTIAL STATISTICS # ##################################### Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue')) def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'): """ Calculate the T-test for the mean of ONE group of scores. This is a two-sided test for the null hypothesis that the expected value (mean) of a sample of independent observations `a` is equal to the given population mean, `popmean`. Parameters ---------- a : array_like sample observation popmean : float or array_like expected value in null hypothesis. If array_like, then it must have the same shape as `a` excluding the axis dimension axis : int or None, optional Axis along which to compute test. If None, compute over the whole array `a`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. 
Returns ------- statistic : float or array t-statistic pvalue : float or array two-tailed p-value Examples -------- >>> from scipy import stats >>> np.random.seed(7654567) # fix seed to get the same result >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2)) Test if mean of random sample is equal to true mean, and different mean. We reject the null hypothesis in the second case and don't reject it in the first case. >>> stats.ttest_1samp(rvs,5.0) (array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674])) >>> stats.ttest_1samp(rvs,0.0) (array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999])) Examples using axis and non-scalar dimension for population mean. >>> stats.ttest_1samp(rvs,[5.0,0.0]) (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04])) >>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1) (array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04])) >>> stats.ttest_1samp(rvs,[[5.0],[0.0]]) (array([[-0.68014479, -0.04323899], [ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01], [ 7.89094663e-03, 1.49986458e-04]])) """ a, axis = _chk_asarray(a, axis) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.ttest_1samp(a, popmean, axis) n = a.shape[axis] df = n - 1 d = np.mean(a, axis) - popmean v = np.var(a, axis, ddof=1) denom = np.sqrt(v / float(n)) with np.errstate(divide='ignore', invalid='ignore'): t = np.divide(d, denom) t, prob = _ttest_finish(df, t) return Ttest_1sampResult(t, prob) def _ttest_finish(df, t): """Common code between all 3 t-test functions.""" prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail if t.ndim == 0: t = t[()] return t, prob def _ttest_ind_from_stats(mean1, mean2, denom, df): d = mean1 - mean2 with np.errstate(divide='ignore', invalid='ignore'): t = np.divide(d, denom) t, prob = _ttest_finish(df, t) return (t, prob) def _unequal_var_ttest_denom(v1, n1, v2, n2): vn1 = v1 / n1 vn2 = v2 / n2 with np.errstate(divide='ignore', invalid='ignore'): df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1)) # If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0). # Hence it doesn't matter what df is as long as it's not NaN. df = np.where(np.isnan(df), 1, df) denom = np.sqrt(vn1 + vn2) return df, denom def _equal_var_ttest_denom(v1, n1, v2, n2): df = n1 + n2 - 2.0 svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2)) return df, denom Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue')) def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2, equal_var=True): """ T-test for means of two independent samples from descriptive statistics. This is a two-sided test for the null hypothesis that two independent samples have identical average (expected) values. Parameters ---------- mean1 : array_like The mean(s) of sample 1. std1 : array_like The standard deviation(s) of sample 1. nobs1 : array_like The number(s) of observations of sample 1. mean2 : array_like The mean(s) of sample 2 std2 : array_like The standard deviations(s) of sample 2. nobs2 : array_like The number(s) of observations of sample 2. equal_var : bool, optional If True (default), perform a standard independent 2 sample test that assumes equal population variances [1]_. If False, perform Welch's t-test, which does not assume equal population variance [2]_. 
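With ``equal_var=True`` the denominator is built from the pooled variance, using
``nobs1 + nobs2 - 2`` degrees of freedom. A minimal sketch with the same summary
numbers as the example further below:

>>> import numpy as np
>>> v1, n1, v2, n2 = 87.5, 13, 39.0, 11
>>> df = n1 + n2 - 2
>>> svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df     # pooled variance
>>> denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))   # standard error of the mean difference
>>> round((15.0 - 12.0) / denom, 4)                 # agrees with the example's statistic
0.9051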
Returns ------- statistic : float or array The calculated t-statistics pvalue : float or array The two-tailed p-value. See Also -------- scipy.stats.ttest_ind Notes ----- .. versionadded:: 0.16.0 References ---------- .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test Examples -------- Suppose we have the summary data for two samples, as follows:: Sample Sample Size Mean Variance Sample 1 13 15.0 87.5 Sample 2 11 12.0 39.0 Apply the t-test to this data (with the assumption that the population variances are equal): >>> from scipy.stats import ttest_ind_from_stats >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13, ... mean2=12.0, std2=np.sqrt(39.0), nobs2=11) Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487) For comparison, here is the data from which those summary statistics were taken. With this data, we can compute the same result using `scipy.stats.ttest_ind`: >>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26]) >>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21]) >>> from scipy.stats import ttest_ind >>> ttest_ind(a, b) Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486) """ if equal_var: df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) else: df, denom = _unequal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2) res = _ttest_ind_from_stats(mean1, mean2, denom, df) return Ttest_indResult(*res) def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'): """ Calculate the T-test for the means of *two independent* samples of scores. This is a two-sided test for the null hypothesis that 2 independent samples have identical average (expected) values. This test assumes that the populations have identical variances by default. Parameters ---------- a, b : array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int or None, optional Axis along which to compute test. If None, compute over the whole arrays, `a`, and `b`. equal_var : bool, optional If True (default), perform a standard independent 2 sample test that assumes equal population variances [1]_. If False, perform Welch's t-test, which does not assume equal population variance [2]_. .. versionadded:: 0.11.0 nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float or array The calculated t-statistic. pvalue : float or array The two-tailed p-value. Notes ----- We can use this test, if we observe two independent samples from the same or different population, e.g. exam scores of boys and girls or of two ethnic groups. The test measures whether the average (expected) value differs significantly across samples. If we observe a large p-value, for example larger than 0.05 or 0.1, then we cannot reject the null hypothesis of identical average scores. If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%, then we reject the null hypothesis of equal averages. References ---------- .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test .. 
[2] http://en.wikipedia.org/wiki/Welch%27s_t_test Examples -------- >>> from scipy import stats >>> np.random.seed(12345678) Test with sample with identical means: >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500) >>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500) >>> stats.ttest_ind(rvs1,rvs2) (0.26833823296239279, 0.78849443369564776) >>> stats.ttest_ind(rvs1,rvs2, equal_var = False) (0.26833823296239279, 0.78849452749500748) `ttest_ind` underestimates p for unequal variances: >>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500) >>> stats.ttest_ind(rvs1, rvs3) (-0.46580283298287162, 0.64145827413436174) >>> stats.ttest_ind(rvs1, rvs3, equal_var = False) (-0.46580283298287162, 0.64149646246569292) When n1 != n2, the equal variance t-statistic is no longer equal to the unequal variance t-statistic: >>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100) >>> stats.ttest_ind(rvs1, rvs4) (-0.99882539442782481, 0.3182832709103896) >>> stats.ttest_ind(rvs1, rvs4, equal_var = False) (-0.69712570584654099, 0.48716927725402048) T-test with different means, variance, and n: >>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100) >>> stats.ttest_ind(rvs1, rvs5) (-1.4679669854490653, 0.14263895620529152) >>> stats.ttest_ind(rvs1, rvs5, equal_var = False) (-0.94365973617132992, 0.34744170334794122) """ a, b, axis = _chk2_asarray(a, b, axis) # check both a and b cna, npa = _contains_nan(a, nan_policy) cnb, npb = _contains_nan(b, nan_policy) contains_nan = cna or cnb if npa == 'omit' or npb == 'omit': nan_policy = 'omit' if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) b = ma.masked_invalid(b) return mstats_basic.ttest_ind(a, b, axis, equal_var) if a.size == 0 or b.size == 0: return Ttest_indResult(np.nan, np.nan) v1 = np.var(a, axis, ddof=1) v2 = np.var(b, axis, ddof=1) n1 = a.shape[axis] n2 = b.shape[axis] if equal_var: df, denom = _equal_var_ttest_denom(v1, n1, v2, n2) else: df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2) res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df) return Ttest_indResult(*res) Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue')) def ttest_rel(a, b, axis=0, nan_policy='propagate'): """ Calculate the T-test on TWO RELATED samples of scores, a and b. This is a two-sided test for the null hypothesis that 2 related or repeated samples have identical average (expected) values. Parameters ---------- a, b : array_like The arrays must have the same shape. axis : int or None, optional Axis along which to compute test. If None, compute over the whole arrays, `a`, and `b`. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float or array t-statistic pvalue : float or array two-tailed p-value Notes ----- Examples for the use are scores of the same set of student in different exams, or repeated sampling from the same units. The test measures whether the average score differs significantly across samples (e.g. exams). If we observe a large p-value, for example greater than 0.05 or 0.1 then we cannot reject the null hypothesis of identical average scores. If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%, then we reject the null hypothesis of equal averages. Small p-values are associated with large t-statistics. 
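The paired test is equivalent to a one-sample t-test on the differences ``a - b``
against a population mean of zero; a minimal sketch with small, made-up
before/after measurements:

>>> import numpy as np
>>> from scipy import stats
>>> before = np.array([120., 122., 118., 130., 125.])
>>> after = np.array([118., 120., 119., 124., 121.])
>>> np.allclose(stats.ttest_rel(before, after),
...             stats.ttest_1samp(before - after, 0.0))
True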
References ---------- https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples Examples -------- >>> from scipy import stats >>> np.random.seed(12345678) # fix random seed to get same numbers >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500) >>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) + ... stats.norm.rvs(scale=0.2,size=500)) >>> stats.ttest_rel(rvs1,rvs2) (0.24101764965300962, 0.80964043445811562) >>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) + ... stats.norm.rvs(scale=0.2,size=500)) >>> stats.ttest_rel(rvs1,rvs3) (-3.9995108708727933, 7.3082402191726459e-005) """ a, b, axis = _chk2_asarray(a, b, axis) cna, npa = _contains_nan(a, nan_policy) cnb, npb = _contains_nan(b, nan_policy) contains_nan = cna or cnb if npa == 'omit' or npb == 'omit': nan_policy = 'omit' if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) b = ma.masked_invalid(b) m = ma.mask_or(ma.getmask(a), ma.getmask(b)) aa = ma.array(a, mask=m, copy=True) bb = ma.array(b, mask=m, copy=True) return mstats_basic.ttest_rel(aa, bb, axis) if a.shape[axis] != b.shape[axis]: raise ValueError('unequal length arrays') if a.size == 0 or b.size == 0: return np.nan, np.nan n = a.shape[axis] df = float(n - 1) d = (a - b).astype(np.float64) v = np.var(d, axis, ddof=1) dm = np.mean(d, axis) denom = np.sqrt(v / float(n)) with np.errstate(divide='ignore', invalid='ignore'): t = np.divide(dm, denom) t, prob = _ttest_finish(df, t) return Ttest_relResult(t, prob) KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue')) def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'): """ Perform the Kolmogorov-Smirnov test for goodness of fit. This performs a test of the distribution G(x) of an observed random variable against a given distribution F(x). Under the null hypothesis the two distributions are identical, G(x)=F(x). The alternative hypothesis can be either 'two-sided' (default), 'less' or 'greater'. The KS test is only valid for continuous distributions. Parameters ---------- rvs : str, array or callable If a string, it should be the name of a distribution in `scipy.stats`. If an array, it should be a 1-D array of observations of random variables. If a callable, it should be a function to generate random variables; it is required to have a keyword argument `size`. cdf : str or callable If a string, it should be the name of a distribution in `scipy.stats`. If `rvs` is a string then `cdf` can be False or the same as `rvs`. If a callable, that callable is used to calculate the cdf. args : tuple, sequence, optional Distribution parameters, used if `rvs` or `cdf` are strings. N : int, optional Sample size if `rvs` is string or callable. Default is 20. alternative : {'two-sided', 'less','greater'}, optional Defines the alternative hypothesis (see explanation above). Default is 'two-sided'. mode : 'approx' (default) or 'asymp', optional Defines the distribution used for calculating the p-value. - 'approx' : use approximation to exact distribution of test statistic - 'asymp' : use asymptotic distribution of test statistic Returns ------- statistic : float KS test statistic, either D, D+ or D-. pvalue : float One-tailed or two-tailed p-value. Notes ----- In the one-sided test, the alternative is that the empirical cumulative distribution function of the random variable is "less" or "greater" than the cumulative distribution function F(x) of the hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``. 
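In the two-sided case the statistic is the largest absolute difference between the
empirical CDF of the sample and F(x), evaluated just above and just below each
observation. A minimal sketch for a tiny made-up sample under a standard normal
hypothesis:

>>> import numpy as np
>>> from scipy import stats
>>> x = np.array([-0.5, 0.1, 0.7])
>>> cdf = stats.norm.cdf(np.sort(x))
>>> d_plus = (np.arange(1.0, 4) / 3 - cdf).max()   # ECDF just above each point
>>> d_minus = (cdf - np.arange(0.0, 3) / 3).max()  # ECDF just below each point
>>> np.isclose(max(d_plus, d_minus), stats.kstest(x, 'norm').statistic)
True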
Examples -------- >>> from scipy import stats >>> x = np.linspace(-15, 15, 9) >>> stats.kstest(x, 'norm') (0.44435602715924361, 0.038850142705171065) >>> np.random.seed(987654321) # set random seed to get the same result >>> stats.kstest('norm', False, N=100) (0.058352892479417884, 0.88531190944151261) The above lines are equivalent to: >>> np.random.seed(987654321) >>> stats.kstest(stats.norm.rvs(size=100), 'norm') (0.058352892479417884, 0.88531190944151261) *Test against one-sided alternative hypothesis* Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``: >>> np.random.seed(987654321) >>> x = stats.norm.rvs(loc=0.2, size=100) >>> stats.kstest(x,'norm', alternative = 'less') (0.12464329735846891, 0.040989164077641749) Reject equal distribution against alternative hypothesis: less >>> stats.kstest(x,'norm', alternative = 'greater') (0.0072115233216311081, 0.98531158590396395) Don't reject equal distribution against alternative hypothesis: greater >>> stats.kstest(x,'norm', mode='asymp') (0.12464329735846891, 0.08944488871182088) *Testing t distributed random variables against normal distribution* With 100 degrees of freedom the t distribution looks close to the normal distribution, and the K-S test does not reject the hypothesis that the sample came from the normal distribution: >>> np.random.seed(987654321) >>> stats.kstest(stats.t.rvs(100,size=100),'norm') (0.072018929165471257, 0.67630062862479168) With 3 degrees of freedom the t distribution looks sufficiently different from the normal distribution, that we can reject the hypothesis that the sample came from the normal distribution at the 10% level: >>> np.random.seed(987654321) >>> stats.kstest(stats.t.rvs(3,size=100),'norm') (0.131016895759829, 0.058826222555312224) """ if isinstance(rvs, string_types): if (not cdf) or (cdf == rvs): cdf = getattr(distributions, rvs).cdf rvs = getattr(distributions, rvs).rvs else: raise AttributeError("if rvs is string, cdf has to be the " "same distribution") if isinstance(cdf, string_types): cdf = getattr(distributions, cdf).cdf if callable(rvs): kwds = {'size': N} vals = np.sort(rvs(*args, **kwds)) else: vals = np.sort(rvs) N = len(vals) cdfvals = cdf(vals, *args) # to not break compatibility with existing code if alternative == 'two_sided': alternative = 'two-sided' if alternative in ['two-sided', 'greater']: Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max() if alternative == 'greater': return KstestResult(Dplus, distributions.ksone.sf(Dplus, N)) if alternative in ['two-sided', 'less']: Dmin = (cdfvals - np.arange(0.0, N)/N).max() if alternative == 'less': return KstestResult(Dmin, distributions.ksone.sf(Dmin, N)) if alternative == 'two-sided': D = np.max([Dplus, Dmin]) if mode == 'asymp': return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N))) if mode == 'approx': pval_two = distributions.kstwobign.sf(D * np.sqrt(N)) if N > 2666 or pval_two > 0.80 - N*0.3/1000: return KstestResult(D, pval_two) else: return KstestResult(D, 2 * distributions.ksone.sf(D, N)) # Map from names to lambda_ values used in power_divergence(). _power_div_lambda_names = { "pearson": 1, "log-likelihood": 0, "freeman-tukey": -0.5, "mod-log-likelihood": -1, "neyman": -2, "cressie-read": 2/3, } def _count(a, axis=None): """ Count the number of non-masked elements of an array. This function behaves like np.ma.count(), but is much faster for ndarrays. 
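A minimal sketch of the two code paths, with made-up arrays:

>>> import numpy as np
>>> _count(np.ma.array([1., 2., 3.], mask=[False, True, False]))
2
>>> _count(np.array([1., 2., 3.]))
3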
""" if hasattr(a, 'count'): num = a.count(axis=axis) if isinstance(num, np.ndarray) and num.ndim == 0: # In some cases, the `count` method returns a scalar array (e.g. # np.array(3)), but we want a plain integer. num = int(num) else: if axis is None: num = a.size else: num = a.shape[axis] return num Power_divergenceResult = namedtuple('Power_divergenceResult', ('statistic', 'pvalue')) def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None): """ Cressie-Read power divergence statistic and goodness of fit test. This function tests the null hypothesis that the categorical data has the given frequencies, using the Cressie-Read power divergence statistic. Parameters ---------- f_obs : array_like Observed frequencies in each category. f_exp : array_like, optional Expected frequencies in each category. By default the categories are assumed to be equally likely. ddof : int, optional "Delta degrees of freedom": adjustment to the degrees of freedom for the p-value. The p-value is computed using a chi-squared distribution with ``k - 1 - ddof`` degrees of freedom, where `k` is the number of observed frequencies. The default value of `ddof` is 0. axis : int or None, optional The axis of the broadcast result of `f_obs` and `f_exp` along which to apply the test. If axis is None, all values in `f_obs` are treated as a single data set. Default is 0. lambda_ : float or str, optional `lambda_` gives the power in the Cressie-Read power divergence statistic. The default is 1. For convenience, `lambda_` may be assigned one of the following strings, in which case the corresponding numerical value is used:: String Value Description "pearson" 1 Pearson's chi-squared statistic. In this case, the function is equivalent to `stats.chisquare`. "log-likelihood" 0 Log-likelihood ratio. Also known as the G-test [3]_. "freeman-tukey" -1/2 Freeman-Tukey statistic. "mod-log-likelihood" -1 Modified log-likelihood ratio. "neyman" -2 Neyman's statistic. "cressie-read" 2/3 The power recommended in [5]_. Returns ------- statistic : float or ndarray The Cressie-Read power divergence test statistic. The value is a float if `axis` is None or if` `f_obs` and `f_exp` are 1-D. pvalue : float or ndarray The p-value of the test. The value is a float if `ddof` and the return value `stat` are scalars. See Also -------- chisquare Notes ----- This test is invalid when the observed or expected frequencies in each category are too small. A typical rule is that all of the observed and expected frequencies should be at least 5. When `lambda_` is less than zero, the formula for the statistic involves dividing by `f_obs`, so a warning or error may be generated if any value in `f_obs` is 0. Similarly, a warning or error may be generated if any value in `f_exp` is zero when `lambda_` >= 0. The default degrees of freedom, k-1, are for the case when no parameters of the distribution are estimated. If p parameters are estimated by efficient maximum likelihood then the correct degrees of freedom are k-1-p. If the parameters are estimated in a different way, then the dof can be between k-1-p and k-1. However, it is also possible that the asymptotic distribution is not a chisquare, in which case this test is not appropriate. This function handles masked arrays. If an element of `f_obs` or `f_exp` is masked, then data at that position is ignored, and does not count towards the size of the data set. .. versionadded:: 0.13.0 References ---------- .. [1] Lowry, Richard. "Concepts and Applications of Inferential Statistics". Chapter 8. 
http://faculty.vassar.edu/lowry/ch8pt1.html .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test .. [3] "G-test", http://en.wikipedia.org/wiki/G-test .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and practice of statistics in biological research", New York: Freeman (1981) .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984), pp. 440-464. Examples -------- (See `chisquare` for more examples.) When just `f_obs` is given, it is assumed that the expected frequencies are uniform and given by the mean of the observed frequencies. Here we perform a G-test (i.e. use the log-likelihood ratio statistic): >>> from scipy.stats import power_divergence >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood') (2.006573162632538, 0.84823476779463769) The expected frequencies can be given with the `f_exp` argument: >>> power_divergence([16, 18, 16, 14, 12, 12], ... f_exp=[16, 16, 16, 16, 16, 8], ... lambda_='log-likelihood') (3.3281031458963746, 0.6495419288047497) When `f_obs` is 2-D, by default the test is applied to each column. >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T >>> obs.shape (6, 2) >>> power_divergence(obs, lambda_="log-likelihood") (array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225])) By setting ``axis=None``, the test is applied to all data in the array, which is equivalent to applying the test to the flattened array. >>> power_divergence(obs, axis=None) (23.31034482758621, 0.015975692534127565) >>> power_divergence(obs.ravel()) (23.31034482758621, 0.015975692534127565) `ddof` is the change to make to the default degrees of freedom. >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1) (2.0, 0.73575888234288467) The calculation of the p-values is done by broadcasting the test statistic with `ddof`. >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ])) `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared statistics, we must use ``axis=1``: >>> power_divergence([16, 18, 16, 14, 12, 12], ... f_exp=[[16, 16, 16, 16, 16, 8], ... [8, 20, 20, 16, 12, 12]], ... axis=1) (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846])) """ # Convert the input argument `lambda_` to a numerical value. if isinstance(lambda_, string_types): if lambda_ not in _power_div_lambda_names: names = repr(list(_power_div_lambda_names.keys()))[1:-1] raise ValueError("invalid string for lambda_: {0!r}. Valid strings " "are {1}".format(lambda_, names)) lambda_ = _power_div_lambda_names[lambda_] elif lambda_ is None: lambda_ = 1 f_obs = np.asanyarray(f_obs) if f_exp is not None: f_exp = np.atleast_1d(np.asanyarray(f_exp)) else: # Compute the equivalent of # f_exp = f_obs.mean(axis=axis, keepdims=True) # Older versions of numpy do not have the 'keepdims' argument, so # we have to do a little work to achieve the same result. # Ignore 'invalid' errors so the edge case of a data set with length 0 # is handled without spurious warnings. with np.errstate(invalid='ignore'): f_exp = np.atleast_1d(f_obs.mean(axis=axis)) if axis is not None: reduced_shape = list(f_obs.shape) reduced_shape[axis] = 1 f_exp.shape = reduced_shape # `terms` is the array of terms that are summed along `axis` to create # the test statistic. 
We use some specialized code for a few special # cases of lambda_. if lambda_ == 1: # Pearson's chi-squared statistic terms = (f_obs - f_exp)**2 / f_exp elif lambda_ == 0: # Log-likelihood ratio (i.e. G-test) terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp) elif lambda_ == -1: # Modified log-likelihood ratio terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs) else: # General Cressie-Read power divergence. terms = f_obs * ((f_obs / f_exp)**lambda_ - 1) terms /= 0.5 * lambda_ * (lambda_ + 1) stat = terms.sum(axis=axis) num_obs = _count(terms, axis=axis) ddof = asarray(ddof) p = distributions.chi2.sf(stat, num_obs - 1 - ddof) return Power_divergenceResult(stat, p) def chisquare(f_obs, f_exp=None, ddof=0, axis=0): """ Calculate a one-way chi square test. The chi square test tests the null hypothesis that the categorical data has the given frequencies. Parameters ---------- f_obs : array_like Observed frequencies in each category. f_exp : array_like, optional Expected frequencies in each category. By default the categories are assumed to be equally likely. ddof : int, optional "Delta degrees of freedom": adjustment to the degrees of freedom for the p-value. The p-value is computed using a chi-squared distribution with ``k - 1 - ddof`` degrees of freedom, where `k` is the number of observed frequencies. The default value of `ddof` is 0. axis : int or None, optional The axis of the broadcast result of `f_obs` and `f_exp` along which to apply the test. If axis is None, all values in `f_obs` are treated as a single data set. Default is 0. Returns ------- chisq : float or ndarray The chi-squared test statistic. The value is a float if `axis` is None or `f_obs` and `f_exp` are 1-D. p : float or ndarray The p-value of the test. The value is a float if `ddof` and the return value `chisq` are scalars. See Also -------- power_divergence mstats.chisquare Notes ----- This test is invalid when the observed or expected frequencies in each category are too small. A typical rule is that all of the observed and expected frequencies should be at least 5. The default degrees of freedom, k-1, are for the case when no parameters of the distribution are estimated. If p parameters are estimated by efficient maximum likelihood then the correct degrees of freedom are k-1-p. If the parameters are estimated in a different way, then the dof can be between k-1-p and k-1. However, it is also possible that the asymptotic distribution is not a chisquare, in which case this test is not appropriate. References ---------- .. [1] Lowry, Richard. "Concepts and Applications of Inferential Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test Examples -------- When just `f_obs` is given, it is assumed that the expected frequencies are uniform and given by the mean of the observed frequencies. >>> from scipy.stats import chisquare >>> chisquare([16, 18, 16, 14, 12, 12]) (2.0, 0.84914503608460956) With `f_exp` the expected frequencies can be given. >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8]) (3.5, 0.62338762774958223) When `f_obs` is 2-D, by default the test is applied to each column. >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T >>> obs.shape (6, 2) >>> chisquare(obs) (array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415])) By setting ``axis=None``, the test is applied to all data in the array, which is equivalent to applying the test to the flattened array. 
>>> chisquare(obs, axis=None) (23.31034482758621, 0.015975692534127565) >>> chisquare(obs.ravel()) (23.31034482758621, 0.015975692534127565) `ddof` is the change to make to the default degrees of freedom. >>> chisquare([16, 18, 16, 14, 12, 12], ddof=1) (2.0, 0.73575888234288467) The calculation of the p-values is done by broadcasting the chi-squared statistic with `ddof`. >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2]) (2.0, array([ 0.84914504, 0.73575888, 0.5724067 ])) `f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting `f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared statistics, we use ``axis=1``: >>> chisquare([16, 18, 16, 14, 12, 12], ... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]], ... axis=1) (array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846])) """ return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_="pearson") Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue')) def ks_2samp(data1, data2): """ Compute the Kolmogorov-Smirnov statistic on 2 samples. This is a two-sided test for the null hypothesis that 2 independent samples are drawn from the same continuous distribution. Parameters ---------- data1, data2 : sequence of 1-D ndarrays two arrays of sample observations assumed to be drawn from a continuous distribution, sample sizes can be different Returns ------- statistic : float KS statistic pvalue : float two-tailed p-value Notes ----- This tests whether 2 samples are drawn from the same distribution. Note that, like in the case of the one-sample K-S test, the distribution is assumed to be continuous. This is the two-sided test, one-sided tests are not implemented. The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution. If the K-S statistic is small or the p-value is high, then we cannot reject the hypothesis that the distributions of the two samples are the same. 
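The statistic itself is the largest absolute difference between the two empirical
CDFs. A minimal sketch with two small, made-up samples that interleave evenly, so
the largest gap between the two ECDFs is a single step of 1/4:

>>> import numpy as np
>>> from scipy import stats
>>> d1 = np.array([1.0, 2.0, 3.0, 4.0])
>>> d2 = np.array([1.5, 2.5, 3.5, 4.5])
>>> stats.ks_2samp(d1, d2).statistic
0.25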
Examples -------- >>> from scipy import stats >>> np.random.seed(12345678) #fix random seed to get the same result >>> n1 = 200 # size of first sample >>> n2 = 300 # size of second sample For a different distribution, we can reject the null hypothesis since the pvalue is below 1%: >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1) >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5) >>> stats.ks_2samp(rvs1, rvs2) (0.20833333333333337, 4.6674975515806989e-005) For a slightly different distribution, we cannot reject the null hypothesis at a 10% or lower alpha since the p-value at 0.144 is higher than 10% >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0) >>> stats.ks_2samp(rvs1, rvs3) (0.10333333333333333, 0.14498781825751686) For an identical distribution, we cannot reject the null hypothesis since the p-value is high, 41%: >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0) >>> stats.ks_2samp(rvs1, rvs4) (0.07999999999999996, 0.41126949729859719) """ data1 = np.sort(data1) data2 = np.sort(data2) n1 = data1.shape[0] n2 = data2.shape[0] data_all = np.concatenate([data1, data2]) cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1) cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2) d = np.max(np.absolute(cdf1 - cdf2)) # Note: d absolute not signed distance en = np.sqrt(n1 * n2 / float(n1 + n2)) try: prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d) except: prob = 1.0 return Ks_2sampResult(d, prob) def tiecorrect(rankvals): """ Tie correction factor for ties in the Mann-Whitney U and Kruskal-Wallis H tests. Parameters ---------- rankvals : array_like A 1-D sequence of ranks. Typically this will be the array returned by `stats.rankdata`. Returns ------- factor : float Correction factor for U or H. See Also -------- rankdata : Assign ranks to the data mannwhitneyu : Mann-Whitney rank test kruskal : Kruskal-Wallis H test References ---------- .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences. New York: McGraw-Hill. Examples -------- >>> from scipy.stats import tiecorrect, rankdata >>> tiecorrect([1, 2.5, 2.5, 4]) 0.9 >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4]) >>> ranks array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5]) >>> tiecorrect(ranks) 0.9833333333333333 """ arr = np.sort(rankvals) idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0] cnt = np.diff(idx).astype(np.float64) size = np.float64(arr.size) return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size) MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue')) def mannwhitneyu(x, y, use_continuity=True, alternative=None): """ Compute the Mann-Whitney rank test on samples x and y. Parameters ---------- x, y : array_like Array of samples, should be one-dimensional. use_continuity : bool, optional Whether a continuity correction (1/2.) should be taken into account. Default is True. alternative : None (deprecated), 'less', 'two-sided', or 'greater' Whether to get the p-value for the one-sided hypothesis ('less' or 'greater') or for the two-sided hypothesis ('two-sided'). Defaults to None, which results in a p-value half the size of the 'two-sided' p-value and a different U statistic. The default behavior is not the same as using 'less' or 'greater': it only exists for backward compatibility and is deprecated. Returns ------- statistic : float The Mann-Whitney U statistic, equal to min(U for x, U for y) if `alternative` is equal to None (deprecated; exists for backward compatibility), and U for y otherwise. 
pvalue : float p-value assuming an asymptotic normal distribution. One-sided or two-sided, depending on the choice of `alternative`. Notes ----- Use only when the number of observation in each sample is > 20 and you have 2 independent samples of ranks. Mann-Whitney U is significant if the u-obtained is LESS THAN or equal to the critical value of U. This test corrects for ties and by default uses a continuity correction. References ---------- .. [1] https://en.wikipedia.org/wiki/Mann-Whitney_U_test .. [2] H.B. Mann and D.R. Whitney, "On a Test of Whether one of Two Random Variables is Stochastically Larger than the Other," The Annals of Mathematical Statistics, vol. 18, no. 1, pp. 50-60, 1947. """ if alternative is None: warnings.warn("Calling `mannwhitneyu` without specifying " "`alternative` is deprecated.", DeprecationWarning) x = np.asarray(x) y = np.asarray(y) n1 = len(x) n2 = len(y) ranked = rankdata(np.concatenate((x, y))) rankx = ranked[0:n1] # get the x-ranks u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x u2 = n1*n2 - u1 # remainder is U for y T = tiecorrect(ranked) if T == 0: raise ValueError('All numbers are identical in mannwhitneyu') sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0) meanrank = n1*n2/2.0 + 0.5 * use_continuity if alternative is None or alternative == 'two-sided': bigu = max(u1, u2) elif alternative == 'less': bigu = u1 elif alternative == 'greater': bigu = u2 else: raise ValueError("alternative should be None, 'less', 'greater' " "or 'two-sided'") z = (bigu - meanrank) / sd if alternative is None: # This behavior, equal to half the size of the two-sided # p-value, is deprecated. p = distributions.norm.sf(abs(z)) elif alternative == 'two-sided': p = 2 * distributions.norm.sf(abs(z)) else: p = distributions.norm.sf(z) u = u2 # This behavior is deprecated. if alternative is None: u = min(u1, u2) return MannwhitneyuResult(u, p) RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue')) def ranksums(x, y): """ Compute the Wilcoxon rank-sum statistic for two samples. The Wilcoxon rank-sum test tests the null hypothesis that two sets of measurements are drawn from the same distribution. The alternative hypothesis is that values in one sample are more likely to be larger than the values in the other sample. This test should be used to compare two samples from continuous distributions. It does not handle ties between measurements in x and y. For tie-handling and an optional continuity correction see `scipy.stats.mannwhitneyu`. Parameters ---------- x,y : array_like The data from the two samples Returns ------- statistic : float The test statistic under the large-sample approximation that the rank sum statistic is normally distributed pvalue : float The two-sided p-value of the test References ---------- .. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test """ x, y = map(np.asarray, (x, y)) n1 = len(x) n2 = len(y) alldata = np.concatenate((x, y)) ranked = rankdata(alldata) x = ranked[:n1] s = np.sum(x, axis=0) expected = n1 * (n1+n2+1) / 2.0 z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0) prob = 2 * distributions.norm.sf(abs(z)) return RanksumsResult(z, prob) KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue')) def kruskal(*args, **kwargs): """ Compute the Kruskal-Wallis H-test for independent samples The Kruskal-Wallis H-test tests the null hypothesis that the population median of all of the groups are equal. It is a non-parametric version of ANOVA. 
The test works on 2 or more independent samples, which may have different sizes. Note that rejecting the null hypothesis does not indicate which of the groups differs. Post-hoc comparisons between groups are required to determine which groups are different. Parameters ---------- sample1, sample2, ... : array_like Two or more arrays with the sample measurements can be given as arguments. nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- statistic : float The Kruskal-Wallis H statistic, corrected for ties pvalue : float The p-value for the test using the assumption that H has a chi square distribution See Also -------- f_oneway : 1-way ANOVA mannwhitneyu : Mann-Whitney rank test on two samples. friedmanchisquare : Friedman test for repeated measurements Notes ----- Due to the assumption that H has a chi square distribution, the number of samples in each group must not be too small. A typical rule is that each sample must have at least 5 measurements. References ---------- .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in One-Criterion Variance Analysis", Journal of the American Statistical Association, Vol. 47, Issue 260, pp. 583-621, 1952. .. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance Examples -------- >>> from scipy import stats >>> x = [1, 3, 5, 7, 9] >>> y = [2, 4, 6, 8, 10] >>> stats.kruskal(x, y) KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895) >>> x = [1, 1, 1] >>> y = [2, 2, 2] >>> z = [2, 2] >>> stats.kruskal(x, y, z) KruskalResult(statistic=7.0, pvalue=0.0301973834223185) """ args = list(map(np.asarray, args)) num_groups = len(args) if num_groups < 2: raise ValueError("Need at least two groups in stats.kruskal()") for arg in args: if arg.size == 0: return KruskalResult(np.nan, np.nan) n = np.asarray(list(map(len, args))) if 'nan_policy' in kwargs.keys(): if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'): raise ValueError("nan_policy must be 'propagate', " "'raise' or'omit'") else: nan_policy = kwargs['nan_policy'] else: nan_policy = 'propagate' contains_nan = False for arg in args: cn = _contains_nan(arg, nan_policy) if cn[0]: contains_nan = True break if contains_nan and nan_policy == 'omit': for a in args: a = ma.masked_invalid(a) return mstats_basic.kruskal(*args) if contains_nan and nan_policy == 'propagate': return KruskalResult(np.nan, np.nan) alldata = np.concatenate(args) ranked = rankdata(alldata) ties = tiecorrect(ranked) if ties == 0: raise ValueError('All numbers are identical in kruskal') # Compute sum^2/n for each group and sum j = np.insert(np.cumsum(n), 0, 0) ssbn = 0 for i in range(num_groups): ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i]) totaln = np.sum(n) h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1) df = num_groups - 1 h /= ties return KruskalResult(h, distributions.chi2.sf(h, df)) FriedmanchisquareResult = namedtuple('FriedmanchisquareResult', ('statistic', 'pvalue')) def friedmanchisquare(*args): """ Compute the Friedman test for repeated measurements The Friedman test tests the null hypothesis that repeated measurements of the same individuals have the same distribution. It is often used to test for consistency among measurements obtained in different ways. 
For example, if two measurement techniques are used on the same set of individuals, the Friedman test can be used to determine if the two measurement techniques are consistent. Parameters ---------- measurements1, measurements2, measurements3... : array_like Arrays of measurements. All of the arrays must have the same number of elements. At least 3 sets of measurements must be given. Returns ------- statistic : float the test statistic, correcting for ties pvalue : float the associated p-value assuming that the test statistic has a chi squared distribution Notes ----- Due to the assumption that the test statistic has a chi squared distribution, the p-value is only reliable for n > 10 and more than 6 repeated measurements. References ---------- .. [1] http://en.wikipedia.org/wiki/Friedman_test """ k = len(args) if k < 3: raise ValueError('Less than 3 levels. Friedman test not appropriate.') n = len(args[0]) for i in range(1, k): if len(args[i]) != n: raise ValueError('Unequal N in friedmanchisquare. Aborting.') # Rank data data = np.vstack(args).T data = data.astype(float) for i in range(len(data)): data[i] = rankdata(data[i]) # Handle ties ties = 0 for i in range(len(data)): replist, repnum = find_repeats(array(data[i])) for t in repnum: ties += t * (t*t - 1) c = 1 - ties / float(k*(k*k - 1)*n) ssbn = np.sum(data.sum(axis=0)**2) chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1)) def combine_pvalues(pvalues, method='fisher', weights=None): """ Methods for combining the p-values of independent tests bearing upon the same hypothesis. Parameters ---------- pvalues : array_like, 1-D Array of p-values assumed to come from independent tests. method : {'fisher', 'stouffer'}, optional Name of method to use to combine p-values. The following methods are available: - "fisher": Fisher's method (Fisher's combined probability test), the default. - "stouffer": Stouffer's Z-score method. weights : array_like, 1-D, optional Optional array of weights used only for Stouffer's Z-score method. Returns ------- statistic: float The statistic calculated by the specified method: - "fisher": The chi-squared statistic - "stouffer": The Z-score pval: float The combined p-value. Notes ----- Fisher's method (also known as Fisher's combined probability test) [1]_ uses a chi-squared statistic to compute a combined p-value. The closely related Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The advantage of Stouffer's method is that it is straightforward to introduce weights, which can make Stouffer's method more powerful than Fisher's method when the p-values are from studies of different size [3]_ [4]_. Fisher's method may be extended to combine p-values from dependent tests [5]_. Extensions such as Brown's method and Kost's method are not currently implemented. .. versionadded:: 0.15.0 References ---------- .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method .. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method .. [3] Whitlock, M. C. "Combining probability from independent tests: the weighted Z-method is superior to Fisher's approach." Journal of Evolutionary Biology 18, no. 5 (2005): 1368-1373. .. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method for combining probabilities in meta-analysis." Journal of Evolutionary Biology 24, no. 8 (2011): 1836-1841. .. 
[5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method """ pvalues = np.asarray(pvalues) if pvalues.ndim != 1: raise ValueError("pvalues is not 1-D") if method == 'fisher': Xsq = -2 * np.sum(np.log(pvalues)) pval = distributions.chi2.sf(Xsq, 2 * len(pvalues)) return (Xsq, pval) elif method == 'stouffer': if weights is None: weights = np.ones_like(pvalues) elif len(weights) != len(pvalues): raise ValueError("pvalues and weights must be of the same size.") weights = np.asarray(weights) if weights.ndim != 1: raise ValueError("weights is not 1-D") Zi = distributions.norm.isf(pvalues) Z = np.dot(weights, Zi) / np.linalg.norm(weights) pval = distributions.norm.sf(Z) return (Z, pval) else: raise ValueError( "Invalid method '%s'. Options are 'fisher' or 'stouffer'", method) ##################################### # PROBABILITY CALCULATIONS # ##################################### def _betai(a, b, x): x = np.asarray(x) x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0 return special.betainc(a, b, x) ##################################### # STATISTICAL DISTANCES # ##################################### def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None): r""" Compute the first Wasserstein distance between two 1D distributions. This distance is also known as the earth mover's distance, since it can be seen as the minimum amount of "work" required to transform :math:`u` into :math:`v`, where "work" is measured as the amount of distribution weight that must be moved, multiplied by the distance it has to be moved. .. versionadded:: 1.0.0 Parameters ---------- u_values, v_values : array_like Values observed in the (empirical) distribution. u_weights, v_weights : array_like, optional Weight for each value. If unspecified, each value is assigned the same weight. `u_weights` (resp. `v_weights`) must have the same length as `u_values` (resp. `v_values`). If the weight sum differs from 1, it must still be positive and finite so that the weights can be normalized to sum to 1. Returns ------- distance : float The computed distance between the distributions. Notes ----- The first Wasserstein distance between the distributions :math:`u` and :math:`v` is: .. math:: l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times \mathbb{R}} |x-y| \mathrm{d} \pi (x, y) where :math:`\Gamma (u, v)` is the set of (probability) distributions on :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and :math:`v` on the first and second factors respectively. If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and :math:`v`, this distance also equals to: .. math:: l_1(u, v) = \int_{-\infty}^{+\infty} |U-V| See [2]_ for a proof of the equivalence of both definitions. The input distributions can be empirical, therefore coming from samples whose values are effectively inputs of the function, or they can be seen as generalized functions, in which case they are weighted sums of Dirac delta functions located at the specified values. References ---------- .. [1] "Wasserstein metric", http://en.wikipedia.org/wiki/Wasserstein_metric .. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`. Examples -------- >>> from scipy.stats import wasserstein_distance >>> wasserstein_distance([0, 1, 3], [5, 6, 8]) 5.0 >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2]) 0.25 >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4], ... 
[1.4, 0.9, 3.1, 7.2], [3.2, 3.5]) 4.0781331438047861 """ return _cdf_distance(1, u_values, v_values, u_weights, v_weights) def energy_distance(u_values, v_values, u_weights=None, v_weights=None): r""" Compute the energy distance between two 1D distributions. .. versionadded:: 1.0.0 Parameters ---------- u_values, v_values : array_like Values observed in the (empirical) distribution. u_weights, v_weights : array_like, optional Weight for each value. If unspecified, each value is assigned the same weight. `u_weights` (resp. `v_weights`) must have the same length as `u_values` (resp. `v_values`). If the weight sum differs from 1, it must still be positive and finite so that the weights can be normalized to sum to 1. Returns ------- distance : float The computed distance between the distributions. Notes ----- The energy distance between two distributions :math:`u` and :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, equals to: .. math:: D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| - \mathbb E|Y - Y'| \right)^{1/2} where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are independent random variables whose probability distribution is :math:`u` (resp. :math:`v`). As shown in [2]_, for one-dimensional real-valued variables, the energy distance is linked to the non-distribution-free version of the Cramer-von Mises distance: .. math:: D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2 \right)^{1/2} Note that the common Cramer-von Mises criterion uses the distribution-free version of the distance. See [2]_ (section 2), for more details about both versions of the distance. The input distributions can be empirical, therefore coming from samples whose values are effectively inputs of the function, or they can be seen as generalized functions, in which case they are weighted sums of Dirac delta functions located at the specified values. References ---------- .. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance .. [2] Szekely "E-statistics: The energy of statistical samples." Bowling Green State University, Department of Mathematics and Statistics, Technical Report 02-16 (2002). .. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews: Computational Statistics, 8(1):27-38 (2015). .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer, Munos "The Cramer Distance as a Solution to Biased Wasserstein Gradients" (2017). :arXiv:`1705.10743`. Examples -------- >>> from scipy.stats import energy_distance >>> energy_distance([0], [2]) 2.0000000000000004 >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2]) 1.0000000000000002 >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ], ... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8]) 0.88003340976158217 """ return np.sqrt(2) * _cdf_distance(2, u_values, v_values, u_weights, v_weights) def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None): r""" Compute, between two one-dimensional distributions :math:`u` and :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the statistical distance that is defined as: .. math:: l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p} p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2 gives the energy distance. Parameters ---------- u_values, v_values : array_like Values observed in the (empirical) distribution. u_weights, v_weights : array_like, optional Weight for each value. If unspecified, each value is assigned the same weight. `u_weights` (resp. 
`v_weights`) must have the same length as `u_values` (resp. `v_values`). If the weight sum differs from 1, it must still be positive and finite so that the weights can be normalized to sum to 1. Returns ------- distance : float The computed distance between the distributions. Notes ----- The input distributions can be empirical, therefore coming from samples whose values are effectively inputs of the function, or they can be seen as generalized functions, in which case they are weighted sums of Dirac delta functions located at the specified values. References ---------- .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer, Munos "The Cramer Distance as a Solution to Biased Wasserstein Gradients" (2017). :arXiv:`1705.10743`. """ u_values, u_weights = _validate_distribution(u_values, u_weights) v_values, v_weights = _validate_distribution(v_values, v_weights) u_sorter = np.argsort(u_values) v_sorter = np.argsort(v_values) all_values = np.concatenate((u_values, v_values)) all_values.sort(kind='mergesort') # Compute the differences between pairs of successive values of u and v. deltas = np.diff(all_values) # Get the respective positions of the values of u and v among the values of # both distributions. u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right') v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right') # Calculate the CDFs of u and v using their weights, if specified. if u_weights is None: u_cdf = u_cdf_indices / u_values.size else: u_sorted_cumweights = np.concatenate(([0], np.cumsum(u_weights[u_sorter]))) u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1] if v_weights is None: v_cdf = v_cdf_indices / v_values.size else: v_sorted_cumweights = np.concatenate(([0], np.cumsum(v_weights[v_sorter]))) v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1] # Compute the value of the integral based on the CDFs. # If p = 1 or p = 2, we avoid using np.power, which introduces an overhead # of about 15%. if p == 1: return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas)) if p == 2: return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas))) return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p), deltas)), 1/p) def _validate_distribution(values, weights): """ Validate the values and weights from a distribution input of `cdf_distance` and return them as ndarray objects. Parameters ---------- values : array_like Values observed in the (empirical) distribution. weights : array_like Weight for each value. Returns ------- values : ndarray Values as ndarray. weights : ndarray Weights as ndarray. """ # Validate the value array. values = np.asarray(values, dtype=float) if len(values) == 0: raise ValueError("Distribution can't be empty.") # Validate the weight array, if specified. if weights is not None: weights = np.asarray(weights, dtype=float) if len(weights) != len(values): raise ValueError('Value and weight array-likes for the same ' 'empirical distribution must be of the same size.') if np.any(weights < 0): raise ValueError('All weights must be non-negative.') if not 0 < np.sum(weights) < np.inf: raise ValueError('Weight array-like sum must be positive and ' 'finite. Set as None for an equal distribution of ' 'weight.') return values, weights return values, None ##################################### # SUPPORT FUNCTIONS # ##################################### RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts')) def find_repeats(arr): """ Find repeats and repeat counts. 
Parameters ---------- arr : array_like Input array. This is cast to float64. Returns ------- values : ndarray The unique values from the (flattened) input that are repeated. counts : ndarray Number of times the corresponding 'value' is repeated. Notes ----- In numpy >= 1.9 `numpy.unique` provides similar functionality. The main difference is that `find_repeats` only returns repeated values. Examples -------- >>> from scipy import stats >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5]) RepeatedResults(values=array([2.]), counts=array([4])) >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]]) RepeatedResults(values=array([4., 5.]), counts=array([2, 2])) """ # Note: always copies. return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64))) def _sum_of_squares(a, axis=0): """ Square each element of the input array, and return the sum(s) of that. Parameters ---------- a : array_like Input array. axis : int or None, optional Axis along which to calculate. Default is 0. If None, compute over the whole array `a`. Returns ------- sum_of_squares : ndarray The sum along the given axis for (a**2). See also -------- _square_of_sums : The square(s) of the sum(s) (the opposite of `_sum_of_squares`). """ a, axis = _chk_asarray(a, axis) return np.sum(a*a, axis) def _square_of_sums(a, axis=0): """ Sum elements of the input array, and return the square(s) of that sum. Parameters ---------- a : array_like Input array. axis : int or None, optional Axis along which to calculate. Default is 0. If None, compute over the whole array `a`. Returns ------- square_of_sums : float or ndarray The square of the sum over `axis`. See also -------- _sum_of_squares : The sum of squares (the opposite of `square_of_sums`). """ a, axis = _chk_asarray(a, axis) s = np.sum(a, axis) if not np.isscalar(s): return s.astype(float) * s else: return float(s) * s def rankdata(a, method='average'): """ Assign ranks to data, dealing with ties appropriately. Ranks begin at 1. The `method` argument controls how ranks are assigned to equal values. See [1]_ for further discussion of ranking methods. Parameters ---------- a : array_like The array of values to be ranked. The array is first flattened. method : str, optional The method used to assign ranks to tied elements. The options are 'average', 'min', 'max', 'dense' and 'ordinal'. 'average': The average of the ranks that would have been assigned to all the tied values is assigned to each value. 'min': The minimum of the ranks that would have been assigned to all the tied values is assigned to each value. (This is also referred to as "competition" ranking.) 'max': The maximum of the ranks that would have been assigned to all the tied values is assigned to each value. 'dense': Like 'min', but the rank of the next highest element is assigned the rank immediately after those assigned to the tied elements. 'ordinal': All values are given a distinct rank, corresponding to the order that the values occur in `a`. The default is 'average'. Returns ------- ranks : ndarray An array of length equal to the size of `a`, containing rank scores. References ---------- .. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking Examples -------- >>> from scipy.stats import rankdata >>> rankdata([0, 2, 3, 2]) array([ 1. , 2.5, 4. 
, 2.5]) >>> rankdata([0, 2, 3, 2], method='min') array([ 1, 2, 4, 2]) >>> rankdata([0, 2, 3, 2], method='max') array([ 1, 3, 4, 3]) >>> rankdata([0, 2, 3, 2], method='dense') array([ 1, 2, 3, 2]) >>> rankdata([0, 2, 3, 2], method='ordinal') array([ 1, 2, 4, 3]) """ if method not in ('average', 'min', 'max', 'dense', 'ordinal'): raise ValueError('unknown method "{0}"'.format(method)) arr = np.ravel(np.asarray(a)) algo = 'mergesort' if method == 'ordinal' else 'quicksort' sorter = np.argsort(arr, kind=algo) inv = np.empty(sorter.size, dtype=np.intp) inv[sorter] = np.arange(sorter.size, dtype=np.intp) if method == 'ordinal': return inv + 1 arr = arr[sorter] obs = np.r_[True, arr[1:] != arr[:-1]] dense = obs.cumsum()[inv] if method == 'dense': return dense # cumulative counts of each unique value count = np.r_[np.nonzero(obs)[0], len(obs)] if method == 'max': return count[dense] if method == 'min': return count[dense - 1] + 1 # average method return .5 * (count[dense] + count[dense - 1] + 1)
194,013
32.818023
97
py
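The row above closes the excerpt from scipy/stats/stats.py. As a quick, hedged illustration of the public functions defined in that excerpt (rankdata, wasserstein_distance, energy_distance, find_repeats), here is a small usage sketch. It assumes SciPy >= 1.0 so that the two distance functions are available, and the printed values are taken from the docstring examples shown above.

from scipy.stats import (rankdata, wasserstein_distance, energy_distance,
                         find_repeats)

# Tie-handling options of rankdata, as documented in the excerpt above.
a = [0, 2, 3, 2]
for method in ('average', 'min', 'max', 'dense', 'ordinal'):
    print(method, rankdata(a, method=method))

# Both CDF-based distances are thin wrappers around the same helper
# (_cdf_distance), with p=1 and p=2 respectively.
print(wasserstein_distance([0, 1, 3], [5, 6, 8]))   # 5.0
print(energy_distance([0], [2]))                    # ~2.0

# find_repeats reports only the values that occur more than once.
print(find_repeats([2, 1, 2, 3, 2, 2, 5]))          # values=[2.], counts=[4]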
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_stats_mstats_common.py
from collections import namedtuple import numpy as np from . import distributions __all__ = ['_find_repeats', 'linregress', 'theilslopes'] LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')) def linregress(x, y=None): """ Calculate a linear least-squares regression for two sets of measurements. Parameters ---------- x, y : array_like Two sets of measurements. Both arrays should have the same length. If only x is given (and y=None), then it must be a two-dimensional array where one dimension has length 2. The two sets of measurements are then found by splitting the array along the length-2 dimension. Returns ------- slope : float slope of the regression line intercept : float intercept of the regression line rvalue : float correlation coefficient pvalue : float two-sided p-value for a hypothesis test whose null hypothesis is that the slope is zero, using Wald Test with t-distribution of the test statistic. stderr : float Standard error of the estimated gradient. See also -------- :func:`scipy.optimize.curve_fit` : Use non-linear least squares to fit a function to data. :func:`scipy.optimize.leastsq` : Minimize the sum of squares of a set of equations. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> np.random.seed(12345678) >>> x = np.random.random(10) >>> y = np.random.random(10) >>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) To get coefficient of determination (r_squared) >>> print("r-squared:", r_value**2) r-squared: 0.08040226853902833 Plot the data along with the fitted line >>> plt.plot(x, y, 'o', label='original data') >>> plt.plot(x, intercept + slope*x, 'r', label='fitted line') >>> plt.legend() >>> plt.show() """ TINY = 1.0e-20 if y is None: # x is a (2, N) or (N, 2) shaped array_like x = np.asarray(x) if x.shape[0] == 2: x, y = x elif x.shape[1] == 2: x, y = x.T else: msg = ("If only `x` is given as input, it has to be of shape " "(2, N) or (N, 2), provided shape was %s" % str(x.shape)) raise ValueError(msg) else: x = np.asarray(x) y = np.asarray(y) if x.size == 0 or y.size == 0: raise ValueError("Inputs must not be empty.") n = len(x) xmean = np.mean(x, None) ymean = np.mean(y, None) # average sum of squares: ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat r_num = ssxym r_den = np.sqrt(ssxm * ssym) if r_den == 0.0: r = 0.0 else: r = r_num / r_den # test for numerical error propagation if r > 1.0: r = 1.0 elif r < -1.0: r = -1.0 df = n - 2 slope = r_num / ssxm intercept = ymean - slope*xmean if n == 2: # handle case when only two points are passed in if y[0] == y[1]: prob = 1.0 else: prob = 0.0 sterrest = 0.0 else: t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY))) prob = 2 * distributions.t.sf(np.abs(t), df) sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df) return LinregressResult(slope, intercept, r, prob, sterrest) def theilslopes(y, x=None, alpha=0.95): r""" Computes the Theil-Sen estimator for a set of points (x, y). `theilslopes` implements a method for robust linear regression. It computes the slope as the median of all slopes between paired values. Parameters ---------- y : array_like Dependent variable. x : array_like or None, optional Independent variable. If None, use ``arange(len(y))`` instead. alpha : float, optional Confidence degree between 0 and 1. Default is 95% confidence. Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are interpreted as "find the 90% confidence interval". Returns ------- medslope : float Theil slope. 
medintercept : float Intercept of the Theil line, as ``median(y) - medslope*median(x)``. lo_slope : float Lower bound of the confidence interval on `medslope`. up_slope : float Upper bound of the confidence interval on `medslope`. Notes ----- The implementation of `theilslopes` follows [1]_. The intercept is not defined in [1]_, and here it is defined as ``median(y) - medslope*median(x)``, which is given in [3]_. Other definitions of the intercept exist in the literature. A confidence interval for the intercept is not given as this question is not addressed in [1]_. References ---------- .. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau", J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968. .. [2] H. Theil, "A rank-invariant method of linear and polynomial regression analysis I, II and III", Nederl. Akad. Wetensch., Proc. 53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950. .. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed., John Wiley and Sons, New York, pp. 493. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> x = np.linspace(-5, 5, num=150) >>> y = x + np.random.normal(size=x.size) >>> y[11:15] += 10 # add outliers >>> y[-5:] -= 7 Compute the slope, intercept and 90% confidence interval. For comparison, also compute the least-squares fit with `linregress`: >>> res = stats.theilslopes(y, x, 0.90) >>> lsq_res = stats.linregress(x, y) Plot the results. The Theil-Sen regression line is shown in red, with the dashed red lines illustrating the confidence interval of the slope (note that the dashed red lines are not the confidence interval of the regression as the confidence interval of the intercept is not included). The green line shows the least-squares fit for comparison. >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, y, 'b.') >>> ax.plot(x, res[1] + res[0] * x, 'r-') >>> ax.plot(x, res[1] + res[2] * x, 'r--') >>> ax.plot(x, res[1] + res[3] * x, 'r--') >>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-') >>> plt.show() """ # We copy both x and y so we can use _find_repeats. y = np.array(y).flatten() if x is None: x = np.arange(len(y), dtype=float) else: x = np.array(x, dtype=float).flatten() if len(x) != len(y): raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x))) # Compute sorted slopes only when deltax > 0 deltax = x[:, np.newaxis] - x deltay = y[:, np.newaxis] - y slopes = deltay[deltax > 0] / deltax[deltax > 0] slopes.sort() medslope = np.median(slopes) medinter = np.median(y) - medslope * np.median(x) # Now compute confidence intervals if alpha > 0.5: alpha = 1. - alpha z = distributions.norm.ppf(alpha / 2.) # This implements (2.6) from Sen (1968) _, nxreps = _find_repeats(x) _, nyreps = _find_repeats(y) nt = len(slopes) # N in Sen (1968) ny = len(y) # n in Sen (1968) # Equation 2.6 in Sen (1968): sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) - sum(k * (k-1) * (2*k + 5) for k in nxreps) - sum(k * (k-1) * (2*k + 5) for k in nyreps)) # Find the confidence interval indices in `slopes` sigma = np.sqrt(sigsq) Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1) Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0) delta = slopes[[Rl, Ru]] return medslope, medinter, delta[0], delta[1] def _find_repeats(arr): # This function assumes it may clobber its input. if len(arr) == 0: return np.array(0, np.float64), np.array(0, np.intp) # XXX This cast was previously needed for the Fortran implementation, # should we ditch it? 
arr = np.asarray(arr, np.float64).ravel() arr.sort() # Taken from NumPy 1.9's np.unique. change = np.concatenate(([True], arr[1:] != arr[:-1])) unique = arr[change] change_idx = np.concatenate(np.nonzero(change) + ([arr.size],)) freq = np.diff(change_idx) atleast2 = freq > 1 return unique[atleast2], freq[atleast2]
    arr = np.asarray(arr, np.float64).ravel()
    arr.sort()

    # Taken from NumPy 1.9's np.unique.
    change = np.concatenate(([True], arr[1:] != arr[:-1]))
    unique = arr[change]
    change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
    freq = np.diff(change_idx)
    atleast2 = freq > 1
    return unique[atleast2], freq[atleast2]
8,684
32.532819
86
py
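A hedged comparison sketch for the two estimators defined in _stats_mstats_common.py above: an ordinary least-squares fit via linregress and the robust Theil-Sen fit via theilslopes. The synthetic data mirrors the theilslopes docstring; the seed is added here only for reproducibility and is not part of the original example.

import numpy as np
from scipy import stats

np.random.seed(12345678)          # hypothetical seed, for reproducibility only
x = np.linspace(-5, 5, num=150)
y = x + np.random.normal(size=x.size)
y[11:15] += 10                    # outliers that pull the least-squares slope

lsq = stats.linregress(x, y)
medslope, medintercept, lo_slope, up_slope = stats.theilslopes(y, x, alpha=0.90)

print("OLS slope      :", lsq.slope)
print("Theil-Sen slope:", medslope, "90% CI:", (lo_slope, up_slope))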
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/mstats_extras.py
""" Additional statistics functions with support for masked arrays. """ # Original author (2007): Pierre GF Gerard-Marchant from __future__ import division, print_function, absolute_import __all__ = ['compare_medians_ms', 'hdquantiles', 'hdmedian', 'hdquantiles_sd', 'idealfourths', 'median_cihs','mjci','mquantiles_cimj', 'rsh', 'trimmed_mean_ci',] import numpy as np from numpy import float_, int_, ndarray import numpy.ma as ma from numpy.ma import MaskedArray from . import mstats_basic as mstats from scipy.stats.distributions import norm, beta, t, binom def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,): """ Computes quantile estimates with the Harrell-Davis method. The quantile estimates are calculated as a weighted linear combination of order statistics. Parameters ---------- data : array_like Data array. prob : sequence, optional Sequence of quantiles to compute. axis : int or None, optional Axis along which to compute the quantiles. If None, use a flattened array. var : bool, optional Whether to return the variance of the estimate. Returns ------- hdquantiles : MaskedArray A (p,) array of quantiles (if `var` is False), or a (2,p) array of quantiles and variances (if `var` is True), where ``p`` is the number of quantiles. See Also -------- hdquantiles_sd """ def _hd_1D(data,prob,var): "Computes the HD quantiles for a 1D array. Returns nan for invalid data." xsorted = np.squeeze(np.sort(data.compressed().view(ndarray))) # Don't use length here, in case we have a numpy scalar n = xsorted.size hd = np.empty((2,len(prob)), float_) if n < 2: hd.flat = np.nan if var: return hd return hd[0] v = np.arange(n+1) / float(n) betacdf = beta.cdf for (i,p) in enumerate(prob): _w = betacdf(v, (n+1)*p, (n+1)*(1-p)) w = _w[1:] - _w[:-1] hd_mean = np.dot(w, xsorted) hd[0,i] = hd_mean # hd[1,i] = np.dot(w, (xsorted-hd_mean)**2) # hd[0, prob == 0] = xsorted[0] hd[0, prob == 1] = xsorted[-1] if var: hd[1, prob == 0] = hd[1, prob == 1] = np.nan return hd return hd[0] # Initialization & checks data = ma.array(data, copy=False, dtype=float_) p = np.array(prob, copy=False, ndmin=1) # Computes quantiles along axis (or globally) if (axis is None) or (data.ndim == 1): result = _hd_1D(data, p, var) else: if data.ndim > 2: raise ValueError("Array 'data' must be at most two dimensional, " "but got data.ndim = %d" % data.ndim) result = ma.apply_along_axis(_hd_1D, axis, data, p, var) return ma.fix_invalid(result, copy=False) def hdmedian(data, axis=-1, var=False): """ Returns the Harrell-Davis estimate of the median along the given axis. Parameters ---------- data : ndarray Data array. axis : int, optional Axis along which to compute the quantiles. If None, use a flattened array. var : bool, optional Whether to return the variance of the estimate. Returns ------- hdmedian : MaskedArray The median values. If ``var=True``, the variance is returned inside the masked array. E.g. for a 1-D array the shape change from (1,) to (2,). """ result = hdquantiles(data,[0.5], axis=axis, var=var) return result.squeeze() def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None): """ The standard error of the Harrell-Davis quantile estimates by jackknife. Parameters ---------- data : array_like Data array. prob : sequence, optional Sequence of quantiles to compute. axis : int, optional Axis along which to compute the quantiles. If None, use a flattened array. Returns ------- hdquantiles_sd : MaskedArray Standard error of the Harrell-Davis quantile estimates. 
See Also -------- hdquantiles """ def _hdsd_1D(data, prob): "Computes the std error for 1D arrays." xsorted = np.sort(data.compressed()) n = len(xsorted) hdsd = np.empty(len(prob), float_) if n < 2: hdsd.flat = np.nan vv = np.arange(n) / float(n-1) betacdf = beta.cdf for (i,p) in enumerate(prob): _w = betacdf(vv, (n+1)*p, (n+1)*(1-p)) w = _w[1:] - _w[:-1] mx_ = np.fromiter([np.dot(w,xsorted[np.r_[list(range(0,k)), list(range(k+1,n))].astype(int_)]) for k in range(n)], dtype=float_) mx_var = np.array(mx_.var(), copy=False, ndmin=1) * n / float(n-1) hdsd[i] = float(n-1) * np.sqrt(np.diag(mx_var).diagonal() / float(n)) return hdsd # Initialization & checks data = ma.array(data, copy=False, dtype=float_) p = np.array(prob, copy=False, ndmin=1) # Computes quantiles along axis (or globally) if (axis is None): result = _hdsd_1D(data, p) else: if data.ndim > 2: raise ValueError("Array 'data' must be at most two dimensional, " "but got data.ndim = %d" % data.ndim) result = ma.apply_along_axis(_hdsd_1D, axis, data, p) return ma.fix_invalid(result, copy=False).ravel() def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True), alpha=0.05, axis=None): """ Selected confidence interval of the trimmed mean along the given axis. Parameters ---------- data : array_like Input data. limits : {None, tuple}, optional None or a two item tuple. Tuple of the percentages to cut on each side of the array, with respect to the number of unmasked data, as floats between 0. and 1. If ``n`` is the number of unmasked data before trimming, then (``n * limits[0]``)th smallest data and (``n * limits[1]``)th largest data are masked. The total number of unmasked data after trimming is ``n * (1. - sum(limits))``. The value of one limit can be set to None to indicate an open interval. Defaults to (0.2, 0.2). inclusive : (2,) tuple of boolean, optional If relative==False, tuple indicating whether values exactly equal to the absolute limits are allowed. If relative==True, tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). Defaults to (True, True). alpha : float, optional Confidence level of the intervals. Defaults to 0.05. axis : int, optional Axis along which to cut. If None, uses a flattened version of `data`. Defaults to None. Returns ------- trimmed_mean_ci : (2,) ndarray The lower and upper confidence intervals of the trimmed data. """ data = ma.array(data, copy=False) trimmed = mstats.trimr(data, limits=limits, inclusive=inclusive, axis=axis) tmean = trimmed.mean(axis) tstde = mstats.trimmed_stde(data,limits=limits,inclusive=inclusive,axis=axis) df = trimmed.count(axis) - 1 tppf = t.ppf(1-alpha/2.,df) return np.array((tmean - tppf*tstde, tmean+tppf*tstde)) def mjci(data, prob=[0.25,0.5,0.75], axis=None): """ Returns the Maritz-Jarrett estimators of the standard error of selected experimental quantiles of the data. Parameters ---------- data : ndarray Data array. prob : sequence, optional Sequence of quantiles to compute. axis : int or None, optional Axis along which to compute the quantiles. If None, use a flattened array. 
""" def _mjci_1D(data, p): data = np.sort(data.compressed()) n = data.size prob = (np.array(p) * n + 0.5).astype(int_) betacdf = beta.cdf mj = np.empty(len(prob), float_) x = np.arange(1,n+1, dtype=float_) / n y = x - 1./n for (i,m) in enumerate(prob): W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m) C1 = np.dot(W,data) C2 = np.dot(W,data**2) mj[i] = np.sqrt(C2 - C1**2) return mj data = ma.array(data, copy=False) if data.ndim > 2: raise ValueError("Array 'data' must be at most two dimensional, " "but got data.ndim = %d" % data.ndim) p = np.array(prob, copy=False, ndmin=1) # Computes quantiles along axis (or globally) if (axis is None): return _mjci_1D(data, p) else: return ma.apply_along_axis(_mjci_1D, axis, data, p) def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None): """ Computes the alpha confidence interval for the selected quantiles of the data, with Maritz-Jarrett estimators. Parameters ---------- data : ndarray Data array. prob : sequence, optional Sequence of quantiles to compute. alpha : float, optional Confidence level of the intervals. axis : int or None, optional Axis along which to compute the quantiles. If None, use a flattened array. Returns ------- ci_lower : ndarray The lower boundaries of the confidence interval. Of the same length as `prob`. ci_upper : ndarray The upper boundaries of the confidence interval. Of the same length as `prob`. """ alpha = min(alpha, 1 - alpha) z = norm.ppf(1 - alpha/2.) xq = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis) smj = mjci(data, prob, axis=axis) return (xq - z * smj, xq + z * smj) def median_cihs(data, alpha=0.05, axis=None): """ Computes the alpha-level confidence interval for the median of the data. Uses the Hettmasperger-Sheather method. Parameters ---------- data : array_like Input data. Masked values are discarded. The input should be 1D only, or `axis` should be set to None. alpha : float, optional Confidence level of the intervals. axis : int or None, optional Axis along which to compute the quantiles. If None, use a flattened array. Returns ------- median_cihs Alpha level confidence interval. """ def _cihs_1D(data, alpha): data = np.sort(data.compressed()) n = len(data) alpha = min(alpha, 1-alpha) k = int(binom._ppf(alpha/2., n, 0.5)) gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5) if gk < 1-alpha: k -= 1 gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5) gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5) I = (gk - 1 + alpha)/(gk - gkk) lambd = (n-k) * I / float(k + (n-2*k)*I) lims = (lambd*data[k] + (1-lambd)*data[k-1], lambd*data[n-k-1] + (1-lambd)*data[n-k]) return lims data = ma.array(data, copy=False) # Computes quantiles along axis (or globally) if (axis is None): result = _cihs_1D(data, alpha) else: if data.ndim > 2: raise ValueError("Array 'data' must be at most two dimensional, " "but got data.ndim = %d" % data.ndim) result = ma.apply_along_axis(_cihs_1D, axis, data, alpha) return result def compare_medians_ms(group_1, group_2, axis=None): """ Compares the medians from two independent groups along the given axis. The comparison is performed using the McKean-Schrader estimate of the standard error of the medians. Parameters ---------- group_1 : array_like First dataset. Has to be of size >=7. group_2 : array_like Second dataset. Has to be of size >=7. axis : int, optional Axis along which the medians are estimated. If None, the arrays are flattened. If `axis` is not None, then `group_1` and `group_2` should have the same shape. 
Returns ------- compare_medians_ms : {float, ndarray} If `axis` is None, then returns a float, otherwise returns a 1-D ndarray of floats with a length equal to the length of `group_1` along `axis`. """ (med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis)) (std_1, std_2) = (mstats.stde_median(group_1, axis=axis), mstats.stde_median(group_2, axis=axis)) W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2) return 1 - norm.cdf(W) def idealfourths(data, axis=None): """ Returns an estimate of the lower and upper quartiles. Uses the ideal fourths algorithm. Parameters ---------- data : array_like Input array. axis : int, optional Axis along which the quartiles are estimated. If None, the arrays are flattened. Returns ------- idealfourths : {list of floats, masked array} Returns the two internal values that divide `data` into four parts using the ideal fourths algorithm either along the flattened array (if `axis` is None) or along `axis` of `data`. """ def _idf(data): x = data.compressed() n = len(x) if n < 3: return [np.nan,np.nan] (j,h) = divmod(n/4. + 5/12.,1) j = int(j) qlo = (1-h)*x[j-1] + h*x[j] k = n - j qup = (1-h)*x[k] + h*x[k-1] return [qlo, qup] data = ma.sort(data, axis=axis).view(MaskedArray) if (axis is None): return _idf(data) else: return ma.apply_along_axis(_idf, axis, data) def rsh(data, points=None): """ Evaluates Rosenblatt's shifted histogram estimators for each data point. Rosenblatt's estimator is a centered finite-difference approximation to the derivative of the empirical cumulative distribution function. Parameters ---------- data : sequence Input data, should be 1-D. Masked values are ignored. points : sequence or None, optional Sequence of points where to evaluate Rosenblatt shifted histogram. If None, use the data. """ data = ma.array(data, copy=False) if points is None: points = data else: points = np.array(points, copy=False, ndmin=1) if data.ndim != 1: raise AttributeError("The input array should be 1D only !") n = data.count() r = idealfourths(data, axis=None) h = 1.2 * (r[-1]-r[0]) / n**(1./5) nhi = (data[:,None] <= points[None,:] + h).sum(0) nlo = (data[:,None] < points[None,:] - h).sum(0) return (nhi-nlo) / (2.*n*h)
14,957
30.292887
88
py
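A minimal, hedged sketch of the Harrell-Davis estimators from mstats_extras.py above, run on a masked array so that the invalid entry is ignored. The data values are made up purely for illustration.

import numpy as np
import numpy.ma as ma
from scipy.stats import mstats

# Illustrative data; the NaN is masked out by masked_invalid.
data = ma.masked_invalid([7., 2., 5., np.nan, 9., 4., 6., 3., 8., 1.])

print(mstats.hdquantiles(data, prob=[0.25, 0.5, 0.75]))  # weighted order statistics
print(mstats.hdmedian(data))                             # Harrell-Davis median
print(mstats.median_cihs(data, alpha=0.05))              # Hettmansperger-Sheather CI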
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/mstats.py
""" =================================================================== Statistical functions for masked arrays (:mod:`scipy.stats.mstats`) =================================================================== .. currentmodule:: scipy.stats.mstats This module contains a large number of statistical functions that can be used with masked arrays. Most of these functions are similar to those in scipy.stats but might have small differences in the API or in the algorithm used. Since this is a relatively new package, some API changes are still possible. .. autosummary:: :toctree: generated/ argstoarray chisquare count_tied_groups describe f_oneway find_repeats friedmanchisquare kendalltau kendalltau_seasonal kruskalwallis ks_twosamp kurtosis kurtosistest linregress mannwhitneyu plotting_positions mode moment mquantiles msign normaltest obrientransform pearsonr plotting_positions pointbiserialr rankdata scoreatpercentile sem skew skewtest spearmanr theilslopes tmax tmean tmin trim trima trimboth trimmed_stde trimr trimtail tsem ttest_onesamp ttest_ind ttest_onesamp ttest_rel tvar variation winsorize zmap zscore compare_medians_ms gmean hdmedian hdquantiles hdquantiles_sd hmean idealfourths kruskal ks_2samp median_cihs meppf mjci mquantiles_cimj rsh sen_seasonal_slopes trimmed_mean trimmed_mean_ci trimmed_std trimmed_var ttest_1samp """ from __future__ import division, print_function, absolute_import from .mstats_basic import * from .mstats_extras import * # Functions that support masked array input in stats but need to be kept in the # mstats namespace for backwards compatibility: from scipy.stats import gmean, hmean, zmap, zscore, chisquare
1,883
18.22449
79
py
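The mstats.py module above mostly re-exports names. As a short sketch of the namespace behaviour it documents: the assertion relies on the "from scipy.stats import gmean, ..." line in the source, and the masked-array call shows that the mstats rewrites skip masked values; the sample numbers are illustrative only.

import numpy.ma as ma
import scipy.stats as stats
from scipy.stats import mstats

# gmean is one of the functions kept in the mstats namespace for backwards
# compatibility; it is literally the same object as scipy.stats.gmean.
assert mstats.gmean is stats.gmean

# The masked-array-aware functions ignore masked entries.
x = ma.array([1.0, 2.0, 3.0, 99.0], mask=[False, False, False, True])
print(mstats.describe(x))   # statistics computed from the three unmasked values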
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/morestats.py
from __future__ import division, print_function, absolute_import import math import warnings from collections import namedtuple import numpy as np from numpy import (isscalar, r_, log, around, unique, asarray, zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil, floor, array, compress, pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot) from scipy._lib.six import string_types from scipy import optimize from scipy import special from . import statlib from . import stats from .stats import find_repeats, _contains_nan from .contingency import chi2_contingency from . import distributions from ._distn_infrastructure import rv_generic __all__ = ['mvsdist', 'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', 'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', 'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', 'fligner', 'mood', 'wilcoxon', 'median_test', 'circmean', 'circvar', 'circstd', 'anderson_ksamp' ] Mean = namedtuple('Mean', ('statistic', 'minmax')) Variance = namedtuple('Variance', ('statistic', 'minmax')) Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) def bayes_mvs(data, alpha=0.90): r""" Bayesian confidence intervals for the mean, var, and std. Parameters ---------- data : array_like Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`. Requires 2 or more data points. alpha : float, optional Probability that the returned confidence interval contains the true parameter. Returns ------- mean_cntr, var_cntr, std_cntr : tuple The three results are for the mean, variance and standard deviation, respectively. Each result is a tuple of the form:: (center, (lower, upper)) with `center` the mean of the conditional pdf of the value given the data, and `(lower, upper)` a confidence interval, centered on the median, containing the estimate to a probability ``alpha``. See Also -------- mvsdist Notes ----- Each tuple of mean, variance, and standard deviation estimates represent the (center, (lower, upper)) with center the mean of the conditional pdf of the value given the data and (lower, upper) is a confidence interval centered on the median, containing the estimate to a probability ``alpha``. Converts data to 1-D and assumes all data has the same mean and variance. Uses Jeffrey's prior for variance and std. Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278, 2006. 
Examples -------- First a basic example to demonstrate the outputs: >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.bayes_mvs(data) >>> mean Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467)) >>> var Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...)) >>> std Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631)) Now we generate some normally distributed random data, and get estimates of mean and standard deviation with 95% confidence intervals for those estimates: >>> n_samples = 100000 >>> data = stats.norm.rvs(size=n_samples) >>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95) >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.hist(data, bins=100, density=True, label='Histogram of data') >>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean') >>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r', ... alpha=0.2, label=r'Estimated mean (95% limits)') >>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale') >>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2, ... label=r'Estimated scale (95% limits)') >>> ax.legend(fontsize=10) >>> ax.set_xlim([-4, 4]) >>> ax.set_ylim([0, 0.5]) >>> plt.show() """ m, v, s = mvsdist(data) if alpha >= 1 or alpha <= 0: raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha) m_res = Mean(m.mean(), m.interval(alpha)) v_res = Variance(v.mean(), v.interval(alpha)) s_res = Std_dev(s.mean(), s.interval(alpha)) return m_res, v_res, s_res def mvsdist(data): """ 'Frozen' distributions for mean, variance, and standard deviation of data. Parameters ---------- data : array_like Input array. Converted to 1-D using ravel. Requires 2 or more data-points. Returns ------- mdist : "frozen" distribution object Distribution object representing the mean of the data vdist : "frozen" distribution object Distribution object representing the variance of the data sdist : "frozen" distribution object Distribution object representing the standard deviation of the data See Also -------- bayes_mvs Notes ----- The return values from ``bayes_mvs(data)`` is equivalent to ``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` on the three distribution objects returned from this function will give the same results that are returned from `bayes_mvs`. References ---------- T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278, 2006. Examples -------- >>> from scipy import stats >>> data = [6, 9, 12, 7, 8, 8, 13] >>> mean, var, std = stats.mvsdist(data) We now have frozen distribution objects "mean", "var" and "std" that we can examine: >>> mean.mean() 9.0 >>> mean.interval(0.95) (6.6120585482655692, 11.387941451734431) >>> mean.std() 1.1952286093343936 """ x = ravel(data) n = len(x) if n < 2: raise ValueError("Need at least 2 data-points.") xbar = x.mean() C = x.var() if n > 1000: # gaussian approximations for large n mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) else: nm1 = n - 1 fac = n * C / 2. val = nm1 / 2. 
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) vdist = distributions.invgamma(val, scale=fac) return mdist, vdist, sdist def kstat(data, n=2): r""" Return the nth k-statistic (1<=n<=4 so far). The nth k-statistic k_n is the unique symmetric unbiased estimator of the nth cumulant kappa_n. Parameters ---------- data : array_like Input array. Note that n-D input gets flattened. n : int, {1, 2, 3, 4}, optional Default is equal to 2. Returns ------- kstat : float The nth k-statistic. See Also -------- kstatvar: Returns an unbiased estimator of the variance of the k-statistic. moment: Returns the n-th central moment about the mean for a sample. Notes ----- For a sample size n, the first few k-statistics are given by: .. math:: k_{1} = \mu k_{2} = \frac{n}{n-1} m_{2} k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3} k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)} where :math:`\mu` is the sample mean, :math:`m_2` is the sample variance, and :math:`m_i` is the i-th sample central moment. References ---------- http://mathworld.wolfram.com/k-Statistic.html http://mathworld.wolfram.com/Cumulant.html Examples -------- >>> from scipy import stats >>> rndm = np.random.RandomState(1234) As sample size increases, n-th moment and n-th k-statistic converge to the same number (although they aren't identical). In the case of the normal distribution, they converge to zero. >>> for n in [2, 3, 4, 5, 6, 7]: ... x = rndm.normal(size=10**n) ... m, k = stats.moment(x, 3), stats.kstat(x, 3) ... print("%.3g %.3g %.3g" % (m, k, m-k)) -0.631 -0.651 0.0194 0.0282 0.0283 -8.49e-05 -0.0454 -0.0454 1.36e-05 7.53e-05 7.53e-05 -2.26e-09 0.00166 0.00166 -4.99e-09 -2.88e-06 -2.88e-06 8.63e-13 """ if n > 4 or n < 1: raise ValueError("k-statistics only supported for 1<=n<=4") n = int(n) S = np.zeros(n + 1, np.float64) data = ravel(data) N = data.size # raise ValueError on empty input if N == 0: raise ValueError("Data input must not be empty") # on nan input, return nan without warning if np.isnan(np.sum(data)): return np.nan for k in range(1, n + 1): S[k] = np.sum(data**k, axis=0) if n == 1: return S[1] * 1.0/N elif n == 2: return (N*S[2] - S[1]**2.0) / (N*(N - 1.0)) elif n == 3: return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0)) elif n == 4: return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - 4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / (N*(N-1.0)*(N-2.0)*(N-3.0))) else: raise ValueError("Should not be here.") def kstatvar(data, n=2): r""" Returns an unbiased estimator of the variance of the k-statistic. See `kstat` for more details of the k-statistic. Parameters ---------- data : array_like Input array. Note that n-D input gets flattened. n : int, {1, 2}, optional Default is equal to 2. Returns ------- kstatvar : float The nth k-statistic variance. See Also -------- kstat: Returns the n-th k-statistic. moment: Returns the n-th central moment about the mean for a sample. Notes ----- The variances of the first few k-statistics are given by: .. 
math:: var(k_{1}) = \frac{\kappa^2}{n} var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1} var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} + \frac{9 \kappa^2_{3}}{n - 1} + \frac{6 n \kappa^3_{2}}{(n-1) (n-2)} var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} + \frac{48 \kappa_{3} \kappa_5}{n - 1} + \frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} + \frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} + \frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)} """ data = ravel(data) N = len(data) if n == 1: return kstat(data, n=2) * 1.0/N elif n == 2: k2 = kstat(data, n=2) k4 = kstat(data, n=4) return (2*N*k2**2 + (N-1)*k4) / (N*(N+1)) else: raise ValueError("Only n=1 or n=2 supported.") def _calc_uniform_order_statistic_medians(n): """ Approximations of uniform order statistic medians. Parameters ---------- n : int Sample size. Returns ------- v : 1d float array Approximations of the order statistic medians. References ---------- .. [1] James J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. Examples -------- Order statistics of the uniform distribution on the unit interval are marginally distributed according to beta distributions. The expectations of these order statistic are evenly spaced across the interval, but the distributions are skewed in a way that pushes the medians slightly towards the endpoints of the unit interval: >>> n = 4 >>> k = np.arange(1, n+1) >>> from scipy.stats import beta >>> a = k >>> b = n-k+1 >>> beta.mean(a, b) array([ 0.2, 0.4, 0.6, 0.8]) >>> beta.median(a, b) array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642]) The Filliben approximation uses the exact medians of the smallest and greatest order statistics, and the remaining medians are approximated by points spread evenly across a sub-interval of the unit interval: >>> from scipy.morestats import _calc_uniform_order_statistic_medians >>> _calc_uniform_order_statistic_medians(n) array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642]) This plot shows the skewed distributions of the order statistics of a sample of size four from a uniform distribution on the unit interval: >>> import matplotlib.pyplot as plt >>> x = np.linspace(0.0, 1.0, num=50, endpoint=True) >>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)] >>> plt.figure() >>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3]) """ v = np.zeros(n, dtype=np.float64) v[-1] = 0.5**(1.0 / n) v[0] = 1 - v[-1] i = np.arange(2, n) v[1:-1] = (i - 0.3175) / (n + 0.365) return v def _parse_dist_kw(dist, enforce_subclass=True): """Parse `dist` keyword. Parameters ---------- dist : str or stats.distributions instance. Several functions take `dist` as a keyword, hence this utility function. enforce_subclass : bool, optional If True (default), `dist` needs to be a `_distn_infrastructure.rv_generic` instance. It can sometimes be useful to set this keyword to False, if a function wants to accept objects that just look somewhat like such an instance (for example, they have a ``ppf`` method). 
""" if isinstance(dist, rv_generic): pass elif isinstance(dist, string_types): try: dist = getattr(distributions, dist) except AttributeError: raise ValueError("%s is not a valid distribution name" % dist) elif enforce_subclass: msg = ("`dist` should be a stats.distributions instance or a string " "with the name of such a distribution.") raise ValueError(msg) return dist def _add_axis_labels_title(plot, xlabel, ylabel, title): """Helper function to add axes labels and a title to stats plots""" try: if hasattr(plot, 'set_title'): # Matplotlib Axes instance or something that looks like it plot.set_title(title) plot.set_xlabel(xlabel) plot.set_ylabel(ylabel) else: # matplotlib.pyplot module plot.title(title) plot.xlabel(xlabel) plot.ylabel(ylabel) except: # Not an MPL object or something that looks (enough) like it. # Don't crash on adding labels or title pass def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False): """ Calculate quantiles for a probability plot, and optionally show the plot. Generates a probability plot of sample data against the quantiles of a specified theoretical distribution (the normal distribution by default). `probplot` optionally calculates a best-fit line for the data and plots the results using Matplotlib or a given plot function. Parameters ---------- x : array_like Sample/response data from which `probplot` creates the plot. sparams : tuple, optional Distribution-specific shape parameters (shape parameters plus location and scale). dist : str or stats.distributions instance, optional Distribution or distribution function name. The default is 'norm' for a normal probability plot. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. fit : bool, optional Fit a least-squares regression (best-fit) line to the sample data if True (default). plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. Returns ------- (osm, osr) : tuple of ndarrays Tuple of theoretical quantiles (osm, or order statistic medians) and ordered responses (osr). `osr` is simply sorted input `x`. For details on how `osm` is calculated see the Notes section. (slope, intercept, r) : tuple of floats, optional Tuple containing the result of the least-squares fit, if that is performed by `probplot`. `r` is the square root of the coefficient of determination. If ``fit=False`` and ``plot=None``, this tuple is not returned. Notes ----- Even if `plot` is given, the figure is not shown or saved by `probplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. `probplot` generates a probability plot, which should not be confused with a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this type, see ``statsmodels.api.ProbPlot``. The formula used for the theoretical quantiles (horizontal axis of the probability plot) is Filliben's estimate:: quantiles = dist.ppf(val), for 0.5**(1/n), for i = n val = (i - 0.3175) / (n + 0.365), for i = 2, ..., n-1 1 - 0.5**(1/n), for i = 1 where ``i`` indicates the i-th ordered value and ``n`` is the total number of values. 
Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> nsample = 100 >>> np.random.seed(7654321) A t distribution with small degrees of freedom: >>> ax1 = plt.subplot(221) >>> x = stats.t.rvs(3, size=nsample) >>> res = stats.probplot(x, plot=plt) A t distribution with larger degrees of freedom: >>> ax2 = plt.subplot(222) >>> x = stats.t.rvs(25, size=nsample) >>> res = stats.probplot(x, plot=plt) A mixture of two normal distributions with broadcasting: >>> ax3 = plt.subplot(223) >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], ... size=(nsample//2,2)).ravel() >>> res = stats.probplot(x, plot=plt) A standard normal distribution: >>> ax4 = plt.subplot(224) >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample) >>> res = stats.probplot(x, plot=plt) Produce a new figure with a loggamma distribution, using the ``dist`` and ``sparams`` keywords: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> x = stats.loggamma.rvs(c=2.5, size=500) >>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) >>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") Show the results with Matplotlib: >>> plt.show() """ x = np.asarray(x) _perform_fit = fit or (plot is not None) if x.size == 0: if _perform_fit: return (x, x), (np.nan, np.nan, 0.0) else: return x, x osm_uniform = _calc_uniform_order_statistic_medians(len(x)) dist = _parse_dist_kw(dist, enforce_subclass=False) if sparams is None: sparams = () if isscalar(sparams): sparams = (sparams,) if not isinstance(sparams, tuple): sparams = tuple(sparams) osm = dist.ppf(osm_uniform, *sparams) osr = sort(x) if _perform_fit: # perform a linear least squares fit. slope, intercept, r, prob, sterrest = stats.linregress(osm, osr) if plot is not None: plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-') _add_axis_labels_title(plot, xlabel='Theoretical quantiles', ylabel='Ordered Values', title='Probability Plot') # Add R^2 value to the plot as text if rvalue: xmin = amin(osm) xmax = amax(osm) ymin = amin(x) ymax = amax(x) posx = xmin + 0.70 * (xmax - xmin) posy = ymin + 0.01 * (ymax - ymin) plot.text(posx, posy, "$R^2=%1.4f$" % r**2) if fit: return (osm, osr), (slope, intercept, r) else: return osm, osr def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): """ Calculate the shape parameter that maximizes the PPCC The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. ppcc_max returns the shape parameter that would maximize the probability plot correlation coefficient for the given data to a one-parameter family of distributions. Parameters ---------- x : array_like Input array. brack : tuple, optional Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c) then they are assumed to be a starting interval for a downhill bracket search (see `scipy.optimize.brent`). dist : str or stats.distributions instance, optional Distribution or distribution function name. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. Returns ------- shape_value : float The shape parameter at which the probability plot correlation coefficient reaches its max value. See also -------- ppcc_plot, probplot, boxcox Notes ----- The brack keyword serves as a starting point which is useful in corner cases. One can use a plot to obtain a rough visual estimate of the location for the maximum to start the search near it. 
References ---------- .. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm Examples -------- First we generate some random data from a Tukey-Lambda distribution, with shape parameter -0.7: >>> from scipy import stats >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, ... random_state=1234567) + 1e4 Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter -0.7 used above): >>> import matplotlib.pyplot as plt >>> fig = plt.figure(figsize=(8, 6)) >>> ax = fig.add_subplot(111) >>> res = stats.ppcc_plot(x, -5, 5, plot=ax) We calculate the value where the shape should reach its maximum and a red line is drawn there. The line should coincide with the highest point in the ppcc_plot. >>> max = stats.ppcc_max(x) >>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value') >>> plt.show() """ dist = _parse_dist_kw(dist) osm_uniform = _calc_uniform_order_statistic_medians(len(x)) osr = sort(x) # this function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) # and returns 1-r so that a minimization function maximizes the # correlation def tempfunc(shape, mi, yvals, func): xvals = func(mi, shape) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf)) def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): """ Calculate and optionally plot probability plot correlation coefficient. The probability plot correlation coefficient (PPCC) plot can be used to determine the optimal shape parameter for a one-parameter family of distributions. It cannot be used for distributions without shape parameters (like the normal distribution) or with multiple shape parameters. By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed distributions via an approximately normal one, and is therefore particularly useful in practice. Parameters ---------- x : array_like Input array. a, b: scalar Lower and upper bounds of the shape parameter to use. dist : str or stats.distributions instance, optional Distribution or distribution function name. Objects that look enough like a stats.distributions instance (i.e. they have a ``ppf`` method) are also accepted. The default is ``'tukeylambda'``. plot : object, optional If given, plots PPCC against the shape parameter. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `a` to `b`). Returns ------- svals : ndarray The shape values for which `ppcc` was calculated. ppcc : ndarray The calculated probability plot correlation coefficient values. See also -------- ppcc_max, probplot, boxcox_normplot, tukeylambda References ---------- J.J. Filliben, "The Probability Plot Correlation Coefficient Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. 
Examples -------- First we generate some random data from a Tukey-Lambda distribution, with shape parameter -0.7: >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> np.random.seed(1234567) >>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 Now we explore this data with a PPCC plot as well as the related probability plot and Box-Cox normplot. A red line is drawn where we expect the PPCC value to be maximal (at the shape parameter -0.7 used above): >>> fig = plt.figure(figsize=(12, 4)) >>> ax1 = fig.add_subplot(131) >>> ax2 = fig.add_subplot(132) >>> ax3 = fig.add_subplot(133) >>> res = stats.probplot(x, plot=ax1) >>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2) >>> res = stats.ppcc_plot(x, -5, 5, plot=ax3) >>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value') >>> plt.show() """ if b <= a: raise ValueError("`b` has to be larger than `a`.") svals = np.linspace(a, b, num=N) ppcc = np.empty_like(svals) for k, sval in enumerate(svals): _, r2 = probplot(x, sval, dist=dist, fit=True) ppcc[k] = r2[-1] if plot is not None: plot.plot(svals, ppcc, 'x') _add_axis_labels_title(plot, xlabel='Shape Values', ylabel='Prob Plot Corr. Coef.', title='(%s) PPCC Plot' % dist) return svals, ppcc def boxcox_llf(lmb, data): r"""The boxcox log-likelihood function. Parameters ---------- lmb : scalar Parameter for Box-Cox transformation. See `boxcox` for details. data : array_like Data to calculate Box-Cox log-likelihood for. If `data` is multi-dimensional, the log-likelihood is calculated along the first axis. Returns ------- llf : float or ndarray Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`, an array otherwise. See Also -------- boxcox, probplot, boxcox_normplot, boxcox_normmax Notes ----- The Box-Cox log-likelihood function is defined here as .. math:: llf = (\lambda - 1) \sum_i(\log(x_i)) - N/2 \log(\sum_i (y_i - \bar{y})^2 / N), where ``y`` is the Box-Cox transformed input data ``x``. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes >>> np.random.seed(1245) Generate some random variates and calculate Box-Cox log-likelihood values for them for a range of ``lmbda`` values: >>> x = stats.loggamma.rvs(5, loc=10, size=1000) >>> lmbdas = np.linspace(-2, 10) >>> llf = np.zeros(lmbdas.shape, dtype=float) >>> for ii, lmbda in enumerate(lmbdas): ... llf[ii] = stats.boxcox_llf(lmbda, x) Also find the optimal lmbda value with `boxcox`: >>> x_most_normal, lmbda_optimal = stats.boxcox(x) Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a horizontal line to check that that's really the optimum: >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(lmbdas, llf, 'b.-') >>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') >>> ax.set_xlabel('lmbda parameter') >>> ax.set_ylabel('Box-Cox log-likelihood') Now add some probability plots to show that where the log-likelihood is maximized the data transformed with `boxcox` looks closest to normal: >>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' >>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): ... xt = stats.boxcox(x, lmbda=lmbda) ... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) ... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) ... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') ... ax_inset.set_xticklabels([]) ... ax_inset.set_yticklabels([]) ... 
ax_inset.set_title('$\lambda=%1.2f$' % lmbda) >>> plt.show() """ data = np.asarray(data) N = data.shape[0] if N == 0: return np.nan y = boxcox(data, lmb) y_mean = np.mean(y, axis=0) llf = (lmb - 1) * np.sum(np.log(data), axis=0) llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0)) return llf def _boxcox_conf_interval(x, lmax, alpha): # Need to find the lambda for which # f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1) target = boxcox_llf(lmax, x) - fac def rootfunc(lmbda, data, target): return boxcox_llf(lmbda, data) - target # Find positive endpoint of interval in which answer is to be found newlm = lmax + 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm += 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target)) # Now find negative interval in the same way newlm = lmax - 0.5 N = 0 while (rootfunc(newlm, x, target) > 0.0) and (N < 500): newlm -= 0.1 N += 1 if N == 500: raise RuntimeError("Could not find endpoint.") lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target)) return lmminus, lmplus def boxcox(x, lmbda=None, alpha=None): r""" Return a positive dataset transformed by a Box-Cox power transformation. Parameters ---------- x : ndarray Input array. Should be 1-dimensional. lmbda : {None, scalar}, optional If `lmbda` is not None, do the transformation for that value. If `lmbda` is None, find the lambda that maximizes the log-likelihood function and return it as the second output argument. alpha : {None, float}, optional If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence interval for `lmbda` as the third output argument. Must be between 0.0 and 1.0. Returns ------- boxcox : ndarray Box-Cox power transformed array. maxlog : float, optional If the `lmbda` parameter is None, the second returned argument is the lambda that maximizes the log-likelihood function. (min_ci, max_ci) : tuple of float, optional If `lmbda` parameter is None and ``alpha`` is not None, this returned tuple of floats represents the minimum and maximum confidence limits given ``alpha``. See Also -------- probplot, boxcox_normplot, boxcox_normmax, boxcox_llf Notes ----- The Box-Cox transform is given by:: y = (x**lmbda - 1) / lmbda, for lmbda > 0 log(x), for lmbda = 0 `boxcox` requires the input data to be positive. Sometimes a Box-Cox transformation provides a shift parameter to achieve this; `boxcox` does not. Such a shift parameter is equivalent to adding a positive constant to `x` before calling `boxcox`. The confidence limits returned when ``alpha`` is provided give the interval where: .. math:: llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1), with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared function. References ---------- G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the Royal Statistical Society B, 26, 211-252 (1964). 
Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt We generate some random variates from a non-normal distribution and make a probability plot for it, to show it is non-normal in the tails: >>> fig = plt.figure() >>> ax1 = fig.add_subplot(211) >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) >>> ax1.set_xlabel('') >>> ax1.set_title('Probplot against normal distribution') We now use `boxcox` to transform the data so it's closest to normal: >>> ax2 = fig.add_subplot(212) >>> xt, _ = stats.boxcox(x) >>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) >>> ax2.set_title('Probplot after Box-Cox transformation') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if any(x <= 0): raise ValueError("Data must be positive.") if lmbda is not None: # single transformation return special.boxcox(x, lmbda) # If lmbda=None, find the lmbda that maximizes the log-likelihood function. lmax = boxcox_normmax(x, method='mle') y = boxcox(x, lmax) if alpha is None: return y, lmax else: # Find confidence interval interval = _boxcox_conf_interval(x, lmax, alpha) return y, lmax, interval def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'): """Compute optimal Box-Cox transform parameter for input data. Parameters ---------- x : array_like Input array. brack : 2-tuple, optional The starting interval for a downhill bracket search with `optimize.brent`. Note that this is in most cases not critical; the final result is allowed to be outside this bracket. method : str, optional The method to determine the optimal transform parameter (`boxcox` ``lmbda`` parameter). Options are: 'pearsonr' (default) Maximizes the Pearson correlation coefficient between ``y = boxcox(x)`` and the expected values for ``y`` if `x` would be normally-distributed. 'mle' Minimizes the log-likelihood `boxcox_llf`. This is the method used in `boxcox`. 'all' Use all optimization methods available, and return all results. Useful to compare different methods. Returns ------- maxlog : float or ndarray The optimal transform parameter found. An array instead of a scalar for ``method='all'``. See Also -------- boxcox, boxcox_llf, boxcox_normplot Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt >>> np.random.seed(1234) # make this example reproducible Generate some data and determine optimal ``lmbda`` in various ways: >>> x = stats.loggamma.rvs(5, size=30) + 5 >>> y, lmax_mle = stats.boxcox(x) >>> lmax_pearsonr = stats.boxcox_normmax(x) >>> lmax_mle 7.177... >>> lmax_pearsonr 7.916... >>> stats.boxcox_normmax(x, method='all') array([ 7.91667384, 7.17718692]) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax) >>> ax.axvline(lmax_mle, color='r') >>> ax.axvline(lmax_pearsonr, color='g', ls='--') >>> plt.show() """ def _pearsonr(x, brack): osm_uniform = _calc_uniform_order_statistic_medians(len(x)) xvals = distributions.norm.ppf(osm_uniform) def _eval_pearsonr(lmbda, xvals, samps): # This function computes the x-axis values of the probability plot # and computes a linear regression (including the correlation) and # returns ``1 - r`` so that a minimization function maximizes the # correlation. 
y = boxcox(samps, lmbda) yvals = np.sort(y) r, prob = stats.pearsonr(xvals, yvals) return 1 - r return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x)) def _mle(x, brack): def _eval_mle(lmb, data): # function to minimize return -boxcox_llf(lmb, data) return optimize.brent(_eval_mle, brack=brack, args=(x,)) def _all(x, brack): maxlog = np.zeros(2, dtype=float) maxlog[0] = _pearsonr(x, brack) maxlog[1] = _mle(x, brack) return maxlog methods = {'pearsonr': _pearsonr, 'mle': _mle, 'all': _all} if method not in methods.keys(): raise ValueError("Method %s not recognized." % method) optimfunc = methods[method] return optimfunc(x, brack) def boxcox_normplot(x, la, lb, plot=None, N=80): """Compute parameters for a Box-Cox normality plot, optionally show it. A Box-Cox normality plot shows graphically what the best transformation parameter is to use in `boxcox` to obtain a distribution that is close to normal. Parameters ---------- x : array_like Input array. la, lb : scalar The lower and upper bounds for the ``lmbda`` values to pass to `boxcox` for Box-Cox transformations. These are also the limits of the horizontal axis of the plot if that is generated. plot : object, optional If given, plots the quantiles and least squares fit. `plot` is an object that has to have methods "plot" and "text". The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, or a custom object with the same methods. Default is None, which means that no plot is created. N : int, optional Number of points on the horizontal axis (equally distributed from `la` to `lb`). Returns ------- lmbdas : ndarray The ``lmbda`` values for which a Box-Cox transform was done. ppcc : ndarray Probability Plot Correlelation Coefficient, as obtained from `probplot` when fitting the Box-Cox transformed input `x` against a normal distribution. See Also -------- probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max Notes ----- Even if `plot` is given, the figure is not shown or saved by `boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` should be used after calling `probplot`. Examples -------- >>> from scipy import stats >>> import matplotlib.pyplot as plt Generate some non-normally distributed data, and create a Box-Cox plot: >>> x = stats.loggamma.rvs(5, size=500) + 5 >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax) Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in the same plot: >>> _, maxlog = stats.boxcox(x) >>> ax.axvline(maxlog, color='r') >>> plt.show() """ x = np.asarray(x) if x.size == 0: return x if lb <= la: raise ValueError("`lb` has to be larger than `la`.") lmbdas = np.linspace(la, lb, num=N) ppcc = lmbdas * 0.0 for i, val in enumerate(lmbdas): # Determine for each lmbda the correlation coefficient of transformed x z = boxcox(x, lmbda=val) _, r2 = probplot(z, dist='norm', fit=True) ppcc[i] = r2[-1] if plot is not None: plot.plot(lmbdas, ppcc, 'x') _add_axis_labels_title(plot, xlabel='$\\lambda$', ylabel='Prob Plot Corr. Coef.', title='Box-Cox Normality Plot') return lmbdas, ppcc def shapiro(x): """ Perform the Shapiro-Wilk test for normality. The Shapiro-Wilk test tests the null hypothesis that the data was drawn from a normal distribution. Parameters ---------- x : array_like Array of sample data. Returns ------- W : float The test statistic. p-value : float The p-value for the hypothesis test. 
See Also -------- anderson : The Anderson-Darling test for normality kstest : The Kolmogorov-Smirnov test for goodness of fit. Notes ----- The algorithm used is described in [4]_ but censoring parameters as described are not implemented. For N > 5000 the W test statistic is accurate but the p-value may not be. The chance of rejecting the null hypothesis when it is true is close to 5% regardless of sample size. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for normality (complete samples), Biometrika, Vol. 52, pp. 591-611. .. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk, Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of Statistical Modeling and Analytics, Vol. 2, pp. 21-33. .. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4. Examples -------- >>> from scipy import stats >>> np.random.seed(12345678) >>> x = stats.norm.rvs(loc=5, scale=3, size=100) >>> stats.shapiro(x) (0.9772805571556091, 0.08144091814756393) """ x = np.ravel(x) N = len(x) if N < 3: raise ValueError("Data must be at least length 3.") a = zeros(N, 'f') init = 0 y = sort(x) a, w, pw, ifault = statlib.swilk(y, a[:N//2], init) if ifault not in [0, 2]: warnings.warn("Input data for shapiro has range zero. The results " "may not be accurate.") if N > 5000: warnings.warn("p-value may not be accurate for N > 5000.") return w, pw # Values from Stephens, M A, "EDF Statistics for Goodness of Fit and # Some Comparisons", Journal of he American Statistical # Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 _Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) _Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) # From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", # Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. _Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) # From Stephens, M A, "Tests of Fit for the Logistic Distribution Based # on the Empirical Distribution Function.", Biometrika, # Vol. 66, Issue 3, Dec. 1979, pp 591-595. _Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) AndersonResult = namedtuple('AndersonResult', ('statistic', 'critical_values', 'significance_level')) def anderson(x, dist='norm'): """ Anderson-Darling test for data coming from a particular distribution The Anderson-Darling tests the null hypothesis that a sample is drawn from a population that follows a particular distribution. For the Anderson-Darling test, the critical values depend on which distribution is being tested against. This function works for normal, exponential, logistic, or Gumbel (Extreme Value Type I) distributions. Parameters ---------- x : array_like array of sample data dist : {'norm','expon','logistic','gumbel','gumbel_l', gumbel_r', 'extreme1'}, optional the type of distribution to test against. The default is 'norm' and 'extreme1', 'gumbel_l' and 'gumbel' are synonyms. Returns ------- statistic : float The Anderson-Darling test statistic critical_values : list The critical values for this distribution significance_level : list The significance levels for the corresponding critical values in percents. The function returns critical values for a differing set of significance levels depending on the distribution that is being tested against. See Also -------- kstest : The Kolmogorov-Smirnov test for goodness-of-fit. 
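    Examples
    --------
    A brief illustrative sketch (the sample below is arbitrary, and the
    resulting numbers, which depend on the seed, are not shown):

    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(loc=3, scale=2, size=100)
    >>> A2, crit, sig = stats.anderson(x, dist='norm')

    ``A2`` is then compared with the entry of ``crit`` matching the desired
    significance level in ``sig``; normality is rejected when the statistic
    exceeds that critical value (see the Notes below).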
Notes ----- Critical values provided are for the following significance levels: normal/exponential 15%, 10%, 5%, 2.5%, 1% logistic 25%, 10%, 5%, 2.5%, 1%, 0.5% Gumbel 25%, 10%, 5%, 2.5%, 1% If the returned statistic is larger than these critical values then for the corresponding significance level, the null hypothesis that the data come from the chosen distribution can be rejected. The returned statistic is referred to as 'A2' in the references. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm .. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and Some Comparisons, Journal of the American Statistical Association, Vol. 69, pp. 730-737. .. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, pp. 357-369. .. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value Distribution, Biometrika, Vol. 64, pp. 583-588. .. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference to Tests for Exponentiality, Technical Report No. 262, Department of Statistics, Stanford University, Stanford, CA. .. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution Based on the Empirical Distribution Function, Biometrika, Vol. 66, pp. 591-595. """ if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l', 'gumbel_r', 'extreme1', 'logistic']: raise ValueError("Invalid distribution; dist must be 'norm', " "'expon', 'gumbel', 'extreme1' or 'logistic'.") y = sort(x) xbar = np.mean(x, axis=0) N = len(y) if dist == 'norm': s = np.std(x, ddof=1, axis=0) w = (y - xbar) / s logcdf = distributions.norm.logcdf(w) logsf = distributions.norm.logsf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) elif dist == 'expon': w = y / xbar logcdf = distributions.expon.logcdf(w) logsf = distributions.expon.logsf(w) sig = array([15, 10, 5, 2.5, 1]) critical = around(_Avals_expon / (1.0 + 0.6/N), 3) elif dist == 'logistic': def rootfunc(ab, xj, N): a, b = ab tmp = (xj - a) / b tmp2 = exp(tmp) val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N, np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] return array(val) sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) w = (y - sol[0]) / sol[1] logcdf = distributions.logistic.logcdf(w) logsf = distributions.logistic.logsf(w) sig = array([25, 10, 5, 2.5, 1, 0.5]) critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) elif dist == 'gumbel_r': xbar, s = distributions.gumbel_r.fit(x) w = (y - xbar) / s logcdf = distributions.gumbel_r.logcdf(w) logsf = distributions.gumbel_r.logsf(w) sig = array([25, 10, 5, 2.5, 1]) critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1') xbar, s = distributions.gumbel_l.fit(x) w = (y - xbar) / s logcdf = distributions.gumbel_l.logcdf(w) logsf = distributions.gumbel_l.logsf(w) sig = array([25, 10, 5, 2.5, 1]) critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) i = arange(1, N + 1) A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0) return AndersonResult(A2, critical, sig) def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): """ Compute A2akN equation 7 of Scholz and Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples.
n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2aKN : float The A2aKN statistics of Scholz and Stephens 1987. """ A2akN = 0. Z_ssorted_left = Z.searchsorted(Zstar, 'left') if N == Zstar.size: lj = 1. else: lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left Bj = Z_ssorted_left + lj / 2. for i in arange(0, k): s = np.sort(samples[i]) s_ssorted_right = s.searchsorted(Zstar, side='right') Mij = s_ssorted_right.astype(float) fij = s_ssorted_right - s.searchsorted(Zstar, 'left') Mij -= fij / 2. inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) A2akN += inner.sum() / n[i] A2akN *= (N - 1.) / N return A2akN def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): """ Compute A2akN equation 6 of Scholz & Stephens. Parameters ---------- samples : sequence of 1-D array_like Array of sample arrays. Z : array_like Sorted array of all observations. Zstar : array_like Sorted array of unique observations. k : int Number of samples. n : array_like Number of observations in each sample. N : int Total number of observations. Returns ------- A2KN : float The A2KN statistics of Scholz and Stephens 1987. """ A2kN = 0. lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], 'left') Bj = lj.cumsum() for i in arange(0, k): s = np.sort(samples[i]) Mij = s.searchsorted(Zstar[:-1], side='right') inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) A2kN += inner.sum() / n[i] return A2kN Anderson_ksampResult = namedtuple('Anderson_ksampResult', ('statistic', 'critical_values', 'significance_level')) def anderson_ksamp(samples, midrank=True): """The Anderson-Darling test for k-samples. The k-sample Anderson-Darling test is a modification of the one-sample Anderson-Darling test. It tests the null hypothesis that k-samples are drawn from the same population without having to specify the distribution function of that population. The critical values depend on the number of samples. Parameters ---------- samples : sequence of 1-D array_like Array of sample data in arrays. midrank : bool, optional Type of Anderson-Darling test which is computed. Default (True) is the midrank test applicable to continuous and discrete populations. If False, the right side empirical distribution is used. Returns ------- statistic : float Normalized k-sample Anderson-Darling test statistic. critical_values : array The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%. significance_level : float An approximate significance level at which the null hypothesis for the provided samples can be rejected. Raises ------ ValueError If less than 2 samples are provided, a sample is empty, or no distinct observations are in the samples. See Also -------- ks_2samp : 2 sample Kolmogorov-Smirnov test anderson : 1 sample Anderson-Darling test Notes ----- [1]_ Defines three versions of the k-sample Anderson-Darling test: one for continuous distributions and two for discrete distributions, in which ties between samples may occur. The default of this routine is to compute the version based on the midrank empirical distribution function. This test is applicable to continuous and discrete data. If midrank is set to False, the right side empirical distribution is used for a test for discrete data. According to [1]_, the two discrete test statistics differ only slightly if a few collisions due to round-off errors occur in the test not adjusted for ties between samples. .. versionadded:: 0.14.0 References ---------- .. [1] Scholz, F. 
W and Stephens, M. A. (1987), K-Sample Anderson-Darling Tests, Journal of the American Statistical Association, Vol. 82, pp. 918-924. Examples -------- >>> from scipy import stats >>> np.random.seed(314159) The null hypothesis that the two random samples come from the same distribution can be rejected at the 5% level because the returned test value is greater than the critical value for 5% (1.961) but not at the 2.5% level. The interpolation gives an approximate significance level of 3.1%: >>> stats.anderson_ksamp([np.random.normal(size=50), ... np.random.normal(loc=0.5, size=30)]) (2.4615796189876105, array([ 0.325, 1.226, 1.961, 2.718, 3.752]), 0.03134990135800783) The null hypothesis cannot be rejected for three samples from an identical distribution. The approximate p-value (87%) has to be computed by extrapolation and may not be very accurate: >>> stats.anderson_ksamp([np.random.normal(size=50), ... np.random.normal(size=30), np.random.normal(size=20)]) (-0.73091722665244196, array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]), 0.8789283903979661) """ k = len(samples) if (k < 2): raise ValueError("anderson_ksamp needs at least two samples") samples = list(map(np.asarray, samples)) Z = np.sort(np.hstack(samples)) N = Z.size Zstar = np.unique(Z) if Zstar.size < 2: raise ValueError("anderson_ksamp needs more than one distinct " "observation") n = np.array([sample.size for sample in samples]) if any(n == 0): raise ValueError("anderson_ksamp encountered sample without " "observations") if midrank: A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N) else: A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N) H = (1. / n).sum() hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() h = hs_cs[-1] + 1 g = (hs_cs / arange(2, N)).sum() a = (4*g - 6) * (k - 1) + (10 - 6*g)*H b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h d = (2*h + 6)*k**2 - 4*h*k sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.)) m = k - 1 A2 = (A2kN - m) / math.sqrt(sigmasq) # The b_i values are the interpolation coefficients from Table 2 # of Scholz and Stephens 1987 b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326]) b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822]) b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396]) critical = b0 + b1 / math.sqrt(m) + b2 / m pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2) if A2 < critical.min() or A2 > critical.max(): warnings.warn("approximate p-value will be computed by extrapolation") try: p = math.exp(np.polyval(pf, A2)) except (OverflowError,): p = float("inf") return Anderson_ksampResult(A2, critical, p) AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) def ansari(x, y): """ Perform the Ansari-Bradley test for equal scale parameters The Ansari-Bradley test is a non-parametric test for the equality of the scale parameter of the distributions from which two samples were drawn. Parameters ---------- x, y : array_like arrays of sample data Returns ------- statistic : float The Ansari-Bradley test statistic pvalue : float The p-value of the hypothesis test See Also -------- fligner : A non-parametric test for the equality of k variances mood : A non-parametric test for the equality of two scale parameters Notes ----- The p-value given is exact when the sample sizes are both less than 55 and there are no ties, otherwise a normal approximation for the p-value is used. References ---------- .. [1] Sprent, Peter and N.C. 
Smeeton. Applied nonparametric statistical methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. """ x, y = asarray(x), asarray(y) n = len(x) m = len(y) if m < 1: raise ValueError("Not enough other observations.") if n < 1: raise ValueError("Not enough test observations.") N = m + n xy = r_[x, y] # combine rank = stats.rankdata(xy) symrank = amin(array((rank, N - rank + 1)), 0) AB = np.sum(symrank[:n], axis=0) uxy = unique(xy) repeats = (len(uxy) != len(xy)) exact = ((m < 55) and (n < 55) and not repeats) if repeats and (m < 55 or n < 55): warnings.warn("Ties preclude use of exact statistic.") if exact: astart, a1, ifault = statlib.gscale(n, m) ind = AB - astart total = np.sum(a1, axis=0) if ind < len(a1)/2.0: cind = int(ceil(ind)) if ind == cind: pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total else: pval = 2.0 * np.sum(a1[:cind], axis=0) / total else: find = int(floor(ind)) if ind == floor(ind): pval = 2.0 * np.sum(a1[find:], axis=0) / total else: pval = 2.0 * np.sum(a1[find+1:], axis=0) / total return AnsariResult(AB, min(1.0, pval)) # otherwise compute normal approximation if N % 2: # N odd mnAB = n * (N+1.0)**2 / 4.0 / N varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) else: mnAB = n * (N+2.0) / 4.0 varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) if repeats: # adjust variance estimates # compute np.sum(tj * rj**2,axis=0) fac = np.sum(symrank**2, axis=0) if N % 2: # N odd varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) else: # N even varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) z = (AB - mnAB) / sqrt(varAB) pval = distributions.norm.sf(abs(z)) * 2.0 return AnsariResult(AB, pval) BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) def bartlett(*args): """ Perform Bartlett's test for equal variances Bartlett's test tests the null hypothesis that all input samples are from populations with equal variances. For samples from significantly non-normal populations, Levene's test `levene` is more robust. Parameters ---------- sample1, sample2,... : array_like arrays of sample data. May be different lengths. Returns ------- statistic : float The test statistic. pvalue : float The p-value of the test. See Also -------- fligner : A non-parametric test for the equality of k variances levene : A robust parametric test for equality of k variances Notes ----- Conover et al. (1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm .. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical Methods, Eighth Edition, Iowa State University Press. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical Tests. Proceedings of the Royal Society of London. Series A, Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. 
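    Examples
    --------
    An illustrative sketch (the sample sizes and scales below are arbitrary,
    and the numerical results, which depend on the seed, are not shown):

    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> a = stats.norm.rvs(scale=1, size=50)
    >>> b = stats.norm.rvs(scale=2, size=50)
    >>> T, p = stats.bartlett(a, b)

    A small ``p`` indicates that the hypothesis of equal variances should be
    rejected for these two samples.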
""" # Handle empty input for a in args: if np.asanyarray(a).size == 0: return BartlettResult(np.nan, np.nan) k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) ssq = zeros(k, 'd') for j in range(k): Ni[j] = len(args[j]) ssq[j] = np.var(args[j], ddof=1) Ntot = np.sum(Ni, axis=0) spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k)) numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0) denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) - 1.0/(Ntot - k)) T = numer / denom pval = distributions.chi2.sf(T, k - 1) # 1 - cdf return BartlettResult(T, pval) LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue')) def levene(*args, **kwds): """ Perform Levene test for equal variances. The Levene test tests the null hypothesis that all input samples are from populations with equal variances. Levene's test is an alternative to Bartlett's test `bartlett` in the case where there are significant deviations from normality. Parameters ---------- sample1, sample2, ... : array_like The sample data, possibly with different lengths center : {'mean', 'median', 'trimmed'}, optional Which function of the data to use in the test. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the test. Notes ----- Three variations of Levene's test are possible. The possibilities and their recommended usages are: * 'median' : Recommended for skewed (non-normal) distributions> * 'mean' : Recommended for symmetric, moderate-tailed distributions. * 'trimmed' : Recommended for heavy-tailed distributions. References ---------- .. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm .. [2] Levene, H. (1960). In Contributions to Probability and Statistics: Essays in Honor of Harold Hotelling, I. Olkin et al. eds., Stanford University Press, pp. 278-292. .. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American Statistical Association, 69, 364-367 """ # Handle keyword arguments. 
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("levene() got an unexpected keyword " "argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") Ni = zeros(k) Yci = zeros(k, 'd') if center not in ['mean', 'median', 'trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" " or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(np.sort(arg), proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) for j in range(k): Ni[j] = len(args[j]) Yci[j] = func(args[j]) Ntot = np.sum(Ni, axis=0) # compute Zij's Zij = [None] * k for i in range(k): Zij[i] = abs(asarray(args[i]) - Yci[i]) # compute Zbari Zbari = zeros(k, 'd') Zbar = 0.0 for i in range(k): Zbari[i] = np.mean(Zij[i], axis=0) Zbar += Zbari[i] * Ni[i] Zbar /= Ntot numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0) # compute denom_variance dvar = 0.0 for i in range(k): dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0) denom = (k - 1.0) * dvar W = numer / denom pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf return LeveneResult(W, pval) def binom_test(x, n=None, p=0.5, alternative='two-sided'): """ Perform a test that the probability of success is p. This is an exact, two-sided test of the null hypothesis that the probability of success in a Bernoulli experiment is `p`. Parameters ---------- x : integer or array_like the number of successes, or if x has length 2, it is the number of successes and the number of failures. n : integer the number of trials. This is ignored if x gives both the number of successes and failures p : float, optional The hypothesized probability of success. 0 <= p <= 1. The default value is p = 0.5 alternative : {'two-sided', 'greater', 'less'}, optional Indicates the alternative hypothesis. The default value is 'two-sided'. Returns ------- p-value : float The p-value of the hypothesis test References ---------- .. [1] http://en.wikipedia.org/wiki/Binomial_test """ x = atleast_1d(x).astype(np.integer) if len(x) == 2: n = x[1] + x[0] x = x[0] elif len(x) == 1: x = x[0] if n is None or n < x: raise ValueError("n must be >= x") n = np.int_(n) else: raise ValueError("Incorrect length for x.") if (p > 1.0) or (p < 0.0): raise ValueError("p must be in range [0,1]") if alternative not in ('two-sided', 'less', 'greater'): raise ValueError("alternative not recognized\n" "should be 'two-sided', 'less' or 'greater'") if alternative == 'less': pval = distributions.binom.cdf(x, n, p) return pval if alternative == 'greater': pval = distributions.binom.sf(x-1, n, p) return pval # if alternative was neither 'less' nor 'greater', then it's 'two-sided' d = distributions.binom.pmf(x, n, p) rerr = 1 + 1e-7 if x == p * n: # special case as shortcut, would also be handled by `else` below pval = 1. 
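    # In the remaining two-sided cases, the p-value is the probability of the
    # observed tail plus the total probability, in the opposite tail, of every
    # outcome whose pmf does not exceed that of the observed count ``x``; the
    # ``rerr`` factor is a small relative tolerance that guards against
    # floating-point round-off in that comparison.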
elif x < p * n: i = np.arange(np.ceil(p * n), n+1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(x, n, p) + distributions.binom.sf(n - y, n, p)) else: i = np.arange(np.floor(p*n) + 1) y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) pval = (distributions.binom.cdf(y-1, n, p) + distributions.binom.sf(x-1, n, p)) return min(1.0, pval) def _apply_func(x, g, func): # g is list of indices into x # separating x into different groups # func should be applied over the groups g = unique(r_[0, g, len(x)]) output = [] for k in range(len(g) - 1): output.append(func(x[g[k]:g[k+1]])) return asarray(output) FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) def fligner(*args, **kwds): """ Perform Fligner-Killeen test for equality of variance. Fligner's test tests the null hypothesis that all input samples are from populations with equal variances. Fligner-Killeen's test is distribution free when populations are identical [2]_. Parameters ---------- sample1, sample2, ... : array_like Arrays of sample data. Need not be the same length. center : {'mean', 'median', 'trimmed'}, optional Keyword argument controlling which function of the data is used in computing the test statistic. The default is 'median'. proportiontocut : float, optional When `center` is 'trimmed', this gives the proportion of data points to cut from each end. (See `scipy.stats.trim_mean`.) Default is 0.05. Returns ------- statistic : float The test statistic. pvalue : float The p-value for the hypothesis test. See Also -------- bartlett : A parametric test for equality of k variances in normal samples levene : A robust parametric test for equality of k variances Notes ----- As with Levene's test there are three variants of Fligner's test that differ by the measure of central tendency used in the test. See `levene` for more information. Conover et al. (1981) examine many of the existing parametric and nonparametric tests by extensive simulations and they conclude that the tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be superior in terms of robustness of departures from normality and power [3]_. References ---------- .. [1] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. http://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample tests for scale. 'Journal of the American Statistical Association.' 71(353), 210-213. .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and Hypothesis Testing based on Quadratic Inference Function. Technical Report #99-03, Center for Likelihood Studies, Pennsylvania State University. .. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A comparative study of tests for homogeneity of variances, with applications to the outer continental shelf biding data. Technometrics, 23(4), 351-361. """ # Handle empty input for a in args: if np.asanyarray(a).size == 0: return FlignerResult(np.nan, np.nan) # Handle keyword arguments. 
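    # (Same manual unpacking of ``center`` and ``proportiontocut`` from
    #  ``kwds`` as in `levene` above.)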
center = 'median' proportiontocut = 0.05 for kw, value in kwds.items(): if kw not in ['center', 'proportiontocut']: raise TypeError("fligner() got an unexpected keyword " "argument '%s'" % kw) if kw == 'center': center = value else: proportiontocut = value k = len(args) if k < 2: raise ValueError("Must enter at least two input sample vectors.") if center not in ['mean', 'median', 'trimmed']: raise ValueError("Keyword argument <center> must be 'mean', 'median'" " or 'trimmed'.") if center == 'median': func = lambda x: np.median(x, axis=0) elif center == 'mean': func = lambda x: np.mean(x, axis=0) else: # center == 'trimmed' args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) func = lambda x: np.mean(x, axis=0) Ni = asarray([len(args[j]) for j in range(k)]) Yci = asarray([func(args[j]) for j in range(k)]) Ntot = np.sum(Ni, axis=0) # compute Zij's Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)] allZij = [] g = [0] for i in range(k): allZij.extend(list(Zij[i])) g.append(len(allZij)) ranks = stats.rankdata(allZij) a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) # compute Aibar Aibar = _apply_func(a, g, np.sum) / Ni anbar = np.mean(a, axis=0) varsq = np.var(a, axis=0, ddof=1) Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf return FlignerResult(Xsq, pval) def mood(x, y, axis=0): """ Perform Mood's test for equal scale parameters. Mood's two-sample test for scale parameters is a non-parametric test for the null hypothesis that two samples are drawn from the same distribution with the same scale parameter. Parameters ---------- x, y : array_like Arrays of sample data. axis : int, optional The axis along which the samples are tested. `x` and `y` can be of different length along `axis`. If `axis` is None, `x` and `y` are flattened and the test is done on all values in the flattened arrays. Returns ------- z : scalar or ndarray The z-score for the hypothesis test. For 1-D inputs a scalar is returned. p-value : scalar ndarray The p-value for the hypothesis test. See Also -------- fligner : A non-parametric test for the equality of k variances ansari : A non-parametric test for the equality of 2 variances bartlett : A parametric test for equality of k variances in normal samples levene : A parametric test for equality of k variances Notes ----- The data are assumed to be drawn from probability distributions ``f(x)`` and ``f(x/s) / s`` respectively, for some probability density function f. The null hypothesis is that ``s == 1``. For multi-dimensional arrays, if the inputs are of shapes ``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the resulting z and p values will have shape ``(n0, n2, n3)``. Note that ``n1`` and ``m1`` don't have to be equal, but the other dimensions do. 
Examples -------- >>> from scipy import stats >>> np.random.seed(1234) >>> x2 = np.random.randn(2, 45, 6, 7) >>> x1 = np.random.randn(2, 30, 6, 7) >>> z, p = stats.mood(x1, x2, axis=1) >>> p.shape (2, 6, 7) Find the number of points where the difference in scale is not significant: >>> (p > 0.1).sum() 74 Perform the test with different scales: >>> x1 = np.random.randn(2, 30) >>> x2 = np.random.randn(2, 35) * 10.0 >>> stats.mood(x1, x2, axis=1) (array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07])) """ x = np.asarray(x, dtype=float) y = np.asarray(y, dtype=float) if axis is None: x = x.flatten() y = y.flatten() axis = 0 # Determine shape of the result arrays res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if ax != axis])): raise ValueError("Dimensions of x and y on all axes except `axis` " "should match") n = x.shape[axis] m = y.shape[axis] N = m + n if N < 3: raise ValueError("Not enough observations.") xy = np.concatenate((x, y), axis=axis) if axis != 0: xy = np.rollaxis(xy, axis) xy = xy.reshape(xy.shape[0], -1) # Generalized to the n-dimensional case by adding the axis argument, and # using for loops, since rankdata is not vectorized. For improving # performance consider vectorizing rankdata function. all_ranks = np.zeros_like(xy) for j in range(xy.shape[1]): all_ranks[:, j] = stats.rankdata(xy[:, j]) Ri = all_ranks[:n] M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0) # Approx stat. mnM = n * (N * N - 1.0) / 12 varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 z = (M - mnM) / sqrt(varM) # sf for right tail, cdf for left tail. Factor 2 for two-sidedness z_pos = z > 0 pval = np.zeros_like(z) pval[z_pos] = 2 * distributions.norm.sf(z[z_pos]) pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos]) if res_shape == (): # Return scalars, not 0-D arrays z = z[0] pval = pval[0] else: z.shape = res_shape pval.shape = res_shape return z, pval WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue')) def wilcoxon(x, y=None, zero_method="wilcox", correction=False): """ Calculate the Wilcoxon signed-rank test. The Wilcoxon signed-rank test tests the null hypothesis that two related paired samples come from the same distribution. In particular, it tests whether the distribution of the differences x - y is symmetric about zero. It is a non-parametric version of the paired T-test. Parameters ---------- x : array_like The first set of measurements. y : array_like, optional The second set of measurements. If `y` is not given, then the `x` array is considered to be the differences between the two sets of measurements. zero_method : string, {"pratt", "wilcox", "zsplit"}, optional "pratt": Pratt treatment: includes zero-differences in the ranking process (more conservative) "wilcox": Wilcox treatment: discards all zero-differences "zsplit": Zero rank split: just like Pratt, but spliting the zero rank between positive and negative ones correction : bool, optional If True, apply continuity correction by adjusting the Wilcoxon rank statistic by 0.5 towards the mean value when computing the z-statistic. Default is False. Returns ------- statistic : float The sum of the ranks of the differences above or below zero, whichever is smaller. pvalue : float The two-sided p-value for the test. Notes ----- Because the normal approximation is used for the calculations, the samples used should be large. A typical rule is to require that n > 20. References ---------- .. 
[1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test """ if zero_method not in ["wilcox", "pratt", "zsplit"]: raise ValueError("Zero method should be either 'wilcox' " "or 'pratt' or 'zsplit'") if y is None: d = asarray(x) else: x, y = map(asarray, (x, y)) if len(x) != len(y): raise ValueError('Unequal N in wilcoxon. Aborting.') d = x - y if zero_method == "wilcox": # Keep all non-zero differences d = compress(np.not_equal(d, 0), d, axis=-1) count = len(d) if count < 10: warnings.warn("Warning: sample size too small for normal approximation.") r = stats.rankdata(abs(d)) r_plus = np.sum((d > 0) * r, axis=0) r_minus = np.sum((d < 0) * r, axis=0) if zero_method == "zsplit": r_zero = np.sum((d == 0) * r, axis=0) r_plus += r_zero / 2. r_minus += r_zero / 2. T = min(r_plus, r_minus) mn = count * (count + 1.) * 0.25 se = count * (count + 1.) * (2. * count + 1.) if zero_method == "pratt": r = r[d != 0] replist, repnum = find_repeats(r) if repnum.size != 0: # Correction for repeated elements. se -= 0.5 * (repnum * (repnum * repnum - 1)).sum() se = sqrt(se / 24) correction = 0.5 * int(bool(correction)) * np.sign(T - mn) z = (T - mn - correction) / se prob = 2. * distributions.norm.sf(abs(z)) return WilcoxonResult(T, prob) def median_test(*args, **kwds): """ Mood's median test. Test that two or more samples come from populations with the same median. Let ``n = len(args)`` be the number of samples. The "grand median" of all the data is computed, and a contingency table is formed by classifying the values in each sample as being above or below the grand median. The contingency table, along with `correction` and `lambda_`, are passed to `scipy.stats.chi2_contingency` to compute the test statistic and p-value. Parameters ---------- sample1, sample2, ... : array_like The set of samples. There must be at least two samples. Each sample must be a one-dimensional sequence containing at least one value. The samples are not required to have the same length. ties : str, optional Determines how values equal to the grand median are classified in the contingency table. The string must be one of:: "below": Values equal to the grand median are counted as "below". "above": Values equal to the grand median are counted as "above". "ignore": Values equal to the grand median are not counted. The default is "below". correction : bool, optional If True, *and* there are just two samples, apply Yates' correction for continuity when computing the test statistic associated with the contingency table. Default is True. lambda_ : float or str, optional. By default, the statistic computed in this test is Pearson's chi-squared statistic. `lambda_` allows a statistic from the Cressie-Read power divergence family to be used instead. See `power_divergence` for details. Default is 1 (Pearson's chi-squared statistic). nan_policy : {'propagate', 'raise', 'omit'}, optional Defines how to handle when input contains nan. 'propagate' returns nan, 'raise' throws an error, 'omit' performs the calculations ignoring nan values. Default is 'propagate'. Returns ------- stat : float The test statistic. The statistic that is returned is determined by `lambda_`. The default is Pearson's chi-squared statistic. p : float The p-value of the test. m : float The grand median. table : ndarray The contingency table. The shape of the table is (2, n), where n is the number of samples. The first row holds the counts of the values above the grand median, and the second row holds the counts of the values below the grand median. 
The table allows further analysis with, for example, `scipy.stats.chi2_contingency`, or with `scipy.stats.fisher_exact` if there are two samples, without having to recompute the table. If ``nan_policy`` is "propagate" and there are nans in the input, the return value for ``table`` is ``None``. See Also -------- kruskal : Compute the Kruskal-Wallis H-test for independent samples. mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. Notes ----- .. versionadded:: 0.15.0 References ---------- .. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill (1950), pp. 394-399. .. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). See Sections 8.12 and 10.15. Examples -------- A biologist runs an experiment in which there are three groups of plants. Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. Each plant produces a number of seeds. The seed counts for each group are:: Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 The following code applies Mood's median test to these samples. >>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] >>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] >>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] >>> from scipy.stats import median_test >>> stat, p, med, tbl = median_test(g1, g2, g3) The median is >>> med 34.0 and the contingency table is >>> tbl array([[ 5, 10, 7], [11, 5, 10]]) `p` is too large to conclude that the medians are not the same: >>> p 0.12609082774093244 The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to `median_test`. >>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood") >>> p 0.12224779737117837 The median occurs several times in the data, so we'll get a different result if, for example, ``ties="above"`` is used: >>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above") >>> p 0.063873276069553273 >>> tbl array([[ 5, 11, 9], [11, 4, 8]]) This example demonstrates that if the data set is not large and there are values equal to the median, the p-value can be sensitive to the choice of `ties`. """ ties = kwds.pop('ties', 'below') correction = kwds.pop('correction', True) lambda_ = kwds.pop('lambda_', None) nan_policy = kwds.pop('nan_policy', 'propagate') if len(kwds) > 0: bad_kwd = kwds.keys()[0] raise TypeError("median_test() got an unexpected keyword " "argument %r" % bad_kwd) if len(args) < 2: raise ValueError('median_test requires two or more samples.') ties_options = ['below', 'above', 'ignore'] if ties not in ties_options: raise ValueError("invalid 'ties' option '%s'; 'ties' must be one " "of: %s" % (ties, str(ties_options)[1:-1])) data = [np.asarray(arg) for arg in args] # Validate the sizes and shapes of the arguments. for k, d in enumerate(data): if d.size == 0: raise ValueError("Sample %d is empty. All samples must " "contain at least one value." % (k + 1)) if d.ndim != 1: raise ValueError("Sample %d has %d dimensions. All " "samples must be one-dimensional sequences." 
% (k + 1, d.ndim)) cdata = np.concatenate(data) contains_nan, nan_policy = _contains_nan(cdata, nan_policy) if contains_nan and nan_policy == 'propagate': return np.nan, np.nan, np.nan, None if contains_nan: grand_median = np.median(cdata[~np.isnan(cdata)]) else: grand_median = np.median(cdata) # When the minimum version of numpy supported by scipy is 1.9.0, # the above if/else statement can be replaced by the single line: # grand_median = np.nanmedian(cdata) # Create the contingency table. table = np.zeros((2, len(data)), dtype=np.int64) for k, sample in enumerate(data): sample = sample[~np.isnan(sample)] nabove = count_nonzero(sample > grand_median) nbelow = count_nonzero(sample < grand_median) nequal = sample.size - (nabove + nbelow) table[0, k] += nabove table[1, k] += nbelow if ties == "below": table[1, k] += nequal elif ties == "above": table[0, k] += nequal # Check that no row or column of the table is all zero. # Such a table can not be given to chi2_contingency, because it would have # a zero in the table of expected frequencies. rowsums = table.sum(axis=1) if rowsums[0] == 0: raise ValueError("All values are below the grand median (%r)." % grand_median) if rowsums[1] == 0: raise ValueError("All values are above the grand median (%r)." % grand_median) if ties == "ignore": # We already checked that each sample has at least one value, but it # is possible that all those values equal the grand median. If `ties` # is "ignore", that would result in a column of zeros in `table`. We # check for that case here. zero_cols = np.where((table == 0).all(axis=0))[0] if len(zero_cols) > 0: msg = ("All values in sample %d are equal to the grand " "median (%r), so they are ignored, resulting in an " "empty sample." % (zero_cols[0] + 1, grand_median)) raise ValueError(msg) stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, correction=correction) return stat, p, grand_median, table def _circfuncs_common(samples, high, low): samples = np.asarray(samples) if samples.size == 0: return np.nan, np.nan ang = (samples - low)*2.*pi / (high - low) return samples, ang def circmean(samples, high=2*pi, low=0, axis=None): """ Compute the circular mean for samples in a range. Parameters ---------- samples : array_like Input array. high : float or int, optional High boundary for circular mean range. Default is ``2*pi``. low : float or int, optional Low boundary for circular mean range. Default is 0. axis : int, optional Axis along which means are computed. The default is to compute the mean of the flattened array. Returns ------- circmean : float Circular mean. Examples -------- >>> from scipy.stats import circmean >>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3]) 0.2 >>> from scipy.stats import circmean >>> circmean([0.2, 1.4, 2.6], high = 1, low = 0) 0.4 """ samples, ang = _circfuncs_common(samples, high, low) S = sin(ang).sum(axis=axis) C = cos(ang).sum(axis=axis) res = arctan2(S, C) mask = res < 0 if mask.ndim > 0: res[mask] += 2*pi elif mask: res += 2*pi return res*(high - low)/2.0/pi + low def circvar(samples, high=2*pi, low=0, axis=None): """ Compute the circular variance for samples assumed to be in a range Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular variance range. Default is 0. high : float or int, optional High boundary for circular variance range. Default is ``2*pi``. axis : int, optional Axis along which variances are computed. The default is to compute the variance of the flattened array. 
Returns ------- circvar : float Circular variance. Notes ----- This uses a definition of circular variance that in the limit of small angles returns a number close to the 'linear' variance. Examples -------- >>> from scipy.stats import circvar >>> circvar([0, 2*np.pi/3, 5*np.pi/3]) 2.19722457734 """ samples, ang = _circfuncs_common(samples, high, low) S = sin(ang).mean(axis=axis) C = cos(ang).mean(axis=axis) R = hypot(S, C) return ((high - low)/2.0/pi)**2 * 2 * log(1/R) def circstd(samples, high=2*pi, low=0, axis=None): """ Compute the circular standard deviation for samples assumed to be in the range [low to high]. Parameters ---------- samples : array_like Input array. low : float or int, optional Low boundary for circular standard deviation range. Default is 0. high : float or int, optional High boundary for circular standard deviation range. Default is ``2*pi``. axis : int, optional Axis along which standard deviations are computed. The default is to compute the standard deviation of the flattened array. Returns ------- circstd : float Circular standard deviation. Notes ----- This uses a definition of circular standard deviation that in the limit of small angles returns a number close to the 'linear' standard deviation. Examples -------- >>> from scipy.stats import circstd >>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2]) 0.063564063306 """ samples, ang = _circfuncs_common(samples, high, low) S = sin(ang).mean(axis=axis) C = cos(ang).mean(axis=axis) R = hypot(S, C) return ((high - low)/2.0/pi) * sqrt(-2*log(R))
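if __name__ == "__main__":  # pragma: no cover
    # Illustrative sketch only (not part of the library API): for angles much
    # smaller than a radian, the circular statistics above should be close to
    # their ordinary "linear" counterparts, as claimed in the ``circvar`` and
    # ``circstd`` notes.
    _small_angles = np.array([0.0, 0.01, 0.02, 0.03])
    print("circmean vs mean:", circmean(_small_angles), np.mean(_small_angles))
    print("circstd  vs std: ", circstd(_small_angles), np.std(_small_angles))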
file_length: 93,860
avg_line_length: 32.811599
max_line_length: 103
extension_type: py
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_multivariate.py
# # Author: Joris Vankerschaver 2013 # from __future__ import division, print_function, absolute_import import math import numpy as np import scipy.linalg from scipy.misc import doccer from scipy.special import gammaln, psi, multigammaln, xlogy, entr from scipy._lib._util import check_random_state from scipy.linalg.blas import drot from ._discrete_distns import binom from . import mvn __all__ = ['multivariate_normal', 'matrix_normal', 'dirichlet', 'wishart', 'invwishart', 'multinomial', 'special_ortho_group', 'ortho_group', 'random_correlation', 'unitary_group'] _LOG_2PI = np.log(2 * np.pi) _LOG_2 = np.log(2) _LOG_PI = np.log(np.pi) _doc_random_state = """\ random_state : None or int or np.random.RandomState instance, optional If int or RandomState, use it for drawing the random variates. If None (or np.random), the global np.random state is used. Default is None. """ def _squeeze_output(out): """ Remove single-dimensional entries from array and convert to scalar, if necessary. """ out = out.squeeze() if out.ndim == 0: out = out[()] return out def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): """ Determine which eigenvalues are "small" given the spectrum. This is for compatibility across various linear algebra functions that should agree about whether or not a Hermitian matrix is numerically singular and what is its numerical matrix rank. This is designed to be compatible with scipy.linalg.pinvh. Parameters ---------- spectrum : 1d ndarray Array of eigenvalues of a Hermitian matrix. cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. Returns ------- eps : float Magnitude cutoff for numerical negligibility. """ if rcond is not None: cond = rcond if cond in [None, -1]: t = spectrum.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps eps = cond * np.max(abs(spectrum)) return eps def _pinv_1d(v, eps=1e-5): """ A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers. """ return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) class _PSD(object): """ Compute coordinated functions of a symmetric positive semidefinite matrix. This class addresses two issues. Firstly it allows the pseudoinverse, the logarithm of the pseudo-determinant, and the rank of the matrix to be computed using one call to eigh instead of three. Secondly it allows these functions to be computed in a way that gives mutually compatible results. All of the functions are computed with a common understanding as to which of the eigenvalues are to be considered negligibly small. The functions are designed to coordinate with scipy.linalg.pinvh() but not necessarily with np.linalg.det() or with np.linalg.matrix_rank(). Parameters ---------- M : array_like Symmetric positive semidefinite matrix (2-D). cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. lower : bool, optional Whether the pertinent array data is taken from the lower or upper triangle of M. 
(Default: lower) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. allow_singular : bool, optional Whether to allow a singular matrix. (Default: True) Notes ----- The arguments are similar to those of scipy.linalg.pinvh(). """ def __init__(self, M, cond=None, rcond=None, lower=True, check_finite=True, allow_singular=True): # Compute the symmetric eigendecomposition. # Note that eigh takes care of array conversion, chkfinite, # and assertion that the matrix is square. s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite) eps = _eigvalsh_to_eps(s, cond, rcond) if np.min(s) < -eps: raise ValueError('the input matrix must be positive semidefinite') d = s[s > eps] if len(d) < len(s) and not allow_singular: raise np.linalg.LinAlgError('singular matrix') s_pinv = _pinv_1d(s, eps) U = np.multiply(u, np.sqrt(s_pinv)) # Initialize the eagerly precomputed attributes. self.rank = len(d) self.U = U self.log_pdet = np.sum(np.log(d)) # Initialize an attribute to be lazily computed. self._pinv = None @property def pinv(self): if self._pinv is None: self._pinv = np.dot(self.U, self.U.T) return self._pinv class multi_rv_generic(object): """ Class which encapsulates common functionality between all multivariate distributions. """ def __init__(self, seed=None): super(multi_rv_generic, self).__init__() self._random_state = check_random_state(seed) @property def random_state(self): """ Get or set the RandomState object for generating random variates. This can be either None or an existing RandomState object. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState instance, use it. If an int, use a new RandomState instance seeded with seed. """ return self._random_state @random_state.setter def random_state(self, seed): self._random_state = check_random_state(seed) def _get_random_state(self, random_state): if random_state is not None: return check_random_state(random_state) else: return self._random_state class multi_rv_frozen(object): """ Class which encapsulates common functionality between all frozen multivariate distributions. """ @property def random_state(self): return self._dist._random_state @random_state.setter def random_state(self, seed): self._dist._random_state = check_random_state(seed) _mvn_doc_default_callparams = """\ mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional Whether to allow a singular covariance matrix. (Default: False) """ _mvn_doc_callparams_note = \ """Setting the parameter `mean` to `None` is equivalent to having `mean` be the zero-vector. The parameter `cov` can be a scalar, in which case the covariance matrix is the identity times that value, a vector of diagonal entries for the covariance matrix, or a two-dimensional array_like. 
""" _mvn_doc_frozen_callparams = "" _mvn_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" mvn_docdict_params = { '_mvn_doc_default_callparams': _mvn_doc_default_callparams, '_mvn_doc_callparams_note': _mvn_doc_callparams_note, '_doc_random_state': _doc_random_state } mvn_docdict_noparams = { '_mvn_doc_default_callparams': _mvn_doc_frozen_callparams, '_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class multivariate_normal_gen(multi_rv_generic): r""" A multivariate normal random variable. The `mean` keyword specifies the mean. The `cov` keyword specifies the covariance matrix. Methods ------- ``pdf(x, mean=None, cov=1, allow_singular=False)`` Probability density function. ``logpdf(x, mean=None, cov=1, allow_singular=False)`` Log of the probability density function. ``cdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)`` Cumulative distribution function. ``logcdf(x, mean=None, cov=1, allow_singular=False, maxpts=1000000*dim, abseps=1e-5, releps=1e-5)`` Log of the cumulative distribution function. ``rvs(mean=None, cov=1, size=1, random_state=None)`` Draw random samples from a multivariate normal distribution. ``entropy()`` Compute the differential entropy of the multivariate normal. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the mean and covariance parameters, returning a "frozen" multivariate normal random variable: rv = multivariate_normal(mean=None, cov=1, allow_singular=False) - Frozen object with the same methods but holding the given mean and covariance fixed. Notes ----- %(_mvn_doc_callparams_note)s The covariance matrix `cov` must be a (symmetric) positive semi-definite matrix. The determinant and inverse of `cov` are computed as the pseudo-determinant and pseudo-inverse, respectively, so that `cov` does not need to have full rank. The probability density function for `multivariate_normal` is .. math:: f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}} \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right), where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix, and :math:`k` is the dimension of the space where :math:`x` takes values. .. versionadded:: 0.14.0 Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import multivariate_normal >>> x = np.linspace(0, 5, 10, endpoint=False) >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129, 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349]) >>> fig1 = plt.figure() >>> ax = fig1.add_subplot(111) >>> ax.plot(x, y) The input quantiles can be any shape of array, as long as the last axis labels the components. This allows us for instance to display the frozen pdf for a non-isotropic random variable in 2D as follows: >>> x, y = np.mgrid[-1:1:.01, -1:1:.01] >>> pos = np.dstack((x, y)) >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]) >>> fig2 = plt.figure() >>> ax2 = fig2.add_subplot(111) >>> ax2.contourf(x, y, rv.pdf(pos)) """ def __init__(self, seed=None): super(multivariate_normal_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params) def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): """ Create a frozen multivariate normal distribution. 
See `multivariate_normal_frozen` for more information. """ return multivariate_normal_frozen(mean, cov, allow_singular=allow_singular, seed=seed) def _process_parameters(self, dim, mean, cov): """ Infer dimensionality from mean or covariance matrix, ensure that mean and covariance are full vector resp. matrix. """ # Try to infer dimensionality if dim is None: if mean is None: if cov is None: dim = 1 else: cov = np.asarray(cov, dtype=float) if cov.ndim < 2: dim = 1 else: dim = cov.shape[0] else: mean = np.asarray(mean, dtype=float) dim = mean.size else: if not np.isscalar(dim): raise ValueError("Dimension of random variable must be a scalar.") # Check input sizes and return full arrays for mean and cov if necessary if mean is None: mean = np.zeros(dim) mean = np.asarray(mean, dtype=float) if cov is None: cov = 1.0 cov = np.asarray(cov, dtype=float) if dim == 1: mean.shape = (1,) cov.shape = (1, 1) if mean.ndim != 1 or mean.shape[0] != dim: raise ValueError("Array 'mean' must be a vector of length %d." % dim) if cov.ndim == 0: cov = cov * np.eye(dim) elif cov.ndim == 1: cov = np.diag(cov) elif cov.ndim == 2 and cov.shape != (dim, dim): rows, cols = cov.shape if rows != cols: msg = ("Array 'cov' must be square if it is two dimensional," " but cov.shape = %s." % str(cov.shape)) else: msg = ("Dimension mismatch: array 'cov' is of shape %s," " but 'mean' is a vector of length %d.") msg = msg % (str(cov.shape), len(mean)) raise ValueError(msg) elif cov.ndim > 2: raise ValueError("Array 'cov' must be at most two-dimensional," " but cov.ndim = %d" % cov.ndim) return dim, mean, cov def _process_quantiles(self, x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x[np.newaxis] elif x.ndim == 1: if dim == 1: x = x[:, np.newaxis] else: x = x[np.newaxis, :] return x def _logpdf(self, x, mean, prec_U, log_det_cov, rank): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution prec_U : ndarray A decomposition such that np.dot(prec_U, prec_U.T) is the precision matrix, i.e. inverse of the covariance matrix. log_det_cov : float Logarithm of the determinant of the covariance matrix rank : int Rank of the covariance matrix. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ dev = x - mean maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1) return -0.5 * (rank * _LOG_2PI + log_det_cov + maha) def logpdf(self, x, mean=None, cov=1, allow_singular=False): """ Log of the multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Log of the probability density function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s """ dim, mean, cov = self._process_parameters(None, mean, cov) x = self._process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank) return _squeeze_output(out) def pdf(self, x, mean=None, cov=1, allow_singular=False): """ Multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. 
%(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Probability density function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s """ dim, mean, cov = self._process_parameters(None, mean, cov) x = self._process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)) return _squeeze_output(out) def _cdf(self, x, mean, cov, maxpts, abseps, releps): """ Parameters ---------- x : ndarray Points at which to evaluate the cumulative distribution function. mean : ndarray Mean of the distribution cov : array_like Covariance matrix of the distribution maxpts: integer The maximum number of points to use for integration abseps: float Absolute error tolerance releps: float Relative error tolerance Notes ----- As this function does no argument checking, it should not be called directly; use 'cdf' instead. .. versionadded:: 1.0.0 """ lower = np.full(mean.shape, -np.inf) # mvnun expects 1-d arguments, so process points sequentially func1d = lambda x_slice: mvn.mvnun(lower, x_slice, mean, cov, maxpts, abseps, releps)[0] out = np.apply_along_axis(func1d, -1, x) return _squeeze_output(out) def logcdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, abseps=1e-5, releps=1e-5): """ Log of the multivariate normal cumulative distribution function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s maxpts: integer, optional The maximum number of points to use for integration (default `1000000*dim`) abseps: float, optional Absolute error tolerance (default 1e-5) releps: float, optional Relative error tolerance (default 1e-5) Returns ------- cdf : ndarray or scalar Log of the cumulative distribution function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s .. versionadded:: 1.0.0 """ dim, mean, cov = self._process_parameters(None, mean, cov) x = self._process_quantiles(x, dim) # Use _PSD to check covariance matrix _PSD(cov, allow_singular=allow_singular) if not maxpts: maxpts = 1000000 * dim out = np.log(self._cdf(x, mean, cov, maxpts, abseps, releps)) return out def cdf(self, x, mean=None, cov=1, allow_singular=False, maxpts=None, abseps=1e-5, releps=1e-5): """ Multivariate normal cumulative distribution function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_mvn_doc_default_callparams)s maxpts: integer, optional The maximum number of points to use for integration (default `1000000*dim`) abseps: float, optional Absolute error tolerance (default 1e-5) releps: float, optional Relative error tolerance (default 1e-5) Returns ------- cdf : ndarray or scalar Cumulative distribution function evaluated at `x` Notes ----- %(_mvn_doc_callparams_note)s .. versionadded:: 1.0.0 """ dim, mean, cov = self._process_parameters(None, mean, cov) x = self._process_quantiles(x, dim) # Use _PSD to check covariance matrix _PSD(cov, allow_singular=allow_singular) if not maxpts: maxpts = 1000000 * dim out = self._cdf(x, mean, cov, maxpts, abseps, releps) return out def rvs(self, mean=None, cov=1, size=1, random_state=None): """ Draw random samples from a multivariate normal distribution. Parameters ---------- %(_mvn_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. 
Notes ----- %(_mvn_doc_callparams_note)s """ dim, mean, cov = self._process_parameters(None, mean, cov) random_state = self._get_random_state(random_state) out = random_state.multivariate_normal(mean, cov, size) return _squeeze_output(out) def entropy(self, mean=None, cov=1): """ Compute the differential entropy of the multivariate normal. Parameters ---------- %(_mvn_doc_default_callparams)s Returns ------- h : scalar Entropy of the multivariate normal distribution Notes ----- %(_mvn_doc_callparams_note)s """ dim, mean, cov = self._process_parameters(None, mean, cov) _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov) return 0.5 * logdet multivariate_normal = multivariate_normal_gen() class multivariate_normal_frozen(multi_rv_frozen): def __init__(self, mean=None, cov=1, allow_singular=False, seed=None, maxpts=None, abseps=1e-5, releps=1e-5): """ Create a frozen multivariate normal distribution. Parameters ---------- mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional If this flag is True then tolerate a singular covariance matrix (default False). seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. maxpts: integer, optional The maximum number of points to use for integration of the cumulative distribution function (default `1000000*dim`) abseps: float, optional Absolute error tolerance for the cumulative distribution function (default 1e-5) releps: float, optional Relative error tolerance for the cumulative distribution function (default 1e-5) Examples -------- When called with the default parameters, this will create a 1D random variable with mean 0 and covariance 1: >>> from scipy.stats import multivariate_normal >>> r = multivariate_normal() >>> r.mean array([ 0.]) >>> r.cov array([[1.]]) """ self._dist = multivariate_normal_gen(seed) self.dim, self.mean, self.cov = self._dist._process_parameters( None, mean, cov) self.cov_info = _PSD(self.cov, allow_singular=allow_singular) if not maxpts: maxpts = 1000000 * self.dim self.maxpts = maxpts self.abseps = abseps self.releps = releps def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.mean, self.cov_info.U, self.cov_info.log_pdet, self.cov_info.rank) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def logcdf(self, x): return np.log(self.cdf(x)) def cdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._cdf(x, self.mean, self.cov, self.maxpts, self.abseps, self.releps) return _squeeze_output(out) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.mean, self.cov, size, random_state) def entropy(self): """ Computes the differential entropy of the multivariate normal. 
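
        The value is computed as ``0.5 * (rank * (log(2*pi) + 1) + log_pdet)``,
        using the rank and log pseudo-determinant of the frozen covariance, so
        singular covariance matrices are supported.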
Returns ------- h : scalar Entropy of the multivariate normal distribution """ log_pdet = self.cov_info.log_pdet rank = self.cov_info.rank return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet) # Set frozen generator docstrings from corresponding docstrings in # multivariate_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'logcdf', 'cdf', 'rvs']: method = multivariate_normal_gen.__dict__[name] method_frozen = multivariate_normal_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params) _matnorm_doc_default_callparams = """\ mean : array_like, optional Mean of the distribution (default: `None`) rowcov : array_like, optional Among-row covariance matrix of the distribution (default: `1`) colcov : array_like, optional Among-column covariance matrix of the distribution (default: `1`) """ _matnorm_doc_callparams_note = \ """If `mean` is set to `None` then a matrix of zeros is used for the mean. The dimensions of this matrix are inferred from the shape of `rowcov` and `colcov`, if these are provided, or set to `1` if ambiguous. `rowcov` and `colcov` can be two-dimensional array_likes specifying the covariance matrices directly. Alternatively, a one-dimensional array will be be interpreted as the entries of a diagonal matrix, and a scalar or zero-dimensional array will be interpreted as this value times the identity matrix. """ _matnorm_doc_frozen_callparams = "" _matnorm_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" matnorm_docdict_params = { '_matnorm_doc_default_callparams': _matnorm_doc_default_callparams, '_matnorm_doc_callparams_note': _matnorm_doc_callparams_note, '_doc_random_state': _doc_random_state } matnorm_docdict_noparams = { '_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams, '_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class matrix_normal_gen(multi_rv_generic): r""" A matrix normal random variable. The `mean` keyword specifies the mean. The `rowcov` keyword specifies the among-row covariance matrix. The 'colcov' keyword specifies the among-column covariance matrix. Methods ------- ``pdf(X, mean=None, rowcov=1, colcov=1)`` Probability density function. ``logpdf(X, mean=None, rowcov=1, colcov=1)`` Log of the probability density function. ``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)`` Draw random samples. Parameters ---------- X : array_like Quantiles, with the last two axes of `X` denoting the components. %(_matnorm_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the mean and covariance parameters, returning a "frozen" matrix normal random variable: rv = matrix_normal(mean=None, rowcov=1, colcov=1) - Frozen object with the same methods but holding the given mean and covariance fixed. Notes ----- %(_matnorm_doc_callparams_note)s The covariance matrices specified by `rowcov` and `colcov` must be (symmetric) positive definite. If the samples in `X` are :math:`m \times n`, then `rowcov` must be :math:`m \times m` and `colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`. The probability density function for `matrix_normal` is .. 
math:: f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}} \exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1} (X-M)^T \right] \right), where :math:`M` is the mean, :math:`U` the among-row covariance matrix, :math:`V` the among-column covariance matrix. The `allow_singular` behaviour of the `multivariate_normal` distribution is not currently supported. Covariance matrices must be full rank. The `matrix_normal` distribution is closely related to the `multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)` (the vector formed by concatenating the columns of :math:`X`) has a multivariate normal distribution with mean :math:`\mathrm{Vec}(M)` and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker product). Sampling and pdf evaluation are :math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but :math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal, making this equivalent form algorithmically inefficient. .. versionadded:: 0.17.0 Examples -------- >>> from scipy.stats import matrix_normal >>> M = np.arange(6).reshape(3,2); M array([[0, 1], [2, 3], [4, 5]]) >>> U = np.diag([1,2,3]); U array([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) >>> V = 0.3*np.identity(2); V array([[ 0.3, 0. ], [ 0. , 0.3]]) >>> X = M + 0.1; X array([[ 0.1, 1.1], [ 2.1, 3.1], [ 4.1, 5.1]]) >>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V) 0.023410202050005054 >>> # Equivalent multivariate normal >>> from scipy.stats import multivariate_normal >>> vectorised_X = X.T.flatten() >>> equiv_mean = M.T.flatten() >>> equiv_cov = np.kron(V,U) >>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov) 0.023410202050005054 """ def __init__(self, seed=None): super(matrix_normal_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params) def __call__(self, mean=None, rowcov=1, colcov=1, seed=None): """ Create a frozen matrix normal distribution. See `matrix_normal_frozen` for more information. """ return matrix_normal_frozen(mean, rowcov, colcov, seed=seed) def _process_parameters(self, mean, rowcov, colcov): """ Infer dimensionality from mean or covariance matrices. Handle defaults. Ensure compatible dimensions. 
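
        For example, a scalar `rowcov` combined with an ``m x n`` `mean` is
        expanded to that scalar times the ``m x m`` identity matrix, and a
        one-dimensional `colcov` is interpreted as the diagonal of the
        among-column covariance matrix.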
""" # Process mean if mean is not None: mean = np.asarray(mean, dtype=float) meanshape = mean.shape if len(meanshape) != 2: raise ValueError("Array `mean` must be two dimensional.") if np.any(meanshape == 0): raise ValueError("Array `mean` has invalid shape.") # Process among-row covariance rowcov = np.asarray(rowcov, dtype=float) if rowcov.ndim == 0: if mean is not None: rowcov = rowcov * np.identity(meanshape[0]) else: rowcov = rowcov * np.identity(1) elif rowcov.ndim == 1: rowcov = np.diag(rowcov) rowshape = rowcov.shape if len(rowshape) != 2: raise ValueError("`rowcov` must be a scalar or a 2D array.") if rowshape[0] != rowshape[1]: raise ValueError("Array `rowcov` must be square.") if rowshape[0] == 0: raise ValueError("Array `rowcov` has invalid shape.") numrows = rowshape[0] # Process among-column covariance colcov = np.asarray(colcov, dtype=float) if colcov.ndim == 0: if mean is not None: colcov = colcov * np.identity(meanshape[1]) else: colcov = colcov * np.identity(1) elif colcov.ndim == 1: colcov = np.diag(colcov) colshape = colcov.shape if len(colshape) != 2: raise ValueError("`colcov` must be a scalar or a 2D array.") if colshape[0] != colshape[1]: raise ValueError("Array `colcov` must be square.") if colshape[0] == 0: raise ValueError("Array `colcov` has invalid shape.") numcols = colshape[0] # Ensure mean and covariances compatible if mean is not None: if meanshape[0] != numrows: raise ValueError("Arrays `mean` and `rowcov` must have the" "same number of rows.") if meanshape[1] != numcols: raise ValueError("Arrays `mean` and `colcov` must have the" "same number of columns.") else: mean = np.zeros((numrows,numcols)) dims = (numrows, numcols) return dims, mean, rowcov, colcov def _process_quantiles(self, X, dims): """ Adjust quantiles array so that last two axes labels the components of each data point. """ X = np.asarray(X, dtype=float) if X.ndim == 2: X = X[np.newaxis, :] if X.shape[-2:] != dims: raise ValueError("The shape of array `X` is not compatible " "with the distribution parameters.") return X def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov, col_prec_rt, log_det_colcov): """ Parameters ---------- dims : tuple Dimensions of the matrix variates X : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution row_prec_rt : ndarray A decomposition such that np.dot(row_prec_rt, row_prec_rt.T) is the inverse of the among-row covariance matrix log_det_rowcov : float Logarithm of the determinant of the among-row covariance matrix col_prec_rt : ndarray A decomposition such that np.dot(col_prec_rt, col_prec_rt.T) is the inverse of the among-column covariance matrix log_det_colcov : float Logarithm of the determinant of the among-column covariance matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ numrows, numcols = dims roll_dev = np.rollaxis(X-mean, axis=-1, start=0) scale_dev = np.tensordot(col_prec_rt.T, np.dot(roll_dev, row_prec_rt), 1) maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0) return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov + numrows*log_det_colcov + maha) def logpdf(self, X, mean=None, rowcov=1, colcov=1): """ Log of the matrix normal probability density function. Parameters ---------- X : array_like Quantiles, with the last two axes of `X` denoting the components. 
%(_matnorm_doc_default_callparams)s Returns ------- logpdf : ndarray Log of the probability density function evaluated at `X` Notes ----- %(_matnorm_doc_callparams_note)s """ dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, colcov) X = self._process_quantiles(X, dims) rowpsd = _PSD(rowcov, allow_singular=False) colpsd = _PSD(colcov, allow_singular=False) out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U, colpsd.log_pdet) return _squeeze_output(out) def pdf(self, X, mean=None, rowcov=1, colcov=1): """ Matrix normal probability density function. Parameters ---------- X : array_like Quantiles, with the last two axes of `X` denoting the components. %(_matnorm_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `X` Notes ----- %(_matnorm_doc_callparams_note)s """ return np.exp(self.logpdf(X, mean, rowcov, colcov)) def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None): """ Draw random samples from a matrix normal distribution. Parameters ---------- %(_matnorm_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `dims`), where `dims` is the dimension of the random matrices. Notes ----- %(_matnorm_doc_callparams_note)s """ size = int(size) dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, colcov) rowchol = scipy.linalg.cholesky(rowcov, lower=True) colchol = scipy.linalg.cholesky(colcov, lower=True) random_state = self._get_random_state(random_state) std_norm = random_state.standard_normal(size=(dims[1],size,dims[0])) roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1) out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:] if size == 1: #out = np.squeeze(out, axis=0) out = out.reshape(mean.shape) return out matrix_normal = matrix_normal_gen() class matrix_normal_frozen(multi_rv_frozen): def __init__(self, mean=None, rowcov=1, colcov=1, seed=None): """ Create a frozen matrix normal distribution. Parameters ---------- %(_matnorm_doc_default_callparams)s seed : None or int or np.random.RandomState instance, optional If int or RandomState, use it for drawing the random variates. If None (or np.random), the global np.random state is used. Default is None. 
Examples -------- >>> from scipy.stats import matrix_normal >>> distn = matrix_normal(mean=np.zeros((3,3))) >>> X = distn.rvs(); X array([[-0.02976962, 0.93339138, -0.09663178], [ 0.67405524, 0.28250467, -0.93308929], [-0.31144782, 0.74535536, 1.30412916]]) >>> distn.pdf(X) 2.5160642368346784e-05 >>> distn.logpdf(X) -10.590229595124615 """ self._dist = matrix_normal_gen(seed) self.dims, self.mean, self.rowcov, self.colcov = \ self._dist._process_parameters(mean, rowcov, colcov) self.rowpsd = _PSD(self.rowcov, allow_singular=False) self.colpsd = _PSD(self.colcov, allow_singular=False) def logpdf(self, X): X = self._dist._process_quantiles(X, self.dims) out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U, self.rowpsd.log_pdet, self.colpsd.U, self.colpsd.log_pdet) return _squeeze_output(out) def pdf(self, X): return np.exp(self.logpdf(X)) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.mean, self.rowcov, self.colcov, size, random_state) # Set frozen generator docstrings from corresponding docstrings in # matrix_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'rvs']: method = matrix_normal_gen.__dict__[name] method_frozen = matrix_normal_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params) _dirichlet_doc_default_callparams = """\ alpha : array_like The concentration parameters. The number of entries determines the dimensionality of the distribution. """ _dirichlet_doc_frozen_callparams = "" _dirichlet_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" dirichlet_docdict_params = { '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams, '_doc_random_state': _doc_random_state } dirichlet_docdict_noparams = { '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams, '_doc_random_state': _doc_random_state } def _dirichlet_check_parameters(alpha): alpha = np.asarray(alpha) if np.min(alpha) <= 0: raise ValueError("All parameters must be greater than 0") elif alpha.ndim != 1: raise ValueError("Parameter vector 'a' must be one dimensional, " "but a.shape = %s." % (alpha.shape, )) return alpha def _dirichlet_check_input(alpha, x): x = np.asarray(x) if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]: raise ValueError("Vector 'x' must have either the same number " "of entries as, or one entry fewer than, " "parameter vector 'a', but alpha.shape = %s " "and x.shape = %s." % (alpha.shape, x.shape)) if x.shape[0] != alpha.shape[0]: xk = np.array([1 - np.sum(x, 0)]) if xk.ndim == 1: x = np.append(x, xk) elif xk.ndim == 2: x = np.vstack((x, xk)) else: raise ValueError("The input must be one dimensional or a two " "dimensional matrix containing the entries.") if np.min(x) < 0: raise ValueError("Each entry in 'x' must be greater than or equal to zero.") if np.max(x) > 1: raise ValueError("Each entry in 'x' must be smaller or equal one.") # Check x_i > 0 or alpha_i > 1 xeq0 = (x == 0) alphalt1 = (alpha < 1) if x.shape != alpha.shape: alphalt1 = np.repeat(alphalt1, x.shape[-1], axis=-1).reshape(x.shape) chk = np.logical_and(xeq0, alphalt1) if np.sum(chk): raise ValueError("Each entry in 'x' must be greater than zero if its alpha is less than one.") if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any(): raise ValueError("The input vector 'x' must lie within the normal " "simplex. but np.sum(x, 0) = %s." 
% np.sum(x, 0)) return x def _lnB(alpha): r""" Internal helper function to compute the log of the useful quotient .. math:: B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)} Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- B : scalar Helper quotient, internal use only """ return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha)) class dirichlet_gen(multi_rv_generic): r""" A Dirichlet random variable. The `alpha` keyword specifies the concentration parameters of the distribution. .. versionadded:: 0.15.0 Methods ------- ``pdf(x, alpha)`` Probability density function. ``logpdf(x, alpha)`` Log of the probability density function. ``rvs(alpha, size=1, random_state=None)`` Draw random samples from a Dirichlet distribution. ``mean(alpha)`` The mean of the Dirichlet distribution ``var(alpha)`` The variance of the Dirichlet distribution ``entropy(alpha)`` Compute the differential entropy of the Dirichlet distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix concentration parameters, returning a "frozen" Dirichlet random variable: rv = dirichlet(alpha) - Frozen object with the same methods but holding the given concentration parameters fixed. Notes ----- Each :math:`\alpha` entry must be positive. The distribution has only support on the simplex defined by .. math:: \sum_{i=1}^{K} x_i \le 1 The probability density function for `dirichlet` is .. math:: f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1} where .. math:: \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)} {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)} and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the concentration parameters and :math:`K` is the dimension of the space where :math:`x` takes values. Note that the dirichlet interface is somewhat inconsistent. The array returned by the rvs function is transposed with respect to the format expected by the pdf and logpdf. """ def __init__(self, seed=None): super(dirichlet_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params) def __call__(self, alpha, seed=None): return dirichlet_frozen(alpha, seed=seed) def _logpdf(self, x, alpha): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function %(_dirichlet_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ lnB = _lnB(alpha) return - lnB + np.sum((xlogy(alpha - 1, x.T)).T, 0) def logpdf(self, x, alpha): """ Log of the Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Log of the probability density function evaluated at `x`. """ alpha = _dirichlet_check_parameters(alpha) x = _dirichlet_check_input(alpha, x) out = self._logpdf(x, alpha) return _squeeze_output(out) def pdf(self, x, alpha): """ The Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray or scalar The probability density function evaluated at `x`. 
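
        Examples
        --------
        A minimal usage sketch; the density of ``[0.3, 0.7]`` under a
        Dirichlet distribution with concentration ``[2, 3]`` equals
        ``12 * 0.3 * 0.7**2``:

        >>> from scipy.stats import dirichlet
        >>> p = dirichlet.pdf([0.3, 0.7], alpha=[2, 3])
        >>> bool(np.isclose(p, 12 * 0.3 * 0.7**2))
        True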
""" alpha = _dirichlet_check_parameters(alpha) x = _dirichlet_check_input(alpha, x) out = np.exp(self._logpdf(x, alpha)) return _squeeze_output(out) def mean(self, alpha): """ Compute the mean of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- mu : ndarray or scalar Mean of the Dirichlet distribution. """ alpha = _dirichlet_check_parameters(alpha) out = alpha / (np.sum(alpha)) return _squeeze_output(out) def var(self, alpha): """ Compute the variance of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- v : ndarray or scalar Variance of the Dirichlet distribution. """ alpha = _dirichlet_check_parameters(alpha) alpha0 = np.sum(alpha) out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1)) return _squeeze_output(out) def entropy(self, alpha): """ Compute the differential entropy of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- h : scalar Entropy of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) alpha0 = np.sum(alpha) lnB = _lnB(alpha) K = alpha.shape[0] out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum( (alpha - 1) * scipy.special.psi(alpha)) return _squeeze_output(out) def rvs(self, alpha, size=1, random_state=None): """ Draw random samples from a Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s size : int, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. """ alpha = _dirichlet_check_parameters(alpha) random_state = self._get_random_state(random_state) return random_state.dirichlet(alpha, size=size) dirichlet = dirichlet_gen() class dirichlet_frozen(multi_rv_frozen): def __init__(self, alpha, seed=None): self.alpha = _dirichlet_check_parameters(alpha) self._dist = dirichlet_gen(seed) def logpdf(self, x): return self._dist.logpdf(x, self.alpha) def pdf(self, x): return self._dist.pdf(x, self.alpha) def mean(self): return self._dist.mean(self.alpha) def var(self): return self._dist.var(self.alpha) def entropy(self): return self._dist.entropy(self.alpha) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.alpha, size, random_state) # Set frozen generator docstrings from corresponding docstrings in # multivariate_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']: method = dirichlet_gen.__dict__[name] method_frozen = dirichlet_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, dirichlet_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params) _wishart_doc_default_callparams = """\ df : int Degrees of freedom, must be greater than or equal to dimension of the scale matrix scale : array_like Symmetric positive definite scale matrix of the distribution """ _wishart_doc_callparams_note = "" _wishart_doc_frozen_callparams = "" _wishart_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" wishart_docdict_params = { '_doc_default_callparams': _wishart_doc_default_callparams, '_doc_callparams_note': _wishart_doc_callparams_note, '_doc_random_state': _doc_random_state } wishart_docdict_noparams = { '_doc_default_callparams': _wishart_doc_frozen_callparams, '_doc_callparams_note': 
_wishart_doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class wishart_gen(multi_rv_generic): r""" A Wishart random variable. The `df` keyword specifies the degrees of freedom. The `scale` keyword specifies the scale matrix, which must be symmetric and positive definite. In this context, the scale matrix is often interpreted in terms of a multivariate normal precision matrix (the inverse of the covariance matrix). Methods ------- ``pdf(x, df, scale)`` Probability density function. ``logpdf(x, df, scale)`` Log of the probability density function. ``rvs(df, scale, size=1, random_state=None)`` Draw random samples from a Wishart distribution. ``entropy()`` Compute the differential entropy of the Wishart distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the degrees of freedom and scale parameters, returning a "frozen" Wishart random variable: rv = wishart(df=1, scale=1) - Frozen object with the same methods but holding the given degrees of freedom and scale fixed. See Also -------- invwishart, chi2 Notes ----- %(_doc_callparams_note)s The scale matrix `scale` must be a symmetric positive definite matrix. Singular matrices, including the symmetric positive semi-definite case, are not supported. The Wishart distribution is often denoted .. math:: W_p(\nu, \Sigma) where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the :math:`p \times p` scale matrix. The probability density function for `wishart` has support over positive definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then its PDF is given by: .. math:: f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} } |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )} \exp\left( -tr(\Sigma^{-1} S) / 2 \right) If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart). If the scale matrix is 1-dimensional and equal to one, then the Wishart distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)` distribution. .. versionadded:: 0.16.0 References ---------- .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", Wiley, 1983. .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate Generator", Applied Statistics, vol. 21, pp. 341-345, 1972. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import wishart, chi2 >>> x = np.linspace(1e-5, 8, 100) >>> w = wishart.pdf(x, df=3, scale=1); w[:5] array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) >>> c = chi2.pdf(x, 3); c[:5] array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) >>> plt.plot(x, w) The input quantiles can be any shape of array, as long as the last axis labels the components. """ def __init__(self, seed=None): super(wishart_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) def __call__(self, df=None, scale=None, seed=None): """ Create a frozen Wishart distribution. See `wishart_frozen` for more information. 
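
        Examples
        --------
        A minimal sketch of freezing the distribution; any symmetric positive
        definite ``scale`` whose dimension does not exceed ``df`` works the
        same way:

        >>> from scipy.stats import wishart
        >>> rw = wishart(df=5, scale=np.eye(3))
        >>> rw.dim
        3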
""" return wishart_frozen(df, scale, seed) def _process_parameters(self, df, scale): if scale is None: scale = 1.0 scale = np.asarray(scale, dtype=float) if scale.ndim == 0: scale = scale[np.newaxis,np.newaxis] elif scale.ndim == 1: scale = np.diag(scale) elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]: raise ValueError("Array 'scale' must be square if it is two" " dimensional, but scale.scale = %s." % str(scale.shape)) elif scale.ndim > 2: raise ValueError("Array 'scale' must be at most two-dimensional," " but scale.ndim = %d" % scale.ndim) dim = scale.shape[0] if df is None: df = dim elif not np.isscalar(df): raise ValueError("Degrees of freedom must be a scalar.") elif df < dim: raise ValueError("Degrees of freedom cannot be less than dimension" " of scale matrix, but df = %d" % df) return dim, df, scale def _process_quantiles(self, x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x * np.eye(dim)[:, :, np.newaxis] if x.ndim == 1: if dim == 1: x = x[np.newaxis, np.newaxis, :] else: x = np.diag(x)[:, :, np.newaxis] elif x.ndim == 2: if not x.shape[0] == x.shape[1]: raise ValueError("Quantiles must be square if they are two" " dimensional, but x.shape = %s." % str(x.shape)) x = x[:, :, np.newaxis] elif x.ndim == 3: if not x.shape[0] == x.shape[1]: raise ValueError("Quantiles must be square in the first two" " dimensions if they are three dimensional" ", but x.shape = %s." % str(x.shape)) elif x.ndim > 3: raise ValueError("Quantiles must be at most two-dimensional with" " an additional dimension for multiple" "components, but x.ndim = %d" % x.ndim) # Now we have 3-dim array; should have shape [dim, dim, *] if not x.shape[0:2] == (dim, dim): raise ValueError('Quantiles have incompatible dimensions: should' ' be %s, got %s.' % ((dim, dim), x.shape[0:2])) return x def _process_size(self, size): size = np.asarray(size) if size.ndim == 0: size = size[np.newaxis] elif size.ndim > 1: raise ValueError('Size must be an integer or tuple of integers;' ' thus must have dimension <= 1.' ' Got size.ndim = %s' % str(tuple(size))) n = size.prod() shape = tuple(size) return n, shape def _logpdf(self, x, dim, df, scale, log_det_scale, C): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triagular. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ # log determinant of x # Note: x has components along the last axis, so that x.T has # components alone the 0-th axis. Then since det(A) = det(A'), this # gives us a 1-dim vector of determinants # Retrieve tr(scale^{-1} x) log_det_x = np.zeros(x.shape[-1]) scale_inv_x = np.zeros(x.shape) tr_scale_inv_x = np.zeros(x.shape[-1]) for i in range(x.shape[-1]): _, log_det_x[i] = self._cholesky_logdet(x[:,:,i]) scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i]) tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace() # Log PDF out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) - (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale + multigammaln(0.5*df, dim))) return out def logpdf(self, x, df, scale): """ Log of the Wishart probability density function. 
Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) x = self._process_quantiles(x, dim) # Cholesky decomposition of scale, get log(det(scale)) C, log_det_scale = self._cholesky_logdet(scale) out = self._logpdf(x, dim, df, scale, log_det_scale, C) return _squeeze_output(out) def pdf(self, x, df, scale): """ Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpdf(x, df, scale)) def _mean(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead. """ return df * scale def mean(self, df, scale): """ Mean of the Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float The mean of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) def _mode(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead. """ if df >= dim + 1: out = (df-dim-1) * scale else: out = None return out def mode(self, df, scale): """ Mode of the Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix. Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float or None The Mode of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mode(dim, df, scale) return _squeeze_output(out) if out is not None else out def _var(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead. """ var = scale**2 diag = scale.diagonal() # 1 x dim array var += np.outer(diag, diag) var *= df return var def var(self, df, scale): """ Variance of the Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._var(dim, df, scale) return _squeeze_output(out) def _standard_rvs(self, n, shape, dim, df, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom random_state : np.random.RandomState instance RandomState used for drawing the random variates. Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. 
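
        The returned array contains lower-triangular Bartlett factors ``A``:
        off-diagonal entries are standard normal draws and the i-th diagonal
        entry (zero-indexed) is the square root of a chi-square draw with
        ``df - i`` degrees of freedom, so that ``A.dot(A.T)`` is a draw from
        the standard Wishart distribution ``W(df, I)``.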
""" # Random normal variates for off-diagonal elements n_tril = dim * (dim-1) // 2 covariances = random_state.normal( size=n*n_tril).reshape(shape+(n_tril,)) # Random chi-square variates for diagonal elements variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5 for i in range(dim)]].reshape((dim,) + shape[::-1]).T # Create the A matri(ces) - lower triangular A = np.zeros(shape + (dim, dim)) # Input the covariances size_idx = tuple([slice(None,None,None)]*len(shape)) tril_idx = np.tril_indices(dim, k=-1) A[size_idx + tril_idx] = covariances # Input the variances diag_idx = np.diag_indices(dim) A[size_idx + diag_idx] = variances return A def _rvs(self, n, shape, dim, df, C, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triangular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ random_state = self._get_random_state(random_state) # Calculate the matrices A, which are actually lower triangular # Cholesky factorizations of a matrix B such that B ~ W(df, I) A = self._standard_rvs(n, shape, dim, df, random_state) # Calculate SA = C A A' C', where SA ~ W(df, scale) # Note: this is the product of a (lower) (lower) (lower)' (lower)' # or, denoting B = AA', it is C B C' where C is the lower # triangular Cholesky factorization of the scale matrix. # this appears to conflict with the instructions in [1]_, which # suggest that it should be D' B D where D is the lower # triangular factorization of the scale matrix. However, it is # meant to refer to the Bartlett (1933) representation of a # Wishart random variate as L A A' L' where L is lower triangular # so it appears that understanding D' to be upper triangular # is either a typo in or misreading of [1]_. for index in np.ndindex(shape): CA = np.dot(C, A[index]) A[index] = np.dot(CA, CA.T) return A def rvs(self, df, scale, size=1, random_state=None): """ Draw random samples from a Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape (`size`) + (`dim`, `dim), where `dim` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s """ n, shape = self._process_size(size) dim, df, scale = self._process_parameters(df, scale) # Cholesky decomposition of scale C = scipy.linalg.cholesky(scale, lower=True) out = self._rvs(n, shape, dim, df, C, random_state) return _squeeze_output(out) def _entropy(self, dim, df, log_det_scale): """ Parameters ---------- dim : int Dimension of the scale matrix df : int Degrees of freedom log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'entropy' instead. """ return ( 0.5 * (dim+1) * log_det_scale + 0.5 * dim * (dim+1) * _LOG_2 + multigammaln(0.5*df, dim) - 0.5 * (df - dim - 1) * np.sum( [psi(0.5*(df + 1 - (i+1))) for i in range(dim)] ) + 0.5 * df * dim ) def entropy(self, df, scale): """ Compute the differential entropy of the Wishart. 
Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the Wishart distribution Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) _, log_det_scale = self._cholesky_logdet(scale) return self._entropy(dim, df, log_det_scale) def _cholesky_logdet(self, scale): """ Compute Cholesky decomposition and determine (log(det(scale)). Parameters ---------- scale : ndarray Scale matrix. Returns ------- c_decomp : ndarray The Cholesky decomposition of `scale`. logdet : scalar The log of the determinant of `scale`. Notes ----- This computation of ``logdet`` is equivalent to ``np.linalg.slogdet(scale)``. It is ~2x faster though. """ c_decomp = scipy.linalg.cholesky(scale, lower=True) logdet = 2 * np.sum(np.log(c_decomp.diagonal())) return c_decomp, logdet wishart = wishart_gen() class wishart_frozen(multi_rv_frozen): """ Create a frozen Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. """ def __init__(self, df, scale, seed=None): self._dist = wishart_gen(seed) self.dim, self.df, self.scale = self._dist._process_parameters( df, scale) self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale) def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.dim, self.df, self.scale, self.log_det_scale, self.C) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def mean(self): out = self._dist._mean(self.dim, self.df, self.scale) return _squeeze_output(out) def mode(self): out = self._dist._mode(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def var(self): out = self._dist._var(self.dim, self.df, self.scale) return _squeeze_output(out) def rvs(self, size=1, random_state=None): n, shape = self._dist._process_size(size) out = self._dist._rvs(n, shape, self.dim, self.df, self.C, random_state) return _squeeze_output(out) def entropy(self): return self._dist._entropy(self.dim, self.df, self.log_det_scale) # Set frozen generator docstrings from corresponding docstrings in # Wishart and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']: method = wishart_gen.__dict__[name] method_frozen = wishart_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, wishart_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) from numpy import asarray_chkfinite, asarray from scipy.linalg.misc import LinAlgError from scipy.linalg.lapack import get_lapack_funcs def _cho_inv_batch(a, check_finite=True): """ Invert the matrices a_i, using a Cholesky factorization of A, where a_i resides in the last two dimensions of a and the other indices describe the index i. Overwrites the data in a. Parameters ---------- a : array Array of matrices to invert, where the matrices themselves are stored in the last two dimensions. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. 
Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : array Array of inverses of the matrices ``a_i``. See also -------- scipy.linalg.cholesky : Cholesky factorization of a matrix """ if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]: raise ValueError('expected square matrix in last two dimensions') potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,)) tril_idx = np.tril_indices(a.shape[-2], k=-1) triu_idx = np.triu_indices(a.shape[-2], k=1) for index in np.ndindex(a1.shape[:-2]): # Cholesky decomposition a1[index], info = potrf(a1[index], lower=True, overwrite_a=False, clean=False) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal' ' potrf' % -info) # Inversion a1[index], info = potri(a1[index], lower=True, overwrite_c=False) if info > 0: raise LinAlgError("the inverse could not be computed") if info < 0: raise ValueError('illegal value in %d-th argument of internal' ' potrf' % -info) # Make symmetric (dpotri only fills in the lower triangle) a1[index][triu_idx] = a1[index][tril_idx] return a1 class invwishart_gen(wishart_gen): r""" An inverse Wishart random variable. The `df` keyword specifies the degrees of freedom. The `scale` keyword specifies the scale matrix, which must be symmetric and positive definite. In this context, the scale matrix is often interpreted in terms of a multivariate normal covariance matrix. Methods ------- ``pdf(x, df, scale)`` Probability density function. ``logpdf(x, df, scale)`` Log of the probability density function. ``rvs(df, scale, size=1, random_state=None)`` Draw random samples from an inverse Wishart distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the degrees of freedom and scale parameters, returning a "frozen" inverse Wishart random variable: rv = invwishart(df=1, scale=1) - Frozen object with the same methods but holding the given degrees of freedom and scale fixed. See Also -------- wishart Notes ----- %(_doc_callparams_note)s The scale matrix `scale` must be a symmetric positive definite matrix. Singular matrices, including the symmetric positive semi-definite case, are not supported. The inverse Wishart distribution is often denoted .. math:: W_p^{-1}(\nu, \Psi) where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the :math:`p \times p` scale matrix. The probability density function for `invwishart` has support over positive definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`, then its PDF is given by: .. math:: f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} } |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)} \exp\left( -tr(\Sigma S^{-1}) / 2 \right) If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart). If the scale matrix is 1-dimensional and equal to one, then the inverse Wishart distribution :math:`W_1(\nu, 1)` collapses to the inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` and scale = :math:`\frac{1}{2}`. .. versionadded:: 0.16.0 References ---------- .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", Wiley, 1983. .. [2] M.C. 
Jones, "Generating Inverse Wishart Matrices", Communications in Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import invwishart, invgamma >>> x = np.linspace(0.01, 1, 100) >>> iw = invwishart.pdf(x, df=6, scale=1) >>> iw[:3] array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) >>> ig = invgamma.pdf(x, 6/2., scale=1./2) >>> ig[:3] array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) >>> plt.plot(x, iw) The input quantiles can be any shape of array, as long as the last axis labels the components. """ def __init__(self, seed=None): super(invwishart_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) def __call__(self, df=None, scale=None, seed=None): """ Create a frozen inverse Wishart distribution. See `invwishart_frozen` for more information. """ return invwishart_frozen(df, scale, seed) def _logpdf(self, x, dim, df, scale, log_det_scale): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function. dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ log_det_x = np.zeros(x.shape[-1]) #scale_x_inv = np.zeros(x.shape) x_inv = np.copy(x).T if dim > 1: _cho_inv_batch(x_inv) # works in-place else: x_inv = 1./x_inv tr_scale_x_inv = np.zeros(x.shape[-1]) for i in range(x.shape[-1]): C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True) log_det_x[i] = 2 * np.sum(np.log(C.diagonal())) #scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace() # Log PDF out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) - (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - multigammaln(0.5*df, dim)) return out def logpdf(self, x, df, scale): """ Log of the inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) x = self._process_quantiles(x, dim) _, log_det_scale = self._cholesky_logdet(scale) out = self._logpdf(x, dim, df, scale, log_det_scale) return _squeeze_output(out) def pdf(self, x, df, scale): """ Inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpdf(x, df, scale)) def _mean(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead. """ if df > dim + 1: out = scale / (df - dim - 1) else: out = None return out def mean(self, df, scale): """ Mean of the inverse Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus one. 
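
        The mean equals ``scale / (df - dim - 1)`` when ``df > dim + 1``;
        otherwise `None` is returned.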
Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float or None The mean of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) if out is not None else out def _mode(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead. """ return scale / (df + dim + 1) def mode(self, df, scale): """ Mode of the inverse Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float The Mode of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mode(dim, df, scale) return _squeeze_output(out) def _var(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead. """ if df > dim + 3: var = (df - dim + 1) * scale**2 diag = scale.diagonal() # 1 x dim array var += (df - dim - 1) * np.outer(diag, diag) var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3) else: var = None return var def var(self, df, scale): """ Variance of the inverse Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus three. Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._var(dim, df, scale) return _squeeze_output(out) if out is not None else out def _rvs(self, n, shape, dim, df, C, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom C : ndarray Cholesky factorization of the scale matrix, lower triagular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ random_state = self._get_random_state(random_state) # Get random draws A such that A ~ W(df, I) A = super(invwishart_gen, self)._standard_rvs(n, shape, dim, df, random_state) # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale) eye = np.eye(dim) trtrs = get_lapack_funcs(('trtrs'), (A,)) for index in np.ndindex(A.shape[:-2]): # Calculate CA CA = np.dot(C, A[index]) # Get (C A)^{-1} via triangular solver if dim > 1: CA, info = trtrs(CA, eye, lower=True) if info > 0: raise LinAlgError("Singular matrix.") if info < 0: raise ValueError('Illegal value in %d-th argument of' ' internal trtrs' % -info) else: CA = 1. / CA # Get SA A[index] = np.dot(CA.T, CA) return A def rvs(self, df, scale, size=1, random_state=None): """ Draw random samples from an inverse Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape (`size`) + (`dim`, `dim), where `dim` is the dimension of the scale matrix. 
Notes ----- %(_doc_callparams_note)s """ n, shape = self._process_size(size) dim, df, scale = self._process_parameters(df, scale) # Invert the scale eye = np.eye(dim) L, lower = scipy.linalg.cho_factor(scale, lower=True) inv_scale = scipy.linalg.cho_solve((L, lower), eye) # Cholesky decomposition of inverted scale C = scipy.linalg.cholesky(inv_scale, lower=True) out = self._rvs(n, shape, dim, df, C, random_state) return _squeeze_output(out) def entropy(self): # Need to find reference for inverse Wishart entropy raise AttributeError invwishart = invwishart_gen() class invwishart_frozen(multi_rv_frozen): def __init__(self, df, scale, seed=None): """ Create a frozen inverse Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. """ self._dist = invwishart_gen(seed) self.dim, self.df, self.scale = self._dist._process_parameters( df, scale ) # Get the determinant via Cholesky factorization C, lower = scipy.linalg.cho_factor(self.scale, lower=True) self.log_det_scale = 2 * np.sum(np.log(C.diagonal())) # Get the inverse using the Cholesky factorization eye = np.eye(self.dim) self.inv_scale = scipy.linalg.cho_solve((C, lower), eye) # Get the Cholesky factorization of the inverse scale self.C = scipy.linalg.cholesky(self.inv_scale, lower=True) def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.dim, self.df, self.scale, self.log_det_scale) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def mean(self): out = self._dist._mean(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def mode(self): out = self._dist._mode(self.dim, self.df, self.scale) return _squeeze_output(out) def var(self): out = self._dist._var(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def rvs(self, size=1, random_state=None): n, shape = self._dist._process_size(size) out = self._dist._rvs(n, shape, self.dim, self.df, self.C, random_state) return _squeeze_output(out) def entropy(self): # Need to find reference for inverse Wishart entropy raise AttributeError # Set frozen generator docstrings from corresponding docstrings in # inverse Wishart and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']: method = invwishart_gen.__dict__[name] method_frozen = wishart_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, wishart_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) _multinomial_doc_default_callparams = """\ n : int Number of trials p : array_like Probability of a trial falling into each category; should sum to 1 """ _multinomial_doc_callparams_note = \ """`n` should be a positive integer. Each element of `p` should be in the interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to 1, the last element of the `p` array is not used and is replaced with the remaining probability left over from the earlier elements. 
""" _multinomial_doc_frozen_callparams = "" _multinomial_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" multinomial_docdict_params = { '_doc_default_callparams': _multinomial_doc_default_callparams, '_doc_callparams_note': _multinomial_doc_callparams_note, '_doc_random_state': _doc_random_state } multinomial_docdict_noparams = { '_doc_default_callparams': _multinomial_doc_frozen_callparams, '_doc_callparams_note': _multinomial_doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class multinomial_gen(multi_rv_generic): r""" A multinomial random variable. Methods ------- ``pmf(x, n, p)`` Probability mass function. ``logpmf(x, n, p)`` Log of the probability mass function. ``rvs(n, p, size=1, random_state=None)`` Draw random samples from a multinomial distribution. ``entropy(n, p)`` Compute the entropy of the multinomial distribution. ``cov(n, p)`` Compute the covariance matrix of the multinomial distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Notes ----- %(_doc_callparams_note)s Alternatively, the object may be called (as a function) to fix the `n` and `p` parameters, returning a "frozen" multinomial random variable: The probability mass function for `multinomial` is .. math:: f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k}, supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a nonnegative integer and their sum is :math:`n`. .. versionadded:: 0.19.0 Examples -------- >>> from scipy.stats import multinomial >>> rv = multinomial(8, [0.3, 0.2, 0.5]) >>> rv.pmf([1, 3, 4]) 0.042000000000000072 The multinomial distribution for :math:`k=2` is identical to the corresponding binomial distribution (tiny numerical differences notwithstanding): >>> from scipy.stats import binom >>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6]) 0.29030399999999973 >>> binom.pmf(3, 7, 0.4) 0.29030400000000012 The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support broadcasting, under the convention that the vector parameters (``x`` and ``p``) are interpreted as if each row along the last axis is a single object. For instance: >>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7]) array([0.2268945, 0.25412184]) Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``, but following the rules mentioned above they behave as if the rows ``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and ``p.shape = ()``. To obtain the individual elements without broadcasting, we would do this: >>> multinomial.pmf([3, 4], n=7, p=[.3, .7]) 0.2268945 >>> multinomial.pmf([3, 5], 8, p=[.3, .7]) 0.25412184 This broadcasting also works for ``cov``, where the output objects are square matrices of size ``p.shape[-1]``. For example: >>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]]) array([[[ 0.84, -0.84], [-0.84, 0.84]], [[ 1.2 , -1.2 ], [-1.2 , 1.2 ]]]) In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and following the rules above, these broadcast as if ``p.shape == (2,)``. Thus the result should also be of shape ``(2,)``, but since each output is a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``, where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and ``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``. 
See also -------- scipy.stats.binom : The binomial distribution. numpy.random.multinomial : Sampling from the multinomial distribution. """ def __init__(self, seed=None): super(multinomial_gen, self).__init__(seed) self.__doc__ = \ doccer.docformat(self.__doc__, multinomial_docdict_params) def __call__(self, n, p, seed=None): """ Create a frozen multinomial distribution. See `multinomial_frozen` for more information. """ return multinomial_frozen(n, p, seed) def _process_parameters(self, n, p): """ Return: n_, p_, npcond. n_ and p_ are arrays of the correct shape; npcond is a boolean array flagging values out of the domain. """ p = np.array(p, dtype=np.float64, copy=True) p[...,-1] = 1. - p[...,:-1].sum(axis=-1) # true for bad p pcond = np.any(p < 0, axis=-1) pcond |= np.any(p > 1, axis=-1) n = np.array(n, dtype=np.int, copy=True) # true for bad n ncond = n <= 0 return n, p, ncond | pcond def _process_quantiles(self, x, n, p): """ Return: x_, xcond. x_ is an int array; xcond is a boolean array flagging values out of the domain. """ xx = np.asarray(x, dtype=np.int) if xx.ndim == 0: raise ValueError("x must be an array.") if xx.size != 0 and not xx.shape[-1] == p.shape[-1]: raise ValueError("Size of each quantile should be size of p: " "received %d, but expected %d." % (xx.shape[-1], p.shape[-1])) # true for x out of the domain cond = np.any(xx != x, axis=-1) cond |= np.any(xx < 0, axis=-1) cond = cond | (np.sum(xx, axis=-1) != n) return xx, cond def _checkresult(self, result, cond, bad_value): result = np.asarray(result) if cond.ndim != 0: result[cond] = bad_value elif cond: if result.ndim == 0: return bad_value result[...] = bad_value return result def _logpmf(self, x, n, p): return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1) def logpmf(self, x, n, p): """ Log of the Multinomial probability mass function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- logpmf : ndarray or scalar Log of the probability mass function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ n, p, npcond = self._process_parameters(n, p) x, xcond = self._process_quantiles(x, n, p) result = self._logpmf(x, n, p) # replace values for which x was out of the domain; broadcast # xcond to the right shape xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_) result = self._checkresult(result, xcond_, np.NINF) # replace values bad for n or p; broadcast npcond to the right shape npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_) return self._checkresult(result, npcond_, np.NAN) def pmf(self, x, n, p): """ Multinomial probability mass function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pmf : ndarray or scalar Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpmf(x, n, p)) def mean(self, n, p): """ Mean of the Multinomial distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float The mean of the distribution """ n, p, npcond = self._process_parameters(n, p) result = n[..., np.newaxis]*p return self._checkresult(result, npcond, np.NAN) def cov(self, n, p): """ Covariance matrix of the multinomial distribution. 
Parameters ---------- %(_doc_default_callparams)s Returns ------- cov : ndarray The covariance matrix of the distribution """ n, p, npcond = self._process_parameters(n, p) nn = n[..., np.newaxis, np.newaxis] result = nn * np.einsum('...j,...k->...jk', -p, p) # change the diagonal for i in range(p.shape[-1]): result[...,i, i] += n*p[..., i] return self._checkresult(result, npcond, np.nan) def entropy(self, n, p): r""" Compute the entropy of the multinomial distribution. The entropy is computed using this expression: .. math:: f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i + \sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x! Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the Multinomial distribution Notes ----- %(_doc_callparams_note)s """ n, p, npcond = self._process_parameters(n, p) x = np.r_[1:np.max(n)+1] term1 = n*np.sum(entr(p), axis=-1) term1 -= gammaln(n+1) n = n[..., np.newaxis] new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1 x.shape += (1,)*new_axes_needed term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1), axis=(-1, -1-new_axes_needed)) return self._checkresult(term1 + term2, npcond, np.nan) def rvs(self, n, p, size=None, random_state=None): """ Draw random samples from a Multinomial distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of shape (`size`, `len(p)`) Notes ----- %(_doc_callparams_note)s """ n, p, npcond = self._process_parameters(n, p) random_state = self._get_random_state(random_state) return random_state.multinomial(n, p, size) multinomial = multinomial_gen() class multinomial_frozen(multi_rv_frozen): r""" Create a frozen Multinomial distribution. Parameters ---------- n : int number of trials p: array_like probability of a trial falling into each category; should sum to 1 seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. """ def __init__(self, n, p, seed=None): self._dist = multinomial_gen(seed) self.n, self.p, self.npcond = self._dist._process_parameters(n, p) # monkey patch self._dist def _process_parameters(n, p): return self.n, self.p, self.npcond self._dist._process_parameters = _process_parameters def logpmf(self, x): return self._dist.logpmf(x, self.n, self.p) def pmf(self, x): return self._dist.pmf(x, self.n, self.p) def mean(self): return self._dist.mean(self.n, self.p) def cov(self): return self._dist.cov(self.n, self.p) def entropy(self): return self._dist.entropy(self.n, self.p) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.n, self.p, size, random_state) # Set frozen generator docstrings from corresponding docstrings in # multinomial and fill in default strings in class docstrings for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']: method = multinomial_gen.__dict__[name] method_frozen = multinomial_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, multinomial_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, multinomial_docdict_params) class special_ortho_group_gen(multi_rv_generic): r""" A matrix-valued SO(N) random variable. 
Return a random rotation matrix, drawn from the Haar distribution (the only uniform distribution on SO(n)). The `dim` keyword specifies the dimension N. Methods ------- ``rvs(dim=None, size=1, random_state=None)`` Draw random samples from SO(N). Parameters ---------- dim : scalar Dimension of matrices Notes ---------- This class is wrapping the random_rot code from the MDP Toolkit, https://github.com/mdp-toolkit/mdp-toolkit Return a random rotation matrix, drawn from the Haar distribution (the only uniform distribution on SO(n)). The algorithm is described in the paper Stewart, G.W., "The efficient generation of random orthogonal matrices with an application to condition estimators", SIAM Journal on Numerical Analysis, 17(3), pp. 403-409, 1980. For more information see http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization See also the similar `ortho_group`. Examples -------- >>> from scipy.stats import special_ortho_group >>> x = special_ortho_group.rvs(3) >>> np.dot(x, x.T) array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) >>> import scipy.linalg >>> scipy.linalg.det(x) 1.0 This generates one random matrix from SO(3). It is orthogonal and has a determinant of 1. """ def __init__(self, seed=None): super(special_ortho_group_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__) def __call__(self, dim=None, seed=None): """ Create a frozen SO(N) distribution. See `special_ortho_group_frozen` for more information. """ return special_ortho_group_frozen(dim, seed=seed) def _process_parameters(self, dim): """ Dimension N must be specified; it cannot be inferred. """ if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): raise ValueError("""Dimension of rotation must be specified, and must be a scalar greater than 1.""") return dim def rvs(self, dim, size=1, random_state=None): """ Draw random samples from SO(N). Parameters ---------- dim : integer Dimension of rotation space (N). size : integer, optional Number of samples to draw (default 1). Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim) """ random_state = self._get_random_state(random_state) size = int(size) if size > 1: return np.array([self.rvs(dim, size=1, random_state=random_state) for i in range(size)]) dim = self._process_parameters(dim) H = np.eye(dim) D = np.empty((dim,)) for n in range(dim-1): x = random_state.normal(size=(dim-n,)) D[n] = np.sign(x[0]) if x[0] != 0 else 1 x[0] += D[n]*np.sqrt((x*x).sum()) # Householder transformation Hx = (np.eye(dim-n) - 2.*np.outer(x, x)/(x*x).sum()) mat = np.eye(dim) mat[n:, n:] = Hx H = np.dot(H, mat) D[-1] = (-1)**(dim-1)*D[:-1].prod() # Equivalent to np.dot(np.diag(D), H) but faster, apparently H = (D*H.T).T return H special_ortho_group = special_ortho_group_gen() class special_ortho_group_frozen(multi_rv_frozen): def __init__(self, dim=None, seed=None): """ Create a frozen SO(N) distribution. Parameters ---------- dim : scalar Dimension of matrices seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. 
Examples -------- >>> from scipy.stats import special_ortho_group >>> g = special_ortho_group(5) >>> x = g.rvs() """ self._dist = special_ortho_group_gen(seed) self.dim = self._dist._process_parameters(dim) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.dim, size, random_state) class ortho_group_gen(multi_rv_generic): r""" A matrix-valued O(N) random variable. Return a random orthogonal matrix, drawn from the O(N) Haar distribution (the only uniform distribution on O(N)). The `dim` keyword specifies the dimension N. Methods ------- ``rvs(dim=None, size=1, random_state=None)`` Draw random samples from O(N). Parameters ---------- dim : scalar Dimension of matrices Notes ---------- This class is closely related to `special_ortho_group`. Some care is taken to avoid numerical error, as per the paper by Mezzadri. References ---------- .. [1] F. Mezzadri, "How to generate random matrices from the classical compact groups", :arXiv:`math-ph/0609050v2`. Examples -------- >>> from scipy.stats import ortho_group >>> x = ortho_group.rvs(3) >>> np.dot(x, x.T) array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) >>> import scipy.linalg >>> np.fabs(scipy.linalg.det(x)) 1.0 This generates one random matrix from O(3). It is orthogonal and has a determinant of +1 or -1. """ def __init__(self, seed=None): super(ortho_group_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__) def _process_parameters(self, dim): """ Dimension N must be specified; it cannot be inferred. """ if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): raise ValueError("Dimension of rotation must be specified," "and must be a scalar greater than 1.") return dim def rvs(self, dim, size=1, random_state=None): """ Draw random samples from O(N). Parameters ---------- dim : integer Dimension of rotation space (N). size : integer, optional Number of samples to draw (default 1). Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim) """ random_state = self._get_random_state(random_state) size = int(size) if size > 1: return np.array([self.rvs(dim, size=1, random_state=random_state) for i in range(size)]) dim = self._process_parameters(dim) H = np.eye(dim) for n in range(dim): x = random_state.normal(size=(dim-n,)) # random sign, 50/50, but chosen carefully to avoid roundoff error D = np.sign(x[0]) if x[0] != 0 else 1 x[0] += D*np.sqrt((x*x).sum()) # Householder transformation Hx = -D*(np.eye(dim-n) - 2.*np.outer(x, x)/(x*x).sum()) mat = np.eye(dim) mat[n:, n:] = Hx H = np.dot(H, mat) return H ortho_group = ortho_group_gen() class random_correlation_gen(multi_rv_generic): r""" A random correlation matrix. Return a random correlation matrix, given a vector of eigenvalues. The `eigs` keyword specifies the eigenvalues of the correlation matrix, and implies the dimension. Methods ------- ``rvs(eigs=None, random_state=None)`` Draw random correlation matrices, all with eigenvalues eigs. Parameters ---------- eigs : 1d ndarray Eigenvalues of correlation matrix. Notes ---------- Generates a random correlation matrix following a numerically stable algorithm spelled out by Davies & Higham. This algorithm uses a single O(N) similarity transformation to construct a symmetric positive semi-definite matrix, and applies a series of Givens rotations to scale it to have ones on the diagonal. References ---------- .. 
[1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation of correlation matrices and their factors", BIT 2000, Vol. 40, No. 4, pp. 640 651 Examples -------- >>> from scipy.stats import random_correlation >>> np.random.seed(514) >>> x = random_correlation.rvs((.5, .8, 1.2, 1.5)) >>> x array([[ 1. , -0.20387311, 0.18366501, -0.04953711], [-0.20387311, 1. , -0.24351129, 0.06703474], [ 0.18366501, -0.24351129, 1. , 0.38530195], [-0.04953711, 0.06703474, 0.38530195, 1. ]]) >>> import scipy.linalg >>> e, v = scipy.linalg.eigh(x) >>> e array([ 0.5, 0.8, 1.2, 1.5]) """ def __init__(self, seed=None): super(random_correlation_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__) def _process_parameters(self, eigs, tol): eigs = np.asarray(eigs, dtype=float) dim = eigs.size if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1: raise ValueError("Array 'eigs' must be a vector of length greater than 1.") if np.fabs(np.sum(eigs) - dim) > tol: raise ValueError("Sum of eigenvalues must equal dimensionality.") for x in eigs: if x < -tol: raise ValueError("All eigenvalues must be non-negative.") return dim, eigs def _givens_to_1(self, aii, ajj, aij): """Computes a 2x2 Givens matrix to put 1's on the diagonal for the input matrix. The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ]. The output matrix g is a 2x2 anti-symmetric matrix of the form [ c s ; -s c ]; the elements c and s are returned. Applying the output matrix to the input matrix (as b=g.T M g) results in a matrix with bii=1, provided tr(M) - det(M) >= 1 and floating point issues do not occur. Otherwise, some other valid rotation is returned. When tr(M)==2, also bjj=1. """ aiid = aii - 1. ajjd = ajj - 1. if ajjd == 0: # ajj==1, so swap aii and ajj to avoid division by zero return 0., 1. dd = math.sqrt(max(aij**2 - aiid*ajjd, 0)) # The choice of t should be chosen to avoid cancellation [1] t = (aij + math.copysign(dd, aij)) / ajjd c = 1. / math.sqrt(1. + t*t) if c == 0: # Underflow s = 1.0 else: s = c*t return c, s def _to_corr(self, m): """ Given a psd matrix m, rotate to put one's on the diagonal, turning it into a correlation matrix. This also requires the trace equal the dimensionality. Note: modifies input matrix """ # Check requirements for in-place Givens if not (m.flags.c_contiguous and m.dtype == np.float64 and m.shape[0] == m.shape[1]): raise ValueError() d = m.shape[0] for i in range(d-1): if m[i,i] == 1: continue elif m[i, i] > 1: for j in range(i+1, d): if m[j, j] < 1: break else: for j in range(i+1, d): if m[j, j] > 1: break c, s = self._givens_to_1(m[i,i], m[j,j], m[i,j]) # Use BLAS to apply Givens rotations in-place. Equivalent to: # g = np.eye(d) # g[i, i] = g[j,j] = c # g[j, i] = -s; g[i, j] = s # m = np.dot(g.T, np.dot(m, g)) mv = m.ravel() drot(mv, mv, c, -s, n=d, offx=i*d, incx=1, offy=j*d, incy=1, overwrite_x=True, overwrite_y=True) drot(mv, mv, c, -s, n=d, offx=i, incx=d, offy=j, incy=d, overwrite_x=True, overwrite_y=True) return m def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7): """ Draw random correlation matrices Parameters ---------- eigs : 1d ndarray Eigenvalues of correlation matrix tol : float, optional Tolerance for input parameter checks diag_tol : float, optional Tolerance for deviation of the diagonal of the resulting matrix. Default: 1e-7 Raises ------ RuntimeError Floating point error prevented generating a valid correlation matrix. 
Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim), each having eigenvalues eigs. """ dim, eigs = self._process_parameters(eigs, tol=tol) random_state = self._get_random_state(random_state) m = ortho_group.rvs(dim, random_state=random_state) m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m m = self._to_corr(m) # Carefully rotate to unit diagonal # Check diagonal if abs(m.diagonal() - 1).max() > diag_tol: raise RuntimeError("Failed to generate a valid correlation matrix") return m random_correlation = random_correlation_gen() class unitary_group_gen(multi_rv_generic): r""" A matrix-valued U(N) random variable. Return a random unitary matrix. The `dim` keyword specifies the dimension N. Methods ------- ``rvs(dim=None, size=1, random_state=None)`` Draw random samples from U(N). Parameters ---------- dim : scalar Dimension of matrices Notes ---------- This class is similar to `ortho_group`. References ---------- .. [1] F. Mezzadri, "How to generate random matrices from the classical compact groups", arXiv:math-ph/0609050v2. Examples -------- >>> from scipy.stats import unitary_group >>> x = unitary_group.rvs(3) >>> np.dot(x, x.conj().T) array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16], [ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16], [ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]]) This generates one random matrix from U(3). The dot product confirms that it is unitary up to machine precision. """ def __init__(self, seed=None): super(unitary_group_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__) def _process_parameters(self, dim): """ Dimension N must be specified; it cannot be inferred. """ if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim): raise ValueError("Dimension of rotation must be specified," "and must be a scalar greater than 1.") return dim def rvs(self, dim, size=1, random_state=None): """ Draw random samples from U(N). Parameters ---------- dim : integer Dimension of space (N). size : integer, optional Number of samples to draw (default 1). Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim) """ random_state = self._get_random_state(random_state) size = int(size) if size > 1: return np.array([self.rvs(dim, size=1, random_state=random_state) for i in range(size)]) dim = self._process_parameters(dim) z = 1/math.sqrt(2)*(random_state.normal(size=(dim,dim)) + 1j*random_state.normal(size=(dim,dim))) q, r = scipy.linalg.qr(z) d = r.diagonal() q *= d/abs(d) return q unitary_group = unitary_group_gen()
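# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the upstream module).  It exercises the
# public generators defined above -- invwishart, multinomial,
# special_ortho_group, ortho_group, random_correlation and unitary_group --
# and assumes they are exposed through ``scipy.stats`` as in released SciPy
# (invwishart/multinomial since 0.19; unitary_group in later releases).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    from scipy.stats import (invwishart, multinomial, special_ortho_group,
                             ortho_group, random_correlation, unitary_group)

    np.random.seed(1234)

    # Inverse Wishart: one 3x3 draw (must be positive definite) and the
    # analytic mean scale/(df - dim - 1), valid because df > dim + 1.
    scale = np.eye(3)
    iw_sample = invwishart.rvs(df=6, scale=scale)
    assert np.all(np.linalg.eigvalsh(iw_sample) > 0)
    print("invwishart mean:\n", invwishart.mean(df=6, scale=scale))

    # Multinomial: pmf of one outcome (matches the docstring example above)
    # and the covariance matrix.
    print("multinomial pmf:", multinomial.pmf([1, 3, 4], n=8, p=[0.3, 0.2, 0.5]))
    print("multinomial cov:\n", multinomial.cov(n=8, p=[0.3, 0.2, 0.5]))

    # Haar-distributed rotation/orthogonal/unitary matrices: check the
    # defining property M M^T = I (M M^H = I in the unitary case).
    for gen in (special_ortho_group, ortho_group):
        m = gen.rvs(4)
        assert np.allclose(np.dot(m, m.T), np.eye(4))
    u = unitary_group.rvs(4)
    assert np.allclose(np.dot(u, u.conj().T), np.eye(4))

    # Random correlation matrix with prescribed eigenvalues (they must sum
    # to the dimension); the result has a unit diagonal.
    c = random_correlation.rvs((0.5, 0.8, 1.2, 1.5))
    assert np.allclose(np.diag(c), 1.0)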
120,087
30.811391
116
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_tukeylambda_stats.py
from __future__ import division, print_function, absolute_import

import numpy as np
from numpy import poly1d
from scipy.special import beta


# The following code was used to generate the Pade coefficients for the
# Tukey Lambda variance function.  Version 0.17 of mpmath was used.
#---------------------------------------------------------------------------
# import mpmath as mp
#
# mp.mp.dps = 60
#
# one = mp.mpf(1)
# two = mp.mpf(2)
#
# def mpvar(lam):
#     if lam == 0:
#         v = mp.pi**2 / three
#     else:
#         v = (two / lam**2) * (one / (one + two*lam) -
#                               mp.beta(lam + one, lam + one))
#     return v
#
# t = mp.taylor(mpvar, 0, 8)
# p, q = mp.pade(t, 4, 4)
# print("p =", [mp.fp.mpf(c) for c in p])
# print("q =", [mp.fp.mpf(c) for c in q])
#---------------------------------------------------------------------------

# Pade coefficients for the Tukey Lambda variance function.
_tukeylambda_var_pc = [3.289868133696453, 0.7306125098871127,
                       -0.5370742306855439, 0.17292046290190008,
                       -0.02371146284628187]
_tukeylambda_var_qc = [1.0, 3.683605511659861, 4.184152498888124,
                       1.7660926747377275, 0.2643989311168465]

# numpy.poly1d instances for the numerator and denominator of the
# Pade approximation to the Tukey Lambda variance.
_tukeylambda_var_p = poly1d(_tukeylambda_var_pc[::-1])
_tukeylambda_var_q = poly1d(_tukeylambda_var_qc[::-1])


def tukeylambda_variance(lam):
    """Variance of the Tukey Lambda distribution.

    Parameters
    ----------
    lam : array_like
        The lambda values at which to compute the variance.

    Returns
    -------
    v : ndarray
        The variance.  For lam < -0.5, the variance is not defined, so
        np.nan is returned.  For lam = -0.5, np.inf is returned.

    Notes
    -----
    In an interval around lambda=0, this function uses the [4,4] Pade
    approximation to compute the variance.  Otherwise it uses the standard
    formula (http://en.wikipedia.org/wiki/Tukey_lambda_distribution).  The
    Pade approximation is used because the standard formula has a removable
    discontinuity at lambda = 0, and does not produce accurate numerical
    results near lambda = 0.
    """
    lam = np.asarray(lam)
    shp = lam.shape
    lam = np.atleast_1d(lam).astype(np.float64)

    # For absolute values of lam less than threshold, use the Pade
    # approximation.
    threshold = 0.075

    # Play games with masks to implement the conditional evaluation of
    # the distribution.
    # lambda < -0.5:  var = nan
    low_mask = lam < -0.5
    # lambda == -0.5: var = inf
    neghalf_mask = lam == -0.5
    # abs(lambda) < threshold:  use Pade approximation
    small_mask = np.abs(lam) < threshold
    # else the "regular" case:  use the explicit formula.
    reg_mask = ~(low_mask | neghalf_mask | small_mask)

    # Get the 'lam' values for the cases where they are needed.
    small = lam[small_mask]
    reg = lam[reg_mask]

    # Compute the function for each case.
    v = np.empty_like(lam)
    v[low_mask] = np.nan
    v[neghalf_mask] = np.inf
    if small.size > 0:
        # Use the Pade approximation near lambda = 0.
        v[small_mask] = _tukeylambda_var_p(small) / _tukeylambda_var_q(small)
    if reg.size > 0:
        v[reg_mask] = (2.0 / reg**2) * (1.0 / (1.0 + 2 * reg) -
                                        beta(reg + 1, reg + 1))
    v.shape = shp
    return v


# The following code was used to generate the Pade coefficients for the
# Tukey Lambda kurtosis function.  Version 0.17 of mpmath was used.
#---------------------------------------------------------------------------
# import mpmath as mp
#
# mp.mp.dps = 60
#
# one = mp.mpf(1)
# two = mp.mpf(2)
# three = mp.mpf(3)
# four = mp.mpf(4)
#
# def mpkurt(lam):
#     if lam == 0:
#         k = mp.mpf(6)/5
#     else:
#         numer = (one/(four*lam+one) - four*mp.beta(three*lam+one, lam+one) +
#                  three*mp.beta(two*lam+one, two*lam+one))
#         denom = two*(one/(two*lam+one) - mp.beta(lam+one,lam+one))**2
#         k = numer / denom - three
#     return k
#
# # There is a bug in mpmath 0.17: when we use the 'method' keyword of the
# # taylor function and we request a degree 9 Taylor polynomial, we actually
# # get degree 8.
# t = mp.taylor(mpkurt, 0, 9, method='quad', radius=0.01)
# t = [mp.chop(c, tol=1e-15) for c in t]
# p, q = mp.pade(t, 4, 4)
# print("p =", [mp.fp.mpf(c) for c in p])
# print("q =", [mp.fp.mpf(c) for c in q])
#---------------------------------------------------------------------------

# Pade coefficients for the Tukey Lambda kurtosis function.
_tukeylambda_kurt_pc = [1.2, -5.853465139719495, -22.653447381131077,
                        0.20601184383406815, 4.59796302262789]
_tukeylambda_kurt_qc = [1.0, 7.171149192233599, 12.96663094361842,
                        0.43075235247853005, -2.789746758009912]

# numpy.poly1d instances for the numerator and denominator of the
# Pade approximation to the Tukey Lambda kurtosis.
_tukeylambda_kurt_p = poly1d(_tukeylambda_kurt_pc[::-1])
_tukeylambda_kurt_q = poly1d(_tukeylambda_kurt_qc[::-1])


def tukeylambda_kurtosis(lam):
    """Kurtosis of the Tukey Lambda distribution.

    Parameters
    ----------
    lam : array_like
        The lambda values at which to compute the kurtosis.

    Returns
    -------
    k : ndarray
        The kurtosis.  For lam < -0.25, the kurtosis is not defined, so
        np.nan is returned.  For lam = -0.25, np.inf is returned.
    """
    lam = np.asarray(lam)
    shp = lam.shape
    lam = np.atleast_1d(lam).astype(np.float64)

    # For absolute values of lam less than threshold, use the Pade
    # approximation.
    threshold = 0.055

    # Use masks to implement the conditional evaluation of the kurtosis.
    # lambda < -0.25:  kurtosis = nan
    low_mask = lam < -0.25
    # lambda == -0.25: kurtosis = inf
    negqrtr_mask = lam == -0.25
    # lambda near 0:  use Pade approximation
    small_mask = np.abs(lam) < threshold
    # else the "regular" case:  use the explicit formula.
    reg_mask = ~(low_mask | negqrtr_mask | small_mask)

    # Get the 'lam' values for the cases where they are needed.
    small = lam[small_mask]
    reg = lam[reg_mask]

    # Compute the function for each case.
    k = np.empty_like(lam)
    k[low_mask] = np.nan
    k[negqrtr_mask] = np.inf
    if small.size > 0:
        k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small)
    if reg.size > 0:
        numer = (1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) +
                 3 * beta(2 * reg + 1, 2 * reg + 1))
        denom = 2 * (1.0/(2 * reg + 1) - beta(reg + 1, reg + 1))**2
        k[reg_mask] = numer / denom - 3

    # The return value will be a numpy array; resetting the shape ensures that
    # if `lam` was a scalar, the return value is a 0-d array.
    k.shape = shp
    return k
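# ---------------------------------------------------------------------------
# Minimal check of the helpers above (not part of the upstream module).  The
# expected values follow directly from the definitions: at lambda = 0 the
# Pade approximants reduce to their constant terms (pi**2/3 and 6/5), and the
# masks produce nan below and inf at the boundary values -0.5 / -0.25.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    lam = np.array([-1.0, -0.5, 0.0, 2.0])
    # -> [nan, inf, pi**2/3 ~= 3.2899, 1/12 ~= 0.0833]
    print("variance:", tukeylambda_variance(lam))

    assert np.isclose(tukeylambda_variance(0.0), np.pi**2 / 3)
    assert np.isclose(tukeylambda_kurtosis(0.0), 1.2)
    assert np.isnan(tukeylambda_kurtosis(-0.3))
    assert np.isinf(tukeylambda_kurtosis(-0.25))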
6,934
33.331683
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/mstats_basic.py
""" An extension of scipy.stats.stats to support masked arrays """ # Original author (2007): Pierre GF Gerard-Marchant # TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ? # TODO : ttest_rel looks botched: what are x1,x2,v1,v2 for ? # TODO : reimplement ksonesamp from __future__ import division, print_function, absolute_import __all__ = ['argstoarray', 'count_tied_groups', 'describe', 'f_oneway', 'find_repeats','friedmanchisquare', 'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis', 'ks_twosamp','ks_2samp','kurtosis','kurtosistest', 'linregress', 'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign', 'normaltest', 'obrientransform', 'pearsonr','plotting_positions','pointbiserialr', 'rankdata', 'scoreatpercentile','sem', 'sen_seasonal_slopes','skew','skewtest','spearmanr', 'theilslopes','tmax','tmean','tmin','trim','trimboth', 'trimtail','trima','trimr','trimmed_mean','trimmed_std', 'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp', 'ttest_ind','ttest_rel','tvar', 'variation', 'winsorize', ] import numpy as np from numpy import ndarray import numpy.ma as ma from numpy.ma import masked, nomask from scipy._lib.six import iteritems import itertools import warnings from collections import namedtuple from . import distributions import scipy.special as special from ._stats_mstats_common import ( _find_repeats, linregress as stats_linregress, theilslopes as stats_theilslopes ) genmissingvaldoc = """ Notes ----- Missing values are considered pair-wise: if a value is missing in x, the corresponding value in y is masked. """ def _chk_asarray(a, axis): # Always returns a masked array, raveled for axis=None a = ma.asanyarray(a) if axis is None: a = ma.ravel(a) outaxis = 0 else: outaxis = axis return a, outaxis def _chk2_asarray(a, b, axis): a = ma.asanyarray(a) b = ma.asanyarray(b) if axis is None: a = ma.ravel(a) b = ma.ravel(b) outaxis = 0 else: outaxis = axis return a, b, outaxis def _chk_size(a,b): a = ma.asanyarray(a) b = ma.asanyarray(b) (na, nb) = (a.size, b.size) if na != nb: raise ValueError("The size of the input array should match!" " (%s <> %s)" % (na, nb)) return (a, b, na) def argstoarray(*args): """ Constructs a 2D array from a group of sequences. Sequences are filled with missing values to match the length of the longest sequence. Parameters ---------- args : sequences Group of sequences. Returns ------- argstoarray : MaskedArray A ( `m` x `n` ) masked array, where `m` is the number of arguments and `n` the length of the longest argument. Notes ----- `numpy.ma.row_stack` has identical behavior, but is called with a sequence of sequences. """ if len(args) == 1 and not isinstance(args[0], ndarray): output = ma.asarray(args[0]) if output.ndim != 2: raise ValueError("The input should be 2D") else: n = len(args) m = max([len(k) for k in args]) output = ma.array(np.empty((n,m), dtype=float), mask=True) for (k,v) in enumerate(args): output[k,:len(v)] = v output[np.logical_not(np.isfinite(output._data))] = masked return output def find_repeats(arr): """Find repeats in arr and return a tuple (repeats, repeat_count). The input is cast to float64. Masked values are discarded. Parameters ---------- arr : sequence Input array. The array is flattened if it is not 1D. Returns ------- repeats : ndarray Array of repeated values. counts : ndarray Array of counts. """ # Make sure we get a copy. ma.compressed promises a "new array", but can # actually return a reference. 
compr = np.asarray(ma.compressed(arr), dtype=np.float64) try: need_copy = np.may_share_memory(compr, arr) except AttributeError: # numpy < 1.8.2 bug: np.may_share_memory([], []) raises, # while in numpy 1.8.2 and above it just (correctly) returns False. need_copy = False if need_copy: compr = compr.copy() return _find_repeats(compr) def count_tied_groups(x, use_missing=False): """ Counts the number of tied values. Parameters ---------- x : sequence Sequence of data on which to counts the ties use_missing : bool, optional Whether to consider missing values as tied. Returns ------- count_tied_groups : dict Returns a dictionary (nb of ties: nb of groups). Examples -------- >>> from scipy.stats import mstats >>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6] >>> mstats.count_tied_groups(z) {2: 1, 3: 2} In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x). >>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6]) >>> mstats.count_tied_groups(z) {2: 2, 3: 1} >>> z[[1,-1]] = np.ma.masked >>> mstats.count_tied_groups(z, use_missing=True) {2: 2, 3: 1} """ nmasked = ma.getmask(x).sum() # We need the copy as find_repeats will overwrite the initial data data = ma.compressed(x).copy() (ties, counts) = find_repeats(data) nties = {} if len(ties): nties = dict(zip(np.unique(counts), itertools.repeat(1))) nties.update(dict(zip(*find_repeats(counts)))) if nmasked and use_missing: try: nties[nmasked] += 1 except KeyError: nties[nmasked] = 1 return nties def rankdata(data, axis=None, use_missing=False): """Returns the rank (also known as order statistics) of each data point along the given axis. If some values are tied, their rank is averaged. If some values are masked, their rank is set to 0 if use_missing is False, or set to the average rank of the unmasked values if use_missing is True. Parameters ---------- data : sequence Input data. The data is transformed to a masked array axis : {None,int}, optional Axis along which to perform the ranking. If None, the array is first flattened. An exception is raised if the axis is specified for arrays with a dimension larger than 2 use_missing : bool, optional Whether the masked values have a rank of 0 (False) or equal to the average rank of the unmasked values (True). """ def _rank1d(data, use_missing=False): n = data.count() rk = np.empty(data.size, dtype=float) idx = data.argsort() rk[idx[:n]] = np.arange(1,n+1) if use_missing: rk[idx[n:]] = (n+1)/2. else: rk[idx[n:]] = 0 repeats = find_repeats(data.copy()) for r in repeats[0]: condition = (data == r).filled(False) rk[condition] = rk[condition].mean() return rk data = ma.array(data, copy=False) if axis is None: if data.ndim > 1: return _rank1d(data.ravel(), use_missing).reshape(data.shape) else: return _rank1d(data, use_missing) else: return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray) ModeResult = namedtuple('ModeResult', ('mode', 'count')) def mode(a, axis=0): """ Returns an array of the modal (most common) value in the passed array. Parameters ---------- a : array_like n-dimensional array of which to find mode(s). axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. Returns ------- mode : ndarray Array of modal values. count : ndarray Array of counts for each mode. Notes ----- For more details, see `stats.mode`. 
""" a, axis = _chk_asarray(a, axis) def _mode1D(a): (rep,cnt) = find_repeats(a) if not cnt.ndim: return (0, 0) elif cnt.size: return (rep[cnt.argmax()], cnt.max()) else: not_masked_indices = ma.flatnotmasked_edges(a) first_not_masked_index = not_masked_indices[0] return (a[first_not_masked_index], 1) if axis is None: output = _mode1D(ma.ravel(a)) output = (ma.array(output[0]), ma.array(output[1])) else: output = ma.apply_along_axis(_mode1D, axis, a) newshape = list(a.shape) newshape[axis] = 1 slices = [slice(None)] * output.ndim slices[axis] = 0 modes = output[tuple(slices)].reshape(newshape) slices[axis] = 1 counts = output[tuple(slices)].reshape(newshape) output = (modes, counts) return ModeResult(*output) def _betai(a, b, x): x = np.asanyarray(x) x = ma.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0 return special.betainc(a, b, x) def msign(x): """Returns the sign of x, or 0 if x is masked.""" return ma.filled(np.sign(x), 0) def pearsonr(x,y): """ Calculates a Pearson correlation coefficient and the p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. Strictly speaking, Pearson's correlation requires that each dataset be normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as `x` increases, so does `y`. Negative correlations imply that as `x` increases, `y` decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. Parameters ---------- x : 1-D array_like Input y : 1-D array_like Input Returns ------- pearsonr : float Pearson's correlation coefficient, 2-tailed p-value. References ---------- http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation """ (x, y, n) = _chk_size(x, y) (x, y) = (x.ravel(), y.ravel()) # Get the common mask and the total nb of unmasked elements m = ma.mask_or(ma.getmask(x), ma.getmask(y)) n -= m.sum() df = n-2 if df < 0: return (masked, masked) (mx, my) = (x.mean(), y.mean()) (xm, ym) = (x-mx, y-my) r_num = ma.add.reduce(xm*ym) r_den = ma.sqrt(ma.dot(xm,xm) * ma.dot(ym,ym)) r = r_num / r_den # Presumably, if r > 1, then it is only some small artifact of floating # point arithmetic. r = min(r, 1.0) r = max(r, -1.0) if r is masked or abs(r) == 1.0: prob = 0. else: t_squared = (df / ((1.0 - r) * (1.0 + r))) * r * r prob = _betai(0.5*df, 0.5, df/(df + t_squared)) return r, prob SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue')) def spearmanr(x, y, use_ties=True): """ Calculates a Spearman rank-order correlation coefficient and the p-value to test for non-correlation. The Spearman correlation is a nonparametric measure of the linear relationship between two datasets. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply a monotonic relationship. Positive correlations imply that as `x` increases, so does `y`. Negative correlations imply that as `x` increases, `y` decreases. 
Missing values are discarded pair-wise: if a value is missing in `x`, the corresponding value in `y` is masked. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. Parameters ---------- x : array_like The length of `x` must be > 2. y : array_like The length of `y` must be > 2. use_ties : bool, optional Whether the correction for ties should be computed. Returns ------- correlation : float Spearman correlation coefficient pvalue : float 2-tailed p-value. References ---------- [CRCProbStat2000] section 14.7 """ (x, y, n) = _chk_size(x, y) (x, y) = (x.ravel(), y.ravel()) m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # need int() here, otherwise numpy defaults to 32 bit # integer on all Windows architectures, causing overflow. # int() will keep it infinite precision. n -= int(m.sum()) if m is not nomask: x = ma.array(x, mask=m, copy=True) y = ma.array(y, mask=m, copy=True) df = n-2 if df < 0: raise ValueError("The input must have at least 3 entries!") # Gets the ranks and rank differences rankx = rankdata(x) ranky = rankdata(y) dsq = np.add.reduce((rankx-ranky)**2) # Tie correction if use_ties: xties = count_tied_groups(x) yties = count_tied_groups(y) corr_x = sum(v*k*(k**2-1) for (k,v) in iteritems(xties))/12. corr_y = sum(v*k*(k**2-1) for (k,v) in iteritems(yties))/12. else: corr_x = corr_y = 0 denom = n*(n**2 - 1)/6. if corr_x != 0 or corr_y != 0: rho = denom - dsq - corr_x - corr_y rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y)) else: rho = 1. - dsq/denom t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho if t is masked: prob = 0. else: prob = _betai(0.5*df, 0.5, df/(df + t * t)) return SpearmanrResult(rho, prob) KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue')) def kendalltau(x, y, use_ties=True, use_missing=False): """ Computes Kendall's rank correlation tau on two variables *x* and *y*. Parameters ---------- x : sequence First data list (for example, time). y : sequence Second data list. use_ties : {True, False}, optional Whether ties correction should be performed. use_missing : {False, True}, optional Whether missing data should be allocated a rank of 0 (False) or the average rank (True) Returns ------- correlation : float Kendall tau pvalue : float Approximate 2-side p-value. """ (x, y, n) = _chk_size(x, y) (x, y) = (x.flatten(), y.flatten()) m = ma.mask_or(ma.getmask(x), ma.getmask(y)) if m is not nomask: x = ma.array(x, mask=m, copy=True) y = ma.array(y, mask=m, copy=True) # need int() here, otherwise numpy defaults to 32 bit # integer on all Windows architectures, causing overflow. # int() will keep it infinite precision. n -= int(m.sum()) if n < 2: return KendalltauResult(np.nan, np.nan) rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0) ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0) idx = rx.argsort() (rx, ry) = (rx[idx], ry[idx]) C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum() for i in range(len(ry)-1)], dtype=float) D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum() for i in range(len(ry)-1)], dtype=float) if use_ties: xties = count_tied_groups(x) yties = count_tied_groups(y) corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float) corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float) denom = ma.sqrt((n*(n-1)-corr_x)/2. 
* (n*(n-1)-corr_y)/2.) else: denom = n*(n-1)/2. tau = (C-D) / denom var_s = n*(n-1)*(2*n+5) if use_ties: var_s -= sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties)) var_s -= sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties)) v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\ np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float) v1 /= 2.*n*(n-1) if n > 2: v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)], dtype=float) * \ np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)], dtype=float) v2 /= 9.*n*(n-1)*(n-2) else: v2 = 0 else: v1 = v2 = 0 var_s /= 18. var_s += (v1 + v2) z = (C-D)/np.sqrt(var_s) prob = special.erfc(abs(z)/np.sqrt(2)) return KendalltauResult(tau, prob) def kendalltau_seasonal(x): """ Computes a multivariate Kendall's rank correlation tau, for seasonal data. Parameters ---------- x : 2-D ndarray Array of seasonal data, with seasons in columns. """ x = ma.array(x, subok=True, copy=False, ndmin=2) (n,m) = x.shape n_p = x.count(0) S_szn = sum(msign(x[i:]-x[i]).sum(0) for i in range(n)) S_tot = S_szn.sum() n_tot = x.count() ties = count_tied_groups(x.compressed()) corr_ties = sum(v*k*(k-1) for (k,v) in iteritems(ties)) denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2. R = rankdata(x, axis=0, use_missing=True) K = ma.empty((m,m), dtype=int) covmat = ma.empty((m,m), dtype=float) denom_szn = ma.empty(m, dtype=float) for j in range(m): ties_j = count_tied_groups(x[:,j].compressed()) corr_j = sum(v*k*(k-1) for (k,v) in iteritems(ties_j)) cmb = n_p[j]*(n_p[j]-1) for k in range(j,m,1): K[j,k] = sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum() for i in range(n)) covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() - n*(n_p[j]+1)*(n_p[k]+1))/3. K[k,j] = K[j,k] covmat[k,j] = covmat[j,k] denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2. var_szn = covmat.diagonal() z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn) z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum()) z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum()) prob_szn = special.erfc(abs(z_szn)/np.sqrt(2)) prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2)) prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2)) chi2_tot = (z_szn*z_szn).sum() chi2_trd = m * z_szn.mean()**2 output = {'seasonal tau': S_szn/denom_szn, 'global tau': S_tot/denom_tot, 'global tau (alt)': S_tot/denom_szn.sum(), 'seasonal p-value': prob_szn, 'global p-value (indep)': prob_tot_ind, 'global p-value (dep)': prob_tot_dep, 'chi2 total': chi2_tot, 'chi2 trend': chi2_trd, } return output PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation', 'pvalue')) def pointbiserialr(x, y): """Calculates a point biserial correlation coefficient and its p-value. Parameters ---------- x : array_like of bools Input array. y : array_like Input array. Returns ------- correlation : float R value pvalue : float 2-tailed p-value Notes ----- Missing values are considered pair-wise: if a value is missing in x, the corresponding value in y is masked. For more details on `pointbiserialr`, see `stats.pointbiserialr`. 
""" x = ma.fix_invalid(x, copy=True).astype(bool) y = ma.fix_invalid(y, copy=True).astype(float) # Get rid of the missing data m = ma.mask_or(ma.getmask(x), ma.getmask(y)) if m is not nomask: unmask = np.logical_not(m) x = x[unmask] y = y[unmask] n = len(x) # phat is the fraction of x values that are True phat = x.sum() / float(n) y0 = y[~x] # y-values where x is False y1 = y[x] # y-values where x is True y0m = y0.mean() y1m = y1.mean() rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std() df = n-2 t = rpb*ma.sqrt(df/(1.0-rpb**2)) prob = _betai(0.5*df, 0.5, df/(df+t*t)) return PointbiserialrResult(rpb, prob) LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')) def linregress(x, y=None): """ Linear regression calculation Note that the non-masked version is used, and that this docstring is replaced by the non-masked docstring + some info on missing data. """ if y is None: x = ma.array(x) if x.shape[0] == 2: x, y = x elif x.shape[1] == 2: x, y = x.T else: msg = ("If only `x` is given as input, it has to be of shape " "(2, N) or (N, 2), provided shape was %s" % str(x.shape)) raise ValueError(msg) else: x = ma.array(x) y = ma.array(y) x = x.flatten() y = y.flatten() m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False) if m is not nomask: x = ma.array(x, mask=m) y = ma.array(y, mask=m) if np.any(~m): slope, intercept, r, prob, sterrest = stats_linregress(x.data[~m], y.data[~m]) else: # All data is masked return None, None, None, None, None else: slope, intercept, r, prob, sterrest = stats_linregress(x.data, y.data) return LinregressResult(slope, intercept, r, prob, sterrest) if stats_linregress.__doc__: linregress.__doc__ = stats_linregress.__doc__ + genmissingvaldoc def theilslopes(y, x=None, alpha=0.95): r""" Computes the Theil-Sen estimator for a set of points (x, y). `theilslopes` implements a method for robust linear regression. It computes the slope as the median of all slopes between paired values. Parameters ---------- y : array_like Dependent variable. x : array_like or None, optional Independent variable. If None, use ``arange(len(y))`` instead. alpha : float, optional Confidence degree between 0 and 1. Default is 95% confidence. Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are interpreted as "find the 90% confidence interval". Returns ------- medslope : float Theil slope. medintercept : float Intercept of the Theil line, as ``median(y) - medslope*median(x)``. lo_slope : float Lower bound of the confidence interval on `medslope`. up_slope : float Upper bound of the confidence interval on `medslope`. Notes ----- For more details on `theilslopes`, see `stats.theilslopes`. """ y = ma.asarray(y).flatten() if x is None: x = ma.arange(len(y), dtype=float) else: x = ma.asarray(x).flatten() if len(x) != len(y): raise ValueError("Incompatible lengths ! 
(%s<>%s)" % (len(y),len(x))) m = ma.mask_or(ma.getmask(x), ma.getmask(y)) y._mask = x._mask = m # Disregard any masked elements of x or y y = y.compressed() x = x.compressed().astype(float) # We now have unmasked arrays so can use `stats.theilslopes` return stats_theilslopes(y, x, alpha=alpha) def sen_seasonal_slopes(x): x = ma.array(x, subok=True, copy=False, ndmin=2) (n,_) = x.shape # Get list of slopes per season szn_slopes = ma.vstack([(x[i+1:]-x[i])/np.arange(1,n-i)[:,None] for i in range(n)]) szn_medslopes = ma.median(szn_slopes, axis=0) medslope = ma.median(szn_slopes, axis=None) return szn_medslopes, medslope Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue')) def ttest_1samp(a, popmean, axis=0): """ Calculates the T-test for the mean of ONE group of scores. Parameters ---------- a : array_like sample observation popmean : float or array_like expected value in null hypothesis, if array_like than it must have the same shape as `a` excluding the axis dimension axis : int or None, optional Axis along which to compute test. If None, compute over the whole array `a`. Returns ------- statistic : float or array t-statistic pvalue : float or array two-tailed p-value Notes ----- For more details on `ttest_1samp`, see `stats.ttest_1samp`. """ a, axis = _chk_asarray(a, axis) if a.size == 0: return (np.nan, np.nan) x = a.mean(axis=axis) v = a.var(axis=axis, ddof=1) n = a.count(axis=axis) # force df to be an array for masked division not to throw a warning df = ma.asanyarray(n - 1.0) svar = ((n - 1.0) * v) / df with np.errstate(divide='ignore', invalid='ignore'): t = (x - popmean) / ma.sqrt(svar / n) prob = special.betainc(0.5*df, 0.5, df/(df + t*t)) return Ttest_1sampResult(t, prob) ttest_onesamp = ttest_1samp Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue')) def ttest_ind(a, b, axis=0, equal_var=True): """ Calculates the T-test for the means of TWO INDEPENDENT samples of scores. Parameters ---------- a, b : array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int or None, optional Axis along which to compute test. If None, compute over the whole arrays, `a`, and `b`. equal_var : bool, optional If True, perform a standard independent 2 sample test that assumes equal population variances. If False, perform Welch's t-test, which does not assume equal population variance. .. versionadded:: 0.17.0 Returns ------- statistic : float or array The calculated t-statistic. pvalue : float or array The two-tailed p-value. Notes ----- For more details on `ttest_ind`, see `stats.ttest_ind`. """ a, b, axis = _chk2_asarray(a, b, axis) if a.size == 0 or b.size == 0: return Ttest_indResult(np.nan, np.nan) (x1, x2) = (a.mean(axis), b.mean(axis)) (v1, v2) = (a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1)) (n1, n2) = (a.count(axis), b.count(axis)) if equal_var: # force df to be an array for masked division not to throw a warning df = ma.asanyarray(n1 + n2 - 2.0) svar = ((n1-1)*v1+(n2-1)*v2) / df denom = ma.sqrt(svar*(1.0/n1 + 1.0/n2)) # n-D computation here! else: vn1 = v1/n1 vn2 = v2/n2 with np.errstate(divide='ignore', invalid='ignore'): df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1)) # If df is undefined, variances are zero. # It doesn't matter what df is as long as it is not NaN. 
df = np.where(np.isnan(df), 1, df) denom = ma.sqrt(vn1 + vn2) with np.errstate(divide='ignore', invalid='ignore'): t = (x1-x2) / denom probs = special.betainc(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape) return Ttest_indResult(t, probs.squeeze()) Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue')) def ttest_rel(a, b, axis=0): """ Calculates the T-test on TWO RELATED samples of scores, a and b. Parameters ---------- a, b : array_like The arrays must have the same shape. axis : int or None, optional Axis along which to compute test. If None, compute over the whole arrays, `a`, and `b`. Returns ------- statistic : float or array t-statistic pvalue : float or array two-tailed p-value Notes ----- For more details on `ttest_rel`, see `stats.ttest_rel`. """ a, b, axis = _chk2_asarray(a, b, axis) if len(a) != len(b): raise ValueError('unequal length arrays') if a.size == 0 or b.size == 0: return Ttest_relResult(np.nan, np.nan) n = a.count(axis) df = ma.asanyarray(n-1.0) d = (a-b).astype('d') dm = d.mean(axis) v = d.var(axis=axis, ddof=1) denom = ma.sqrt(v / n) with np.errstate(divide='ignore', invalid='ignore'): t = dm / denom probs = special.betainc(0.5*df, 0.5, df/(df + t*t)).reshape(t.shape).squeeze() return Ttest_relResult(t, probs) MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue')) def mannwhitneyu(x,y, use_continuity=True): """ Computes the Mann-Whitney statistic Missing values in `x` and/or `y` are discarded. Parameters ---------- x : sequence Input y : sequence Input use_continuity : {True, False}, optional Whether a continuity correction (1/2.) should be taken into account. Returns ------- statistic : float The Mann-Whitney statistics pvalue : float Approximate p-value assuming a normal distribution. """ x = ma.asarray(x).compressed().view(ndarray) y = ma.asarray(y).compressed().view(ndarray) ranks = rankdata(np.concatenate([x,y])) (nx, ny) = (len(x), len(y)) nt = nx + ny U = ranks[:nx].sum() - nx*(nx+1)/2. U = max(U, nx*ny - U) u = nx*ny - U mu = (nx*ny)/2. sigsq = (nt**3 - nt)/12. ties = count_tied_groups(ranks) sigsq -= sum(v*(k**3-k) for (k,v) in iteritems(ties))/12. sigsq *= nx*ny/float(nt*(nt-1)) if use_continuity: z = (U - 1/2. - mu) / ma.sqrt(sigsq) else: z = (U - mu) / ma.sqrt(sigsq) prob = special.erfc(abs(z)/np.sqrt(2)) return MannwhitneyuResult(u, prob) KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue')) def kruskal(*args): """ Compute the Kruskal-Wallis H-test for independent samples Parameters ---------- sample1, sample2, ... : array_like Two or more arrays with the sample measurements can be given as arguments. Returns ------- statistic : float The Kruskal-Wallis H statistic, corrected for ties pvalue : float The p-value for the test using the assumption that H has a chi square distribution Notes ----- For more details on `kruskal`, see `stats.kruskal`. """ output = argstoarray(*args) ranks = ma.masked_equal(rankdata(output, use_missing=False), 0) sumrk = ranks.sum(-1) ngrp = ranks.count(-1) ntot = ranks.count() H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1) # Tie correction ties = count_tied_groups(ranks) T = 1. - sum(v*(k**3-k) for (k,v) in iteritems(ties))/float(ntot**3-ntot) if T == 0: raise ValueError('All numbers are identical in kruskal') H /= T df = len(output) - 1 prob = distributions.chi2.sf(H, df) return KruskalResult(H, prob) kruskalwallis = kruskal def ks_twosamp(data1, data2, alternative="two-sided"): """ Computes the Kolmogorov-Smirnov test on two samples. 
Missing values are discarded. Parameters ---------- data1 : array_like First data set data2 : array_like Second data set alternative : {'two-sided', 'less', 'greater'}, optional Indicates the alternative hypothesis. Default is 'two-sided'. Returns ------- d : float Value of the Kolmogorov Smirnov test p : float Corresponding p-value. """ (data1, data2) = (ma.asarray(data1), ma.asarray(data2)) (n1, n2) = (data1.count(), data2.count()) n = (n1*n2/float(n1+n2)) mix = ma.concatenate((data1.compressed(), data2.compressed())) mixsort = mix.argsort(kind='mergesort') csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum() # Check for ties if len(np.unique(mix)) < (n1+n2): csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]] alternative = str(alternative).lower()[0] if alternative == 't': d = ma.abs(csum).max() prob = special.kolmogorov(np.sqrt(n)*d) elif alternative == 'l': d = -csum.min() prob = np.exp(-2*n*d**2) elif alternative == 'g': d = csum.max() prob = np.exp(-2*n*d**2) else: raise ValueError("Invalid value for the alternative hypothesis: " "should be in 'two-sided', 'less' or 'greater'") return (d, prob) ks_2samp = ks_twosamp def trima(a, limits=None, inclusive=(True,True)): """ Trims an array by masking the data outside some given limits. Returns a masked version of the input array. Parameters ---------- a : array_like Input array. limits : {None, tuple}, optional Tuple of (lower limit, upper limit) in absolute values. Values of the input array lower (greater) than the lower (upper) limit will be masked. A limit is None indicates an open interval. inclusive : (bool, bool) tuple, optional Tuple of (lower flag, upper flag), indicating whether values exactly equal to the lower (upper) limit are allowed. """ a = ma.asarray(a) a.unshare_mask() if (limits is None) or (limits == (None, None)): return a (lower_lim, upper_lim) = limits (lower_in, upper_in) = inclusive condition = False if lower_lim is not None: if lower_in: condition |= (a < lower_lim) else: condition |= (a <= lower_lim) if upper_lim is not None: if upper_in: condition |= (a > upper_lim) else: condition |= (a >= upper_lim) a[condition.filled(True)] = masked return a def trimr(a, limits=None, inclusive=(True, True), axis=None): """ Trims an array by masking some proportion of the data on each end. Returns a masked version of the input array. Parameters ---------- a : sequence Input array. limits : {None, tuple}, optional Tuple of the percentages to cut on each side of the array, with respect to the number of unmasked data, as floats between 0. and 1. Noting n the number of unmasked data before trimming, the (n*limits[0])th smallest data and the (n*limits[1])th largest data are masked, and the total number of unmasked data after trimming is n*(1.-sum(limits)). The value of one limit can be set to None to indicate an open interval. inclusive : {(True,True) tuple}, optional Tuple of flags indicating whether the number of data being masked on the left (right) end should be truncated (True) or rounded (False) to integers. axis : {None,int}, optional Axis along which to trim. If None, the whole array is trimmed, but its shape is maintained. 
""" def _trimr1D(a, low_limit, up_limit, low_inclusive, up_inclusive): n = a.count() idx = a.argsort() if low_limit: if low_inclusive: lowidx = int(low_limit*n) else: lowidx = np.round(low_limit*n) a[idx[:lowidx]] = masked if up_limit is not None: if up_inclusive: upidx = n - int(n*up_limit) else: upidx = n - np.round(n*up_limit) a[idx[upidx:]] = masked return a a = ma.asarray(a) a.unshare_mask() if limits is None: return a # Check the limits (lolim, uplim) = limits errmsg = "The proportion to cut from the %s should be between 0. and 1." if lolim is not None: if lolim > 1. or lolim < 0: raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) if uplim is not None: if uplim > 1. or uplim < 0: raise ValueError(errmsg % 'end' + "(got %s)" % uplim) (loinc, upinc) = inclusive if axis is None: shp = a.shape return _trimr1D(a.ravel(),lolim,uplim,loinc,upinc).reshape(shp) else: return ma.apply_along_axis(_trimr1D, axis, a, lolim,uplim,loinc,upinc) trimdoc = """ Parameters ---------- a : sequence Input array limits : {None, tuple}, optional If `relative` is False, tuple (lower limit, upper limit) in absolute values. Values of the input array lower (greater) than the lower (upper) limit are masked. If `relative` is True, tuple (lower percentage, upper percentage) to cut on each side of the array, with respect to the number of unmasked data. Noting n the number of unmasked data before trimming, the (n*limits[0])th smallest data and the (n*limits[1])th largest data are masked, and the total number of unmasked data after trimming is n*(1.-sum(limits)) In each case, the value of one limit can be set to None to indicate an open interval. If limits is None, no trimming is performed inclusive : {(bool, bool) tuple}, optional If `relative` is False, tuple indicating whether values exactly equal to the absolute limits are allowed. If `relative` is True, tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). relative : bool, optional Whether to consider the limits as absolute values (False) or proportions to cut (True). axis : int, optional Axis along which to trim. """ def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None): """ Trims an array by masking the data outside some given limits. Returns a masked version of the input array. %s Examples -------- >>> from scipy.stats.mstats import trim >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10] >>> print(trim(z,(3,8))) [-- -- 3 4 5 6 7 8 -- --] >>> print(trim(z,(0.1,0.2),relative=True)) [-- 2 3 4 5 6 7 8 -- --] """ if relative: return trimr(a, limits=limits, inclusive=inclusive, axis=axis) else: return trima(a, limits=limits, inclusive=inclusive) if trim.__doc__ is not None: trim.__doc__ = trim.__doc__ % trimdoc def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None): """ Trims the smallest and largest data values. Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and ``int(proportiontocut * n)`` largest values of data along the given axis, where n is the number of unmasked values before trimming. Parameters ---------- data : ndarray Data to trim. proportiontocut : float, optional Percentage of trimming (as a float between 0 and 1). If n is the number of unmasked values before trimming, the number of values after trimming is ``(1 - 2*proportiontocut) * n``. Default is 0.2. inclusive : {(bool, bool) tuple}, optional Tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). 
axis : int, optional Axis along which to perform the trimming. If None, the input array is first flattened. """ return trimr(data, limits=(proportiontocut,proportiontocut), inclusive=inclusive, axis=axis) def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True), axis=None): """ Trims the data by masking values from one tail. Parameters ---------- data : array_like Data to trim. proportiontocut : float, optional Percentage of trimming. If n is the number of unmasked values before trimming, the number of values after trimming is ``(1 - proportiontocut) * n``. Default is 0.2. tail : {'left','right'}, optional If 'left' the `proportiontocut` lowest values will be masked. If 'right' the `proportiontocut` highest values will be masked. Default is 'left'. inclusive : {(bool, bool) tuple}, optional Tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). Default is (True, True). axis : int, optional Axis along which to perform the trimming. If None, the input array is first flattened. Default is None. Returns ------- trimtail : ndarray Returned array of same shape as `data` with masked tail values. """ tail = str(tail).lower()[0] if tail == 'l': limits = (proportiontocut,None) elif tail == 'r': limits = (None, proportiontocut) else: raise TypeError("The tail argument should be in ('left','right')") return trimr(data, limits=limits, axis=axis, inclusive=inclusive) trim1 = trimtail def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, axis=None): """Returns the trimmed mean of the data along the given axis. %s """ % trimdoc if (not isinstance(limits,tuple)) and isinstance(limits,float): limits = (limits, limits) if relative: return trimr(a,limits=limits,inclusive=inclusive,axis=axis).mean(axis=axis) else: return trima(a,limits=limits,inclusive=inclusive).mean(axis=axis) def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, axis=None, ddof=0): """Returns the trimmed variance of the data along the given axis. %s ddof : {0,integer}, optional Means Delta Degrees of Freedom. The denominator used during computations is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- biased estimate of the variance. """ % trimdoc if (not isinstance(limits,tuple)) and isinstance(limits,float): limits = (limits, limits) if relative: out = trimr(a,limits=limits, inclusive=inclusive,axis=axis) else: out = trima(a,limits=limits,inclusive=inclusive) return out.var(axis=axis, ddof=ddof) def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True, axis=None, ddof=0): """Returns the trimmed standard deviation of the data along the given axis. %s ddof : {0,integer}, optional Means Delta Degrees of Freedom. The denominator used during computations is (n-ddof). DDOF=0 corresponds to a biased estimate, DDOF=1 to an un- biased estimate of the variance. """ % trimdoc if (not isinstance(limits,tuple)) and isinstance(limits,float): limits = (limits, limits) if relative: out = trimr(a,limits=limits,inclusive=inclusive,axis=axis) else: out = trima(a,limits=limits,inclusive=inclusive) return out.std(axis=axis,ddof=ddof) def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None): """ Returns the standard error of the trimmed mean along the given axis. Parameters ---------- a : sequence Input array limits : {(0.1,0.1), tuple of float}, optional tuple (lower percentage, upper percentage) to cut on each side of the array, with respect to the number of unmasked data. 
If n is the number of unmasked data before trimming, the values smaller than ``n * limits[0]`` and the values larger than ``n * `limits[1]`` are masked, and the total number of unmasked data after trimming is ``n * (1.-sum(limits))``. In each case, the value of one limit can be set to None to indicate an open interval. If `limits` is None, no trimming is performed. inclusive : {(bool, bool) tuple} optional Tuple indicating whether the number of data being masked on each side should be rounded (True) or truncated (False). axis : int, optional Axis along which to trim. Returns ------- trimmed_stde : scalar or ndarray """ def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive): "Returns the standard error of the trimmed mean for a 1D input data." n = a.count() idx = a.argsort() if low_limit: if low_inclusive: lowidx = int(low_limit*n) else: lowidx = np.round(low_limit*n) a[idx[:lowidx]] = masked if up_limit is not None: if up_inclusive: upidx = n - int(n*up_limit) else: upidx = n - np.round(n*up_limit) a[idx[upidx:]] = masked a[idx[:lowidx]] = a[idx[lowidx]] a[idx[upidx:]] = a[idx[upidx-1]] winstd = a.std(ddof=1) return winstd / ((1-low_limit-up_limit)*np.sqrt(len(a))) a = ma.array(a, copy=True, subok=True) a.unshare_mask() if limits is None: return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis)) if (not isinstance(limits,tuple)) and isinstance(limits,float): limits = (limits, limits) # Check the limits (lolim, uplim) = limits errmsg = "The proportion to cut from the %s should be between 0. and 1." if lolim is not None: if lolim > 1. or lolim < 0: raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) if uplim is not None: if uplim > 1. or uplim < 0: raise ValueError(errmsg % 'end' + "(got %s)" % uplim) (loinc, upinc) = inclusive if (axis is None): return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc) else: if a.ndim > 2: raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim) return ma.apply_along_axis(_trimmed_stde_1D, axis, a, lolim,uplim,loinc,upinc) def _mask_to_limits(a, limits, inclusive): """Mask an array for values outside of given limits. This is primarily a utility function. Parameters ---------- a : array limits : (float or None, float or None) A tuple consisting of the (lower limit, upper limit). Values in the input array less than the lower limit or greater than the upper limit will be masked out. None implies no limit. inclusive : (bool, bool) A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to lower or upper are allowed. Returns ------- A MaskedArray. Raises ------ A ValueError if there are no values within the given limits. """ lower_limit, upper_limit = limits lower_include, upper_include = inclusive am = ma.MaskedArray(a) if lower_limit is not None: if lower_include: am = ma.masked_less(am, lower_limit) else: am = ma.masked_less_equal(am, lower_limit) if upper_limit is not None: if upper_include: am = ma.masked_greater(am, upper_limit) else: am = ma.masked_greater_equal(am, upper_limit) if am.count() == 0: raise ValueError("No array values within given limits") return am def tmean(a, limits=None, inclusive=(True, True), axis=None): """ Compute the trimmed mean. Parameters ---------- a : array_like Array of values. limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None (default), then all values are used. 
Either of the limit values in the tuple can also be None representing a half-open interval. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. If None, compute over the whole array. Default is None. Returns ------- tmean : float Notes ----- For more details on `tmean`, see `stats.tmean`. """ return trima(a, limits=limits, inclusive=inclusive).mean(axis=axis) def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1): """ Compute the trimmed variance This function computes the sample variance of an array of values, while ignoring values which are outside of given `limits`. Parameters ---------- a : array_like Array of values. limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. If None, compute over the whole array. Default is zero. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tvar : float Trimmed variance. Notes ----- For more details on `tvar`, see `stats.tvar`. """ a = a.astype(float).ravel() if limits is None: n = (~a.mask).sum() # todo: better way to do that? return np.ma.var(a) * n/(n-1.) am = _mask_to_limits(a, limits=limits, inclusive=inclusive) return np.ma.var(am, axis=axis, ddof=ddof) def tmin(a, lowerlimit=None, axis=0, inclusive=True): """ Compute the trimmed minimum Parameters ---------- a : array_like array of values lowerlimit : None or float, optional Values in the input array less than the given limit will be ignored. When lowerlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. inclusive : {True, False}, optional This flag determines whether values exactly equal to the lower limit are included. The default value is True. Returns ------- tmin : float, int or ndarray Notes ----- For more details on `tmin`, see `stats.tmin`. """ a, axis = _chk_asarray(a, axis) am = trima(a, (lowerlimit, None), (inclusive, False)) return ma.minimum.reduce(am, axis) def tmax(a, upperlimit=None, axis=0, inclusive=True): """ Compute the trimmed maximum This function computes the maximum value of an array along a given axis, while ignoring values larger than a specified upper limit. Parameters ---------- a : array_like array of values upperlimit : None or float, optional Values in the input array greater than the given limit will be ignored. When upperlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array `a`. inclusive : {True, False}, optional This flag determines whether values exactly equal to the upper limit are included. The default value is True. Returns ------- tmax : float, int or ndarray Notes ----- For more details on `tmax`, see `stats.tmax`. 
""" a, axis = _chk_asarray(a, axis) am = trima(a, (None, upperlimit), (False, inclusive)) return ma.maximum.reduce(am, axis) def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1): """ Compute the trimmed standard error of the mean. This function finds the standard error of the mean for given values, ignoring values outside the given `limits`. Parameters ---------- a : array_like array of values limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. If None, compute over the whole array. Default is zero. ddof : int, optional Delta degrees of freedom. Default is 1. Returns ------- tsem : float Notes ----- For more details on `tsem`, see `stats.tsem`. """ a = ma.asarray(a).ravel() if limits is None: n = float(a.count()) return a.std(axis=axis, ddof=ddof)/ma.sqrt(n) am = trima(a.ravel(), limits, inclusive) sd = np.sqrt(am.var(axis=axis, ddof=ddof)) return sd / np.sqrt(am.count()) def winsorize(a, limits=None, inclusive=(True, True), inplace=False, axis=None): """Returns a Winsorized version of the input array. The (limits[0])th lowest values are set to the (limits[0])th percentile, and the (limits[1])th highest values are set to the (1 - limits[1])th percentile. Masked values are skipped. Parameters ---------- a : sequence Input array. limits : {None, tuple of float}, optional Tuple of the percentages to cut on each side of the array, with respect to the number of unmasked data, as floats between 0. and 1. Noting n the number of unmasked data before trimming, the (n*limits[0])th smallest data and the (n*limits[1])th largest data are masked, and the total number of unmasked data after trimming is n*(1.-sum(limits)) The value of one limit can be set to None to indicate an open interval. inclusive : {(True, True) tuple}, optional Tuple indicating whether the number of data being masked on each side should be truncated (True) or rounded (False). inplace : {False, True}, optional Whether to winsorize in place (True) or to use a copy (False) axis : {None, int}, optional Axis along which to trim. If None, the whole array is trimmed, but its shape is maintained. Notes ----- This function is applied to reduce the effect of possibly spurious outliers by limiting the extreme values. """ def _winsorize1D(a, low_limit, up_limit, low_include, up_include): n = a.count() idx = a.argsort() if low_limit: if low_include: lowidx = int(low_limit * n) else: lowidx = np.round(low_limit * n).astype(int) a[idx[:lowidx]] = a[idx[lowidx]] if up_limit is not None: if up_include: upidx = n - int(n * up_limit) else: upidx = n - np.round(n * up_limit).astype(int) a[idx[upidx:]] = a[idx[upidx - 1]] return a # We are going to modify a: better make a copy a = ma.array(a, copy=np.logical_not(inplace)) if limits is None: return a if (not isinstance(limits, tuple)) and isinstance(limits, float): limits = (limits, limits) # Check the limits (lolim, uplim) = limits errmsg = "The proportion to cut from the %s should be between 0. and 1." if lolim is not None: if lolim > 1. 
or lolim < 0: raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim) if uplim is not None: if uplim > 1. or uplim < 0: raise ValueError(errmsg % 'end' + "(got %s)" % uplim) (loinc, upinc) = inclusive if axis is None: shp = a.shape return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp) else: return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc, upinc) def moment(a, moment=1, axis=0): """ Calculates the nth moment about the mean for a sample. Parameters ---------- a : array_like data moment : int, optional order of central moment that is returned axis : int or None, optional Axis along which the central moment is computed. Default is 0. If None, compute over the whole array `a`. Returns ------- n-th central moment : ndarray or float The appropriate moment along the given axis or over all values if axis is None. The denominator for the moment calculation is the number of observations, no degrees of freedom correction is done. Notes ----- For more details about `moment`, see `stats.moment`. """ a, axis = _chk_asarray(a, axis) if moment == 1: # By definition the first moment about the mean is 0. shape = list(a.shape) del shape[axis] if shape: # return an actual array of the appropriate shape return np.zeros(shape, dtype=float) else: # the input was 1D, so return a scalar instead of a rank-0 array return np.float64(0.0) else: # Exponentiation by squares: form exponent sequence n_list = [moment] current_n = moment while current_n > 2: if current_n % 2: current_n = (current_n-1)/2 else: current_n /= 2 n_list.append(current_n) # Starting point for exponentiation by squares a_zero_mean = a - ma.expand_dims(a.mean(axis), axis) if n_list[-1] == 1: s = a_zero_mean.copy() else: s = a_zero_mean**2 # Perform multiplications for n in n_list[-2::-1]: s = s**2 if n % 2: s *= a_zero_mean return s.mean(axis) def variation(a, axis=0): """ Computes the coefficient of variation, the ratio of the biased standard deviation to the mean. Parameters ---------- a : array_like Input array. axis : int or None, optional Axis along which to calculate the coefficient of variation. Default is 0. If None, compute over the whole array `a`. Returns ------- variation : ndarray The calculated variation along the requested axis. Notes ----- For more details about `variation`, see `stats.variation`. """ a, axis = _chk_asarray(a, axis) return a.std(axis)/a.mean(axis) def skew(a, axis=0, bias=True): """ Computes the skewness of a data set. Parameters ---------- a : ndarray data axis : int or None, optional Axis along which skewness is calculated. Default is 0. If None, compute over the whole array `a`. bias : bool, optional If False, then the calculations are corrected for statistical bias. Returns ------- skewness : ndarray The skewness of values along an axis, returning 0 where all values are equal. Notes ----- For more details about `skew`, see `stats.skew`. """ a, axis = _chk_asarray(a,axis) n = a.count(axis) m2 = moment(a, 2, axis) m3 = moment(a, 3, axis) olderr = np.seterr(all='ignore') try: vals = ma.where(m2 == 0, 0, m3 / m2**1.5) finally: np.seterr(**olderr) if not bias: can_correct = (n > 2) & (m2 > 0) if can_correct.any(): m2 = np.extract(can_correct, m2) m3 = np.extract(can_correct, m3) nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5 np.place(vals, can_correct, nval) return vals def kurtosis(a, axis=0, fisher=True, bias=True): """ Computes the kurtosis (Fisher or Pearson) of a dataset. Kurtosis is the fourth central moment divided by the square of the variance. 
If Fisher's definition is used, then 3.0 is subtracted from the result to give 0.0 for a normal distribution. If bias is False then the kurtosis is calculated using k statistics to eliminate bias coming from biased moment estimators Use `kurtosistest` to see if result is close enough to normal. Parameters ---------- a : array data for which the kurtosis is calculated axis : int or None, optional Axis along which the kurtosis is calculated. Default is 0. If None, compute over the whole array `a`. fisher : bool, optional If True, Fisher's definition is used (normal ==> 0.0). If False, Pearson's definition is used (normal ==> 3.0). bias : bool, optional If False, then the calculations are corrected for statistical bias. Returns ------- kurtosis : array The kurtosis of values along an axis. If all values are equal, return -3 for Fisher's definition and 0 for Pearson's definition. Notes ----- For more details about `kurtosis`, see `stats.kurtosis`. """ a, axis = _chk_asarray(a, axis) m2 = moment(a, 2, axis) m4 = moment(a, 4, axis) olderr = np.seterr(all='ignore') try: vals = ma.where(m2 == 0, 0, m4 / m2**2.0) finally: np.seterr(**olderr) if not bias: n = a.count(axis) can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0) if can_correct.any(): n = np.extract(can_correct, n) m2 = np.extract(can_correct, m2) m4 = np.extract(can_correct, m4) nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0) np.place(vals, can_correct, nval+3.0) if fisher: return vals - 3 else: return vals DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean', 'variance', 'skewness', 'kurtosis')) def describe(a, axis=0, ddof=0, bias=True): """ Computes several descriptive statistics of the passed array. Parameters ---------- a : array_like Data array axis : int or None, optional Axis along which to calculate statistics. Default 0. If None, compute over the whole array `a`. ddof : int, optional degree of freedom (default 0); note that default ddof is different from the same routine in stats.describe bias : bool, optional If False, then the skewness and kurtosis calculations are corrected for statistical bias. Returns ------- nobs : int (size of the data (discarding missing values) minmax : (int, int) min, max mean : float arithmetic mean variance : float unbiased variance skewness : float biased skewness kurtosis : float biased kurtosis Examples -------- >>> from scipy.stats.mstats import describe >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1]) >>> describe(ma) DescribeResult(nobs=3, minmax=(masked_array(data=0, mask=False, fill_value=999999), masked_array(data=2, mask=False, fill_value=999999)), mean=1.0, variance=0.6666666666666666, skewness=masked_array(data=0., mask=False, fill_value=1e+20), kurtosis=-1.5) """ a, axis = _chk_asarray(a, axis) n = a.count(axis) mm = (ma.minimum.reduce(a), ma.maximum.reduce(a)) m = a.mean(axis) v = a.var(axis, ddof=ddof) sk = skew(a, axis, bias=bias) kurt = kurtosis(a, axis, bias=bias) return DescribeResult(n, mm, m, v, sk, kurt) def stde_median(data, axis=None): """Returns the McKean-Schrader estimate of the standard error of the sample median along the given axis. masked values are discarded. Parameters ---------- data : ndarray Data to trim. axis : {None,int}, optional Axis along which to perform the trimming. If None, the input array is first flattened. """ def _stdemed_1D(data): data = np.sort(data.compressed()) n = len(data) z = 2.5758293035489004 k = int(np.round((n+1)/2. 
- z * np.sqrt(n/4.),0)) return ((data[n-k] - data[k-1])/(2.*z)) data = ma.array(data, copy=False, subok=True) if (axis is None): return _stdemed_1D(data) else: if data.ndim > 2: raise ValueError("Array 'data' must be at most two dimensional, " "but got data.ndim = %d" % data.ndim) return ma.apply_along_axis(_stdemed_1D, axis, data) SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue')) def skewtest(a, axis=0): """ Tests whether the skew is different from the normal distribution. Parameters ---------- a : array The data to be tested axis : int or None, optional Axis along which statistics are calculated. Default is 0. If None, compute over the whole array `a`. Returns ------- statistic : float The computed z-score for this test. pvalue : float a 2-sided p-value for the hypothesis test Notes ----- For more details about `skewtest`, see `stats.skewtest`. """ a, axis = _chk_asarray(a, axis) if axis is None: a = a.ravel() axis = 0 b2 = skew(a,axis) n = a.count(axis) if np.min(n) < 8: raise ValueError( "skewtest is not valid with less than 8 samples; %i samples" " were given." % np.min(n)) y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2))) beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9)) W2 = -1 + ma.sqrt(2*(beta2-1)) delta = 1/ma.sqrt(0.5*ma.log(W2)) alpha = ma.sqrt(2.0/(W2-1)) y = ma.where(y == 0, 1, y) Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1)) return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue')) def kurtosistest(a, axis=0): """ Tests whether a dataset has normal kurtosis Parameters ---------- a : array array of the sample data axis : int or None, optional Axis along which to compute test. Default is 0. If None, compute over the whole array `a`. Returns ------- statistic : float The computed z-score for this test. pvalue : float The 2-sided p-value for the hypothesis test Notes ----- For more details about `kurtosistest`, see `stats.kurtosistest`. """ a, axis = _chk_asarray(a, axis) n = a.count(axis=axis) if np.min(n) < 5: raise ValueError( "kurtosistest requires at least 5 observations; %i observations" " were given." % np.min(n)) if np.min(n) < 20: warnings.warn( "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" % np.min(n)) b2 = kurtosis(a, axis, fisher=False) E = 3.0*(n-1) / (n+1) varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) x = (b2-E)/ma.sqrt(varb2) sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) / (n*(n-2)*(n-3))) A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2))) term1 = 1 - 2./(9.0*A) denom = 1 + x*ma.sqrt(2/(A-4.0)) if np.ma.isMaskedArray(denom): # For multi-dimensional array input denom[denom < 0] = masked elif denom < 0: denom = masked term2 = ma.power((1-2.0/A)/denom,1/3.0) Z = (term1 - term2) / np.sqrt(2/(9.0*A)) return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z))) NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue')) def normaltest(a, axis=0): """ Tests whether a sample differs from a normal distribution. Parameters ---------- a : array_like The array containing the data to be tested. axis : int or None, optional Axis along which to compute test. Default is 0. If None, compute over the whole array `a`. Returns ------- statistic : float or array ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and ``k`` is the z-score returned by `kurtosistest`. pvalue : float or array A 2-sided chi squared probability for the hypothesis test. 
Notes ----- For more details about `normaltest`, see `stats.normaltest`. """ a, axis = _chk_asarray(a, axis) s, _ = skewtest(a, axis) k, _ = kurtosistest(a, axis) k2 = s*s + k*k return NormaltestResult(k2, distributions.chi2.sf(k2, 2)) def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None, limit=()): """ Computes empirical quantiles for a data array. Samples quantile are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``, where ``x[j]`` is the j-th order statistic, and gamma is a function of ``j = floor(n*p + m)``, ``m = alphap + p*(1 - alphap - betap)`` and ``g = n*p + m - j``. Reinterpreting the above equations to compare to **R** lead to the equation: ``p(k) = (k - alphap)/(n + 1 - alphap - betap)`` Typical values of (alphap,betap) are: - (0,1) : ``p(k) = k/n`` : linear interpolation of cdf (**R** type 4) - (.5,.5) : ``p(k) = (k - 1/2.)/n`` : piecewise linear function (**R** type 5) - (0,0) : ``p(k) = k/(n+1)`` : (**R** type 6) - (1,1) : ``p(k) = (k-1)/(n-1)``: p(k) = mode[F(x[k])]. (**R** type 7, **R** default) - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``: Then p(k) ~ median[F(x[k])]. The resulting quantile estimates are approximately median-unbiased regardless of the distribution of x. (**R** type 8) - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``: Blom. The resulting quantile estimates are approximately unbiased if x is normally distributed (**R** type 9) - (.4,.4) : approximately quantile unbiased (Cunnane) - (.35,.35): APL, used with PWM Parameters ---------- a : array_like Input data, as a sequence or array of dimension at most 2. prob : array_like, optional List of quantiles to compute. alphap : float, optional Plotting positions parameter, default is 0.4. betap : float, optional Plotting positions parameter, default is 0.4. axis : int, optional Axis along which to perform the trimming. If None (default), the input array is first flattened. limit : tuple, optional Tuple of (lower, upper) values. Values of `a` outside this open interval are ignored. Returns ------- mquantiles : MaskedArray An array containing the calculated quantiles. Notes ----- This formulation is very similar to **R** except the calculation of ``m`` from ``alphap`` and ``betap``, where in **R** ``m`` is defined with each type. References ---------- .. [1] *R* statistical software: http://www.r-project.org/ .. [2] *R* ``quantile`` function: http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html Examples -------- >>> from scipy.stats.mstats import mquantiles >>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.]) >>> mquantiles(a) array([ 19.2, 40. , 42.8]) Using a 2D array, specifying axis and limit. >>> data = np.array([[ 6., 7., 1.], ... [ 47., 15., 2.], ... [ 49., 36., 3.], ... [ 15., 39., 4.], ... [ 42., 40., -999.], ... [ 41., 41., -999.], ... [ 7., -999., -999.], ... [ 39., -999., -999.], ... [ 43., -999., -999.], ... [ 40., -999., -999.], ... [ 36., -999., -999.]]) >>> print(mquantiles(data, axis=0, limit=(0, 50))) [[19.2 14.6 1.45] [40. 37.5 2.5 ] [42.8 40.05 3.55]] >>> data[:, 2] = -999. 
>>> print(mquantiles(data, axis=0, limit=(0, 50))) [[19.200000000000003 14.6 --] [40.0 37.5 --] [42.800000000000004 40.05 --]] """ def _quantiles1D(data,m,p): x = np.sort(data.compressed()) n = len(x) if n == 0: return ma.array(np.empty(len(p), dtype=float), mask=True) elif n == 1: return ma.array(np.resize(x, p.shape), mask=nomask) aleph = (n*p + m) k = np.floor(aleph.clip(1, n-1)).astype(int) gamma = (aleph-k).clip(0,1) return (1.-gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()] data = ma.array(a, copy=False) if data.ndim > 2: raise TypeError("Array should be 2D at most !") if limit: condition = (limit[0] < data) & (data < limit[1]) data[~condition.filled(True)] = masked p = np.array(prob, copy=False, ndmin=1) m = alphap + p*(1.-alphap-betap) # Computes quantiles along axis (or globally) if (axis is None): return _quantiles1D(data, m, p) return ma.apply_along_axis(_quantiles1D, axis, data, m, p) def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4): """Calculate the score at the given 'per' percentile of the sequence a. For example, the score at per=50 is the median. This function is a shortcut to mquantile """ if (per < 0) or (per > 100.): raise ValueError("The percentile should be between 0. and 100. !" " (got %s)" % per) return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap, limit=limit, axis=0).squeeze() def plotting_positions(data, alpha=0.4, beta=0.4): """ Returns plotting positions (or empirical percentile points) for the data. Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where: - i is the rank order statistics - n is the number of unmasked values along the given axis - `alpha` and `beta` are two parameters. Typical values for `alpha` and `beta` are: - (0,1) : ``p(k) = k/n``, linear interpolation of cdf (R, type 4) - (.5,.5) : ``p(k) = (k-1/2.)/n``, piecewise linear function (R, type 5) - (0,0) : ``p(k) = k/(n+1)``, Weibull (R type 6) - (1,1) : ``p(k) = (k-1)/(n-1)``, in this case, ``p(k) = mode[F(x[k])]``. That's R default (R type 7) - (1/3,1/3): ``p(k) = (k-1/3)/(n+1/3)``, then ``p(k) ~ median[F(x[k])]``. The resulting quantile estimates are approximately median-unbiased regardless of the distribution of x. (R type 8) - (3/8,3/8): ``p(k) = (k-3/8)/(n+1/4)``, Blom. The resulting quantile estimates are approximately unbiased if x is normally distributed (R type 9) - (.4,.4) : approximately quantile unbiased (Cunnane) - (.35,.35): APL, used with PWM - (.3175, .3175): used in scipy.stats.probplot Parameters ---------- data : array_like Input data, as a sequence or array of dimension at most 2. alpha : float, optional Plotting positions parameter. Default is 0.4. beta : float, optional Plotting positions parameter. Default is 0.4. Returns ------- positions : MaskedArray The calculated plotting positions. """ data = ma.array(data, copy=False).reshape(1,-1) n = data.count() plpos = np.empty(data.size, dtype=float) plpos[n:] = 0 plpos[data.argsort(axis=None)[:n]] = ((np.arange(1, n+1) - alpha) / (n + 1.0 - alpha - beta)) return ma.array(plpos, mask=data._mask) meppf = plotting_positions def obrientransform(*args): """ Computes a transform on input data (any number of columns). Used to test for homogeneity of variance prior to running one-way stats. Each array in ``*args`` is one level of a factor. If an `f_oneway()` run on the transformed data and found significant, variances are unequal. From Maxwell and Delaney, p.112. 
Returns: transformed data for use in an ANOVA """ data = argstoarray(*args).T v = data.var(axis=0,ddof=1) m = data.mean(0) n = data.count(0).astype(float) # result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2)) data -= m data **= 2 data *= (n-1.5)*n data -= 0.5*v*(n-1) data /= (n-1.)*(n-2.) if not ma.allclose(v,data.mean(0)): raise ValueError("Lack of convergence in obrientransform.") return data def sem(a, axis=0, ddof=1): """ Calculates the standard error of the mean of the input array. Also sometimes called standard error of measurement. Parameters ---------- a : array_like An array containing the values for which the standard error is returned. axis : int or None, optional If axis is None, ravel `a` first. If axis is an integer, this will be the axis over which to operate. Defaults to 0. ddof : int, optional Delta degrees-of-freedom. How many degrees of freedom to adjust for bias in limited samples relative to the population estimate of variance. Defaults to 1. Returns ------- s : ndarray or float The standard error of the mean in the sample(s), along the input axis. Notes ----- The default value for `ddof` changed in scipy 0.15.0 to be consistent with `stats.sem` as well as with the most common definition used (like in the R documentation). Examples -------- Find standard error along the first axis: >>> from scipy import stats >>> a = np.arange(20).reshape(5,4) >>> print(stats.mstats.sem(a)) [2.8284271247461903 2.8284271247461903 2.8284271247461903 2.8284271247461903] Find standard error across the whole array, using n degrees of freedom: >>> print(stats.mstats.sem(a, axis=None, ddof=0)) 1.2893796958227628 """ a, axis = _chk_asarray(a, axis) n = a.count(axis=axis) s = a.std(axis=axis, ddof=ddof) / ma.sqrt(n) return s F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue')) def f_oneway(*args): """ Performs a 1-way ANOVA, returning an F-value and probability given any number of groups. From Heiman, pp.394-7. Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays, one per treatment group. Returns ------- statistic : float The computed F-value of the test. pvalue : float The associated p-value from the F-distribution. """ # Construct a single array of arguments: each row is a group data = argstoarray(*args) ngroups = len(data) ntot = data.count() sstot = (data**2).sum() - (data.sum())**2/float(ntot) ssbg = (data.count(-1) * (data.mean(-1)-data.mean())**2).sum() sswg = sstot-ssbg dfbg = ngroups-1 dfwg = ntot - ngroups msb = ssbg/float(dfbg) msw = sswg/float(dfwg) f = msb/msw prob = special.fdtrc(dfbg, dfwg, f) # equivalent to stats.f.sf return F_onewayResult(f, prob) FriedmanchisquareResult = namedtuple('FriedmanchisquareResult', ('statistic', 'pvalue')) def friedmanchisquare(*args): """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA. This function calculates the Friedman Chi-square test for repeated measures and returns the result, along with the associated probability value. Each input is considered a given group. Ideally, the number of treatments among each group should be equal. If this is not the case, only the first n treatments are taken into account, where n is the number of treatments of the smallest group. If a group has some missing values, the corresponding treatments are masked in the other groups. The test statistic is corrected for ties. Masked values in one group are propagated to the other groups. Returns ------- statistic : float the test statistic. pvalue : float the associated p-value. 
""" data = argstoarray(*args).astype(float) k = len(data) if k < 3: raise ValueError("Less than 3 groups (%i): " % k + "the Friedman test is NOT appropriate.") ranked = ma.masked_values(rankdata(data, axis=0), 0) if ranked._mask is not nomask: ranked = ma.mask_cols(ranked) ranked = ranked.compressed().reshape(k,-1).view(ndarray) else: ranked = ranked._data (k,n) = ranked.shape # Ties correction repeats = [find_repeats(row) for row in ranked.T] ties = np.array([y for x, y in repeats if x.size > 0]) tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k)) ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2) chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k-1))
file_length: 82,352
avg_line_length: 30.456455
max_line_length: 103
extension_type: py
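The snippet below is an illustrative usage sketch, not part of the scipy source reproduced above: it exercises a few of the masked-array statistics defined in that module (describe, trimmed_mean, mquantiles, winsorize, ttest_ind) through the public scipy.stats.mstats namespace. The sample data, seed, and trimming proportions are arbitrary choices for demonstration.

import numpy as np
from scipy.stats import mstats

rng = np.random.RandomState(0)                       # arbitrary fixed seed
raw = rng.normal(loc=5.0, scale=2.0, size=50)
raw[::7] = np.nan                                    # simulate missing observations

x = np.ma.masked_invalid(raw)                        # mask the NaNs instead of dropping them

print(mstats.describe(x))                            # nobs, minmax, mean, variance, skew, kurtosis
print(mstats.trimmed_mean(x, limits=(0.1, 0.1)))     # mean after trimming 10% in each tail
print(mstats.mquantiles(x, prob=[0.25, 0.5, 0.75]))  # empirical quartiles
print(mstats.winsorize(x, limits=(0.05, 0.05)))      # clip the extreme 5% on each side

y = np.ma.masked_invalid(rng.normal(loc=5.5, scale=2.0, size=50))
print(mstats.ttest_ind(x, y))                        # masked-aware two-sample t-test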
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/__init__.py
""" ========================================== Statistical functions (:mod:`scipy.stats`) ========================================== .. module:: scipy.stats This module contains a large number of probability distributions as well as a growing library of statistical functions. Each univariate distribution is an instance of a subclass of `rv_continuous` (`rv_discrete` for discrete distributions): .. autosummary:: :toctree: generated/ rv_continuous rv_discrete rv_histogram Continuous distributions ======================== .. autosummary:: :toctree: generated/ alpha -- Alpha anglit -- Anglit arcsine -- Arcsine argus -- Argus beta -- Beta betaprime -- Beta Prime bradford -- Bradford burr -- Burr (Type III) burr12 -- Burr (Type XII) cauchy -- Cauchy chi -- Chi chi2 -- Chi-squared cosine -- Cosine crystalball -- Crystalball dgamma -- Double Gamma dweibull -- Double Weibull erlang -- Erlang expon -- Exponential exponnorm -- Exponentially Modified Normal exponweib -- Exponentiated Weibull exponpow -- Exponential Power f -- F (Snecdor F) fatiguelife -- Fatigue Life (Birnbaum-Saunders) fisk -- Fisk foldcauchy -- Folded Cauchy foldnorm -- Folded Normal frechet_r -- Deprecated. Alias for weibull_min frechet_l -- Deprecated. Alias for weibull_max genlogistic -- Generalized Logistic gennorm -- Generalized normal genpareto -- Generalized Pareto genexpon -- Generalized Exponential genextreme -- Generalized Extreme Value gausshyper -- Gauss Hypergeometric gamma -- Gamma gengamma -- Generalized gamma genhalflogistic -- Generalized Half Logistic gilbrat -- Gilbrat gompertz -- Gompertz (Truncated Gumbel) gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I gumbel_l -- Left Sided Gumbel, etc. halfcauchy -- Half Cauchy halflogistic -- Half Logistic halfnorm -- Half Normal halfgennorm -- Generalized Half Normal hypsecant -- Hyperbolic Secant invgamma -- Inverse Gamma invgauss -- Inverse Gaussian invweibull -- Inverse Weibull johnsonsb -- Johnson SB johnsonsu -- Johnson SU kappa4 -- Kappa 4 parameter kappa3 -- Kappa 3 parameter ksone -- Kolmogorov-Smirnov one-sided (no stats) kstwobign -- Kolmogorov-Smirnov two-sided test for Large N (no stats) laplace -- Laplace levy -- Levy levy_l levy_stable logistic -- Logistic loggamma -- Log-Gamma loglaplace -- Log-Laplace (Log Double Exponential) lognorm -- Log-Normal lomax -- Lomax (Pareto of the second kind) maxwell -- Maxwell mielke -- Mielke's Beta-Kappa moyal -- Moyal nakagami -- Nakagami ncx2 -- Non-central chi-squared ncf -- Non-central F nct -- Non-central Student's T norm -- Normal (Gaussian) norminvgauss -- Normal Inverse Gaussian pareto -- Pareto pearson3 -- Pearson type III powerlaw -- Power-function powerlognorm -- Power log normal powernorm -- Power normal rdist -- R-distribution reciprocal -- Reciprocal rayleigh -- Rayleigh rice -- Rice recipinvgauss -- Reciprocal Inverse Gaussian semicircular -- Semicircular skewnorm -- Skew normal t -- Student's T trapz -- Trapezoidal triang -- Triangular truncexpon -- Truncated Exponential truncnorm -- Truncated Normal tukeylambda -- Tukey-Lambda uniform -- Uniform vonmises -- Von-Mises (Circular) vonmises_line -- Von-Mises (Line) wald -- Wald weibull_min -- Minimum Weibull (see Frechet) weibull_max -- Maximum Weibull (see Frechet) wrapcauchy -- Wrapped Cauchy Multivariate distributions ========================== .. 
autosummary:: :toctree: generated/ multivariate_normal -- Multivariate normal distribution matrix_normal -- Matrix normal distribution dirichlet -- Dirichlet wishart -- Wishart invwishart -- Inverse Wishart multinomial -- Multinomial distribution special_ortho_group -- SO(N) group ortho_group -- O(N) group unitary_group -- U(N) group random_correlation -- random correlation matrices Discrete distributions ====================== .. autosummary:: :toctree: generated/ bernoulli -- Bernoulli binom -- Binomial boltzmann -- Boltzmann (Truncated Discrete Exponential) dlaplace -- Discrete Laplacian geom -- Geometric hypergeom -- Hypergeometric logser -- Logarithmic (Log-Series, Series) nbinom -- Negative Binomial planck -- Planck (Discrete Exponential) poisson -- Poisson randint -- Discrete Uniform skellam -- Skellam zipf -- Zipf Statistical functions ===================== Several of these functions have a similar version in scipy.stats.mstats which work for masked arrays. .. autosummary:: :toctree: generated/ describe -- Descriptive statistics gmean -- Geometric mean hmean -- Harmonic mean kurtosis -- Fisher or Pearson kurtosis kurtosistest -- mode -- Modal value moment -- Central moment normaltest -- skew -- Skewness skewtest -- kstat -- kstatvar -- tmean -- Truncated arithmetic mean tvar -- Truncated variance tmin -- tmax -- tstd -- tsem -- variation -- Coefficient of variation find_repeats trim_mean .. autosummary:: :toctree: generated/ cumfreq itemfreq percentileofscore scoreatpercentile relfreq .. autosummary:: :toctree: generated/ binned_statistic -- Compute a binned statistic for a set of data. binned_statistic_2d -- Compute a 2-D binned statistic for a set of data. binned_statistic_dd -- Compute a d-D binned statistic for a set of data. .. autosummary:: :toctree: generated/ obrientransform bayes_mvs mvsdist sem zmap zscore iqr .. autosummary:: :toctree: generated/ sigmaclip trimboth trim1 .. autosummary:: :toctree: generated/ f_oneway pearsonr spearmanr pointbiserialr kendalltau weightedtau linregress theilslopes .. autosummary:: :toctree: generated/ ttest_1samp ttest_ind ttest_ind_from_stats ttest_rel kstest chisquare power_divergence ks_2samp mannwhitneyu tiecorrect rankdata ranksums wilcoxon kruskal friedmanchisquare combine_pvalues jarque_bera .. autosummary:: :toctree: generated/ ansari bartlett levene shapiro anderson anderson_ksamp binom_test fligner median_test mood .. autosummary:: :toctree: generated/ boxcox boxcox_normmax boxcox_llf entropy .. autosummary:: :toctree: generated/ wasserstein_distance energy_distance Circular statistical functions ============================== .. autosummary:: :toctree: generated/ circmean circvar circstd Contingency table functions =========================== .. autosummary:: :toctree: generated/ chi2_contingency contingency.expected_freq contingency.margins fisher_exact Plot-tests ========== .. autosummary:: :toctree: generated/ ppcc_max ppcc_plot probplot boxcox_normplot Masked statistics functions =========================== .. toctree:: stats.mstats Univariate and multivariate kernel density estimation (:mod:`scipy.stats.kde`) ============================================================================== .. autosummary:: :toctree: generated/ gaussian_kde For many more stat related functions install the software R and the interface package rpy. 
""" from __future__ import division, print_function, absolute_import from .stats import * from .distributions import * from .morestats import * from ._binned_statistic import * from .kde import gaussian_kde from . import mstats from .contingency import chi2_contingency from ._multivariate import * __all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders. from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
file_length: 9,284
avg_line_length: 24.86351
max_line_length: 93
extension_type: py
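As a hedged illustration of the namespace assembled by the __init__.py above (the snippet is not part of that file), the code below freezes a distribution, draws variates, refits its parameters, and runs a test; the public names it touches (norm, ttest_1samp) are listed in the module docstring.

from scipy import stats

rv = stats.norm(loc=10.0, scale=2.0)             # "frozen" normal distribution
print(rv.mean(), rv.std())                       # 10.0 2.0
print(rv.cdf(12.0))                              # P(X <= 12)

sample = rv.rvs(size=200, random_state=1)        # reproducible random variates
loc_hat, scale_hat = stats.norm.fit(sample)      # maximum-likelihood estimates of loc and scale
print(loc_hat, scale_hat)

print(stats.ttest_1samp(sample, popmean=10.0))   # one-sample t-test from the same namespace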
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_distn_infrastructure.py
# # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # from __future__ import division, print_function, absolute_import from scipy._lib.six import string_types, exec_, PY3 from scipy._lib._util import getargspec_no_self as _getargspec import sys import keyword import re import types import warnings from scipy.misc import doccer from ._distr_params import distcont, distdiscrete from scipy._lib._util import check_random_state, _lazywhere, _lazyselect from scipy._lib._util import _valarray as valarray from scipy.special import (comb, chndtr, entr, rel_entr, kl_div, xlogy, ive) # for root finding for discrete distribution ppf, and max likelihood estimation from scipy import optimize # for functions of continuous distributions (e.g. moments, entropy, cdf) from scipy import integrate # to approximate the pdf of a continuous distribution given its cdf from scipy.misc import derivative from numpy import (arange, putmask, ravel, take, ones, shape, ndarray, product, reshape, zeros, floor, logical_and, log, sqrt, exp) from numpy import (place, argsort, argmax, vectorize, asarray, nan, inf, isinf, NINF, empty) import numpy as np from ._constants import _XMAX if PY3: def instancemethod(func, obj, cls): return types.MethodType(func, obj) else: instancemethod = types.MethodType # These are the docstring parts used for substitution in specific # distribution docstrings docheaders = {'methods': """\nMethods\n-------\n""", 'notes': """\nNotes\n-----\n""", 'examples': """\nExamples\n--------\n"""} _doc_rvs = """\ rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None) Random variates. """ _doc_pdf = """\ pdf(x, %(shapes)s, loc=0, scale=1) Probability density function. """ _doc_logpdf = """\ logpdf(x, %(shapes)s, loc=0, scale=1) Log of the probability density function. """ _doc_pmf = """\ pmf(k, %(shapes)s, loc=0, scale=1) Probability mass function. """ _doc_logpmf = """\ logpmf(k, %(shapes)s, loc=0, scale=1) Log of the probability mass function. """ _doc_cdf = """\ cdf(x, %(shapes)s, loc=0, scale=1) Cumulative distribution function. """ _doc_logcdf = """\ logcdf(x, %(shapes)s, loc=0, scale=1) Log of the cumulative distribution function. """ _doc_sf = """\ sf(x, %(shapes)s, loc=0, scale=1) Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate). """ _doc_logsf = """\ logsf(x, %(shapes)s, loc=0, scale=1) Log of the survival function. """ _doc_ppf = """\ ppf(q, %(shapes)s, loc=0, scale=1) Percent point function (inverse of ``cdf`` --- percentiles). """ _doc_isf = """\ isf(q, %(shapes)s, loc=0, scale=1) Inverse survival function (inverse of ``sf``). """ _doc_moment = """\ moment(n, %(shapes)s, loc=0, scale=1) Non-central moment of order n """ _doc_stats = """\ stats(%(shapes)s, loc=0, scale=1, moments='mv') Mean('m'), variance('v'), skew('s'), and/or kurtosis('k'). """ _doc_entropy = """\ entropy(%(shapes)s, loc=0, scale=1) (Differential) entropy of the RV. """ _doc_fit = """\ fit(data, %(shapes)s, loc=0, scale=1) Parameter estimates for generic data. """ _doc_expect = """\ expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds) Expected value of a function (of one argument) with respect to the distribution. """ _doc_expect_discrete = """\ expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False) Expected value of a function (of one argument) with respect to the distribution. """ _doc_median = """\ median(%(shapes)s, loc=0, scale=1) Median of the distribution. 
""" _doc_mean = """\ mean(%(shapes)s, loc=0, scale=1) Mean of the distribution. """ _doc_var = """\ var(%(shapes)s, loc=0, scale=1) Variance of the distribution. """ _doc_std = """\ std(%(shapes)s, loc=0, scale=1) Standard deviation of the distribution. """ _doc_interval = """\ interval(alpha, %(shapes)s, loc=0, scale=1) Endpoints of the range that contains alpha percent of the distribution """ _doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, _doc_stats, _doc_entropy, _doc_fit, _doc_expect, _doc_median, _doc_mean, _doc_var, _doc_std, _doc_interval]) _doc_default_longsummary = """\ As an instance of the `rv_continuous` class, `%(name)s` object inherits from it a collection of generic methods (see below for the full list), and completes them with details specific for this particular distribution. """ _doc_default_frozen_note = """ Alternatively, the object may be called (as a function) to fix the shape, location, and scale parameters returning a "frozen" continuous RV object: rv = %(name)s(%(shapes)s, loc=0, scale=1) - Frozen RV object with the same methods but holding the given shape, location, and scale fixed. """ _doc_default_example = """\ Examples -------- >>> from scipy.stats import %(name)s >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 1) Calculate a few first moments: %(set_vals_stmt)s >>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') Display the probability density function (``pdf``): >>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s), ... %(name)s.ppf(0.99, %(shapes)s), 100) >>> ax.plot(x, %(name)s.pdf(x, %(shapes)s), ... 'r-', lw=5, alpha=0.6, label='%(name)s pdf') Alternatively, the distribution object can be called (as a function) to fix the shape, location and scale parameters. This returns a "frozen" RV object holding the given parameters fixed. Freeze the distribution and display the frozen ``pdf``: >>> rv = %(name)s(%(shapes)s) >>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf') Check accuracy of ``cdf`` and ``ppf``: >>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s) >>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s)) True Generate random numbers: >>> r = %(name)s.rvs(%(shapes)s, size=1000) And compare the histogram: >>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2) >>> ax.legend(loc='best', frameon=False) >>> plt.show() """ _doc_default_locscale = """\ The probability density above is defined in the "standardized" form. To shift and/or scale the distribution use the ``loc`` and ``scale`` parameters. Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with ``y = (x - loc) / scale``. 
""" _doc_default = ''.join([_doc_default_longsummary, _doc_allmethods, '\n', _doc_default_example]) _doc_default_before_notes = ''.join([_doc_default_longsummary, _doc_allmethods]) docdict = { 'rvs': _doc_rvs, 'pdf': _doc_pdf, 'logpdf': _doc_logpdf, 'cdf': _doc_cdf, 'logcdf': _doc_logcdf, 'sf': _doc_sf, 'logsf': _doc_logsf, 'ppf': _doc_ppf, 'isf': _doc_isf, 'stats': _doc_stats, 'entropy': _doc_entropy, 'fit': _doc_fit, 'moment': _doc_moment, 'expect': _doc_expect, 'interval': _doc_interval, 'mean': _doc_mean, 'std': _doc_std, 'var': _doc_var, 'median': _doc_median, 'allmethods': _doc_allmethods, 'longsummary': _doc_default_longsummary, 'frozennote': _doc_default_frozen_note, 'example': _doc_default_example, 'default': _doc_default, 'before_notes': _doc_default_before_notes, 'after_notes': _doc_default_locscale } # Reuse common content between continuous and discrete docs, change some # minor bits. docdict_discrete = docdict.copy() docdict_discrete['pmf'] = _doc_pmf docdict_discrete['logpmf'] = _doc_logpmf docdict_discrete['expect'] = _doc_expect_discrete _doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median', 'mean', 'var', 'std', 'interval'] for obj in _doc_disc_methods: docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') _doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf'] for obj in _doc_disc_methods_err_varname: docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ') docdict_discrete.pop('pdf') docdict_discrete.pop('logpdf') _doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods]) docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods docdict_discrete['longsummary'] = _doc_default_longsummary.replace( 'rv_continuous', 'rv_discrete') _doc_default_frozen_note = """ Alternatively, the object may be called (as a function) to fix the shape and location parameters returning a "frozen" discrete RV object: rv = %(name)s(%(shapes)s, loc=0) - Frozen RV object with the same methods but holding the given shape and location fixed. """ docdict_discrete['frozennote'] = _doc_default_frozen_note _doc_default_discrete_example = """\ Examples -------- >>> from scipy.stats import %(name)s >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 1) Calculate a few first moments: %(set_vals_stmt)s >>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk') Display the probability mass function (``pmf``): >>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s), ... %(name)s.ppf(0.99, %(shapes)s)) >>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf') >>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5) Alternatively, the distribution object can be called (as a function) to fix the shape and location. This returns a "frozen" RV object holding the given parameters fixed. Freeze the distribution and display the frozen ``pmf``: >>> rv = %(name)s(%(shapes)s) >>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1, ... label='frozen pmf') >>> ax.legend(loc='best', frameon=False) >>> plt.show() Check accuracy of ``cdf`` and ``ppf``: >>> prob = %(name)s.cdf(x, %(shapes)s) >>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s)) True Generate random numbers: >>> r = %(name)s.rvs(%(shapes)s, size=1000) """ _doc_default_discrete_locscale = """\ The probability mass function above is defined in the "standardized" form. To shift distribution use the ``loc`` parameter. 
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``. """ docdict_discrete['example'] = _doc_default_discrete_example docdict_discrete['after_notes'] = _doc_default_discrete_locscale _doc_default_before_notes = ''.join([docdict_discrete['longsummary'], docdict_discrete['allmethods']]) docdict_discrete['before_notes'] = _doc_default_before_notes _doc_default_disc = ''.join([docdict_discrete['longsummary'], docdict_discrete['allmethods'], docdict_discrete['frozennote'], docdict_discrete['example']]) docdict_discrete['default'] = _doc_default_disc # clean up all the separate docstring elements, we do not need them anymore for obj in [s for s in dir() if s.startswith('_doc_')]: exec('del ' + obj) del obj try: del s except NameError: # in Python 3, loop variables are not visible after the loop pass def _moment(data, n, mu=None): if mu is None: mu = data.mean() return ((data - mu)**n).mean() def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): if (n == 0): return 1.0 elif (n == 1): if mu is None: val = moment_func(1, *args) else: val = mu elif (n == 2): if mu2 is None or mu is None: val = moment_func(2, *args) else: val = mu2 + mu*mu elif (n == 3): if g1 is None or mu2 is None or mu is None: val = moment_func(3, *args) else: mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment elif (n == 4): if g1 is None or g2 is None or mu2 is None or mu is None: val = moment_func(4, *args) else: mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment mu3 = g1*np.power(mu2, 1.5) # 3rd central moment val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu else: val = moment_func(n, *args) return val def _skew(data): """ skew is third central moment / variance**(1.5) """ data = np.ravel(data) mu = data.mean() m2 = ((data - mu)**2).mean() m3 = ((data - mu)**3).mean() return m3 / np.power(m2, 1.5) def _kurtosis(data): """ kurtosis is fourth central moment / variance**2 - 3 """ data = np.ravel(data) mu = data.mean() m2 = ((data - mu)**2).mean() m4 = ((data - mu)**4).mean() return m4 / m2**2 - 3 # Frozen RV class class rv_frozen(object): def __init__(self, dist, *args, **kwds): self.args = args self.kwds = kwds # create a new instance self.dist = dist.__class__(**dist._updated_ctor_param()) # a, b may be set in _argcheck, depending on *args, **kwds. Ouch. 
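# Calling _parse_args and _argcheck on the fresh copy gives distributions that
# adjust their support inside _argcheck a chance to update self.dist.a and
# self.dist.b before those bounds are copied onto the frozen instance below.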
shapes, _, _ = self.dist._parse_args(*args, **kwds) self.dist._argcheck(*shapes) self.a, self.b = self.dist.a, self.dist.b @property def random_state(self): return self.dist._random_state @random_state.setter def random_state(self, seed): self.dist._random_state = check_random_state(seed) def pdf(self, x): # raises AttributeError in frozen discrete distribution return self.dist.pdf(x, *self.args, **self.kwds) def logpdf(self, x): return self.dist.logpdf(x, *self.args, **self.kwds) def cdf(self, x): return self.dist.cdf(x, *self.args, **self.kwds) def logcdf(self, x): return self.dist.logcdf(x, *self.args, **self.kwds) def ppf(self, q): return self.dist.ppf(q, *self.args, **self.kwds) def isf(self, q): return self.dist.isf(q, *self.args, **self.kwds) def rvs(self, size=None, random_state=None): kwds = self.kwds.copy() kwds.update({'size': size, 'random_state': random_state}) return self.dist.rvs(*self.args, **kwds) def sf(self, x): return self.dist.sf(x, *self.args, **self.kwds) def logsf(self, x): return self.dist.logsf(x, *self.args, **self.kwds) def stats(self, moments='mv'): kwds = self.kwds.copy() kwds.update({'moments': moments}) return self.dist.stats(*self.args, **kwds) def median(self): return self.dist.median(*self.args, **self.kwds) def mean(self): return self.dist.mean(*self.args, **self.kwds) def var(self): return self.dist.var(*self.args, **self.kwds) def std(self): return self.dist.std(*self.args, **self.kwds) def moment(self, n): return self.dist.moment(n, *self.args, **self.kwds) def entropy(self): return self.dist.entropy(*self.args, **self.kwds) def pmf(self, k): return self.dist.pmf(k, *self.args, **self.kwds) def logpmf(self, k): return self.dist.logpmf(k, *self.args, **self.kwds) def interval(self, alpha): return self.dist.interval(alpha, *self.args, **self.kwds) def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds): # expect method only accepts shape parameters as positional args # hence convert self.args, self.kwds, also loc/scale # See the .expect method docstrings for the meaning of # other parameters. a, loc, scale = self.dist._parse_args(*self.args, **self.kwds) if isinstance(self.dist, rv_discrete): return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds) else: return self.dist.expect(func, a, loc, scale, lb, ub, conditional, **kwds) # This should be rewritten def argsreduce(cond, *args): """Return the sequence of ravel(args[i]) where ravel(condition) is True in 1D. Examples -------- >>> import numpy as np >>> rand = np.random.random_sample >>> A = rand((4, 5)) >>> B = 2 >>> C = rand((1, 5)) >>> cond = np.ones(A.shape) >>> [A1, B1, C1] = argsreduce(cond, A, B, C) >>> B1.shape (20,) >>> cond[2,:] = 0 >>> [A2, B2, C2] = argsreduce(cond, A, B, C) >>> B2.shape (15,) """ newargs = np.atleast_1d(*args) if not isinstance(newargs, list): newargs = [newargs, ] expand_arr = (cond == cond) return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs] parse_arg_template = """ def _parse_args(self, %(shape_arg_str)s %(locscale_in)s): return (%(shape_arg_str)s), %(locscale_out)s def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None): return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size) def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'): return (%(shape_arg_str)s), %(locscale_out)s, moments """ # Both the continuous and discrete distributions depend on ncx2. # I think the function name ncx2 is an abbreviation for noncentral chi squared. 
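# ive(v, z) is the exponentially scaled Bessel function, iv(v, z) * exp(-abs(z)),
# so folding the exp(-xs*ns) factor into ive keeps the evaluation finite where
# iv alone would overflow.  Illustrative check against the textbook density
# (the sample values are arbitrary):
# >>> from scipy.special import iv
# >>> x, df, nc = 5.0, 3.0, 2.0
# >>> direct = (0.5 * np.exp(-(x + nc)/2.) * (x/nc)**(df/4. - 0.5)
# ...           * iv(df/2. - 1., np.sqrt(nc*x)))
# >>> np.allclose(np.exp(_ncx2_log_pdf(x, df, nc)), direct)
# True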
def _ncx2_log_pdf(x, df, nc): # We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor # of exp(-xs*ns) into the ive function to improve numerical stability # at large values of xs. See also `rice.pdf`. df2 = df/2.0 - 1.0 xs, ns = np.sqrt(x), np.sqrt(nc) res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2 res += np.log(ive(df2, xs*ns) / 2.0) return res def _ncx2_pdf(x, df, nc): return np.exp(_ncx2_log_pdf(x, df, nc)) def _ncx2_cdf(x, df, nc): return chndtr(x, df, nc) class rv_generic(object): """Class which encapsulates common functionality between rv_discrete and rv_continuous. """ def __init__(self, seed=None): super(rv_generic, self).__init__() # figure out if _stats signature has 'moments' keyword sign = _getargspec(self._stats) self._stats_has_moments = ((sign[2] is not None) or ('moments' in sign[0])) self._random_state = check_random_state(seed) @property def random_state(self): """ Get or set the RandomState object for generating random variates. This can be either None or an existing RandomState object. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState instance, use it. If an int, use a new RandomState instance seeded with seed. """ return self._random_state @random_state.setter def random_state(self, seed): self._random_state = check_random_state(seed) def __getstate__(self): return self._updated_ctor_param(), self._random_state def __setstate__(self, state): ctor_param, r = state self.__init__(**ctor_param) self._random_state = r return self def _construct_argparser( self, meths_to_inspect, locscale_in, locscale_out): """Construct the parser for the shape arguments. Generates the argument-parsing functions dynamically and attaches them to the instance. Is supposed to be called in __init__ of a class for each distribution. If self.shapes is a non-empty string, interprets it as a comma-separated list of shape parameters. Otherwise inspects the call signatures of `meths_to_inspect` and constructs the argument-parsing functions from these. In this case also sets `shapes` and `numargs`. """ if self.shapes: # sanitize the user-supplied shapes if not isinstance(self.shapes, string_types): raise TypeError('shapes must be a string.') shapes = self.shapes.replace(',', ' ').split() for field in shapes: if keyword.iskeyword(field): raise SyntaxError('keywords cannot be used as shapes.') if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field): raise SyntaxError( 'shapes must be valid python identifiers') else: # find out the call signatures (_pdf, _cdf etc), deduce shape # arguments. Generic methods only have 'self, x', any further args # are shapes. 
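# For example, a subclass that defines _pdf(self, x, a, b) and passes no
# explicit `shapes` string ends up with shapes == 'a, b' and numargs == 2.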
shapes_list = [] for meth in meths_to_inspect: shapes_args = _getargspec(meth) # NB: does not contain self args = shapes_args.args[1:] # peel off 'x', too if args: shapes_list.append(args) # *args or **kwargs are not allowed w/automatic shapes if shapes_args.varargs is not None: raise TypeError( '*args are not allowed w/out explicit shapes') if shapes_args.keywords is not None: raise TypeError( '**kwds are not allowed w/out explicit shapes') if shapes_args.defaults is not None: raise TypeError('defaults are not allowed for shapes') if shapes_list: shapes = shapes_list[0] # make sure the signatures are consistent for item in shapes_list: if item != shapes: raise TypeError('Shape arguments are inconsistent.') else: shapes = [] # have the arguments, construct the method from template shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None dct = dict(shape_arg_str=shapes_str, locscale_in=locscale_in, locscale_out=locscale_out, ) ns = {} exec_(parse_arg_template % dct, ns) # NB: attach to the instance, not class for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']: setattr(self, name, instancemethod(ns[name], self, self.__class__) ) self.shapes = ', '.join(shapes) if shapes else None if not hasattr(self, 'numargs'): # allows more general subclassing with *args self.numargs = len(shapes) def _construct_doc(self, docdict, shapes_vals=None): """Construct the instance docstring with string substitutions.""" tempdict = docdict.copy() tempdict['name'] = self.name or 'distname' tempdict['shapes'] = self.shapes or '' if shapes_vals is None: shapes_vals = () vals = ', '.join('%.3g' % val for val in shapes_vals) tempdict['vals'] = vals tempdict['shapes_'] = self.shapes or '' if self.shapes and self.numargs == 1: tempdict['shapes_'] += ',' if self.shapes: tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals) else: tempdict['set_vals_stmt'] = '' if self.shapes is None: # remove shapes from call parameters if there are none for item in ['default', 'before_notes']: tempdict[item] = tempdict[item].replace( "\n%(shapes)s : array_like\n shape parameters", "") for i in range(2): if self.shapes is None: # necessary because we use %(shapes)s in two forms (w w/o ", ") self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") self.__doc__ = doccer.docformat(self.__doc__, tempdict) # correct for empty shapes self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')') def _construct_default_doc(self, longname=None, extradoc=None, docdict=None, discrete='continuous'): """Construct instance docstring from the default template.""" if longname is None: longname = 'A' if extradoc is None: extradoc = '' if extradoc.startswith('\n\n'): extradoc = extradoc[2:] self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete), '\n\n%(before_notes)s\n', docheaders['notes'], extradoc, '\n%(example)s']) self._construct_doc(docdict) def freeze(self, *args, **kwds): """Freeze the distribution for the given arguments. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution. Should include all the non-optional arguments, may include ``loc`` and ``scale``. Returns ------- rv_frozen : rv_frozen instance The frozen distribution. """ return rv_frozen(self, *args, **kwds) def __call__(self, *args, **kwds): return self.freeze(*args, **kwds) __call__.__doc__ = freeze.__doc__ # The actual calculation functions (no basic checking need be done) # If these are defined, the others won't be looked at. # Otherwise, the other set can be defined. 
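# In practice a subclass overrides either _stats (returning mu, mu2, g1, g2,
# any of which may be None) or _munp (returning the n-th non-central moment);
# missing values are computed generically (by numerical integration or summation).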
def _stats(self, *args, **kwds): return None, None, None, None # Central moments def _munp(self, n, *args): # Silence floating point warnings from integration. olderr = np.seterr(all='ignore') vals = self.generic_moment(n, *args) np.seterr(**olderr) return vals def _argcheck_rvs(self, *args, **kwargs): # Handle broadcasting and size validation of the rvs method. # Subclasses should not have to override this method. # The rule is that if `size` is not None, then `size` gives the # shape of the result (integer values of `size` are treated as # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.) # # `args` is expected to contain the shape parameters (if any), the # location and the scale in a flat tuple (e.g. if there are two # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`). # The only keyword argument expected is 'size'. size = kwargs.get('size', None) all_bcast = np.broadcast_arrays(*args) def squeeze_left(a): while a.ndim > 0 and a.shape[0] == 1: a = a[0] return a # Eliminate trivial leading dimensions. In the convention # used by numpy's random variate generators, trivial leading # dimensions are effectively ignored. In other words, when `size` # is given, trivial leading dimensions of the broadcast parameters # in excess of the number of dimensions in size are ignored, e.g. # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3) # array([ 1.00104267, 3.00422496, 4.99799278]) # If `size` is not given, the exact broadcast shape is preserved: # >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]]) # array([[[[ 1.00862899, 3.00061431, 4.99867122]]]]) # all_bcast = [squeeze_left(a) for a in all_bcast] bcast_shape = all_bcast[0].shape bcast_ndim = all_bcast[0].ndim if size is None: size_ = bcast_shape else: size_ = tuple(np.atleast_1d(size)) # Check compatibility of size_ with the broadcast shape of all # the parameters. This check is intended to be consistent with # how the numpy random variate generators (e.g. np.random.normal, # np.random.beta) handle their arguments. The rule is that, if size # is given, it determines the shape of the output. Broadcasting # can't change the output size. # This is the standard broadcasting convention of extending the # shape with fewer dimensions with enough dimensions of length 1 # so that the two shapes have the same number of dimensions. ndiff = bcast_ndim - len(size_) if ndiff < 0: bcast_shape = (1,)*(-ndiff) + bcast_shape elif ndiff > 0: size_ = (1,)*ndiff + size_ # This compatibility test is not standard. In "regular" broadcasting, # two shapes are compatible if for each dimension, the lengths are the # same or one of the lengths is 1. Here, the length of a dimension in # size_ must not be less than the corresponding length in bcast_shape. ok = all([bcdim == 1 or bcdim == szdim for (bcdim, szdim) in zip(bcast_shape, size_)]) if not ok: raise ValueError("size does not match the broadcast shape of " "the parameters.") param_bcast = all_bcast[:-2] loc_bcast = all_bcast[-2] scale_bcast = all_bcast[-1] return param_bcast, loc_bcast, scale_bcast, size_ ## These are the methods you must define (standard form functions) ## NB: generic _pdf, _logpdf, _cdf are different for ## rv_continuous and rv_discrete hence are defined in there def _argcheck(self, *args): """Default check for correct values on args and keywords. Returns condition array of 1's where arguments are correct and 0's where they are not. 
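The default implementation accepts only strictly positive shape
parameters; distributions whose parameters have a different valid
range should override this method.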
""" cond = 1 for arg in args: cond = logical_and(cond, (asarray(arg) > 0)) return cond def _support_mask(self, x): return (self.a <= x) & (x <= self.b) def _open_support_mask(self, x): return (self.a < x) & (x < self.b) def _rvs(self, *args): # This method must handle self._size being a tuple, and it must # properly broadcast *args and self._size. self._size might be # an empty tuple, which means a scalar random variate is to be # generated. ## Use basic inverse cdf algorithm for RV generation as default. U = self._random_state.random_sample(self._size) Y = self._ppf(U, *args) return Y def _logcdf(self, x, *args): return log(self._cdf(x, *args)) def _sf(self, x, *args): return 1.0-self._cdf(x, *args) def _logsf(self, x, *args): return log(self._sf(x, *args)) def _ppf(self, q, *args): return self._ppfvec(q, *args) def _isf(self, q, *args): return self._ppf(1.0-q, *args) # use correct _ppf for subclasses # These are actually called, and should not be overwritten if you # want to keep error checking. def rvs(self, *args, **kwds): """ Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional Scale parameter (default=1). size : int or tuple of ints, optional Defining number of random variates (default is 1). random_state : None or int or ``np.random.RandomState`` instance, optional If int or RandomState, use it for drawing the random variates. If None, rely on ``self.random_state``. Default is None. Returns ------- rvs : ndarray or scalar Random variates of given `size`. """ discrete = kwds.pop('discrete', None) rndm = kwds.pop('random_state', None) args, loc, scale, size = self._parse_args_rvs(*args, **kwds) cond = logical_and(self._argcheck(*args), (scale >= 0)) if not np.all(cond): raise ValueError("Domain error in arguments.") if np.all(scale == 0): return loc*ones(size, 'd') # extra gymnastics needed for a custom random_state if rndm is not None: random_state_saved = self._random_state self._random_state = check_random_state(rndm) # `size` should just be an argument to _rvs(), but for, um, # historical reasons, it is made an attribute that is read # by _rvs(). self._size = size vals = self._rvs(*args) vals = vals * scale + loc # do not forget to restore the _random_state if rndm is not None: self._random_state = random_state_saved # Cast to int if discrete if discrete: if size == (): vals = int(vals) else: vals = vals.astype(int) return vals def stats(self, *args, **kwds): """ Some statistics of the given RV. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional (continuous RVs only) scale parameter (default=1) moments : str, optional composed of letters ['mvsk'] defining which moments to compute: 'm' = mean, 'v' = variance, 's' = (Fisher's) skew, 'k' = (Fisher's) kurtosis. (default is 'mv') Returns ------- stats : sequence of requested moments. 
""" args, loc, scale, moments = self._parse_args_stats(*args, **kwds) # scale = 1 by construction for discrete RVs loc, scale = map(asarray, (loc, scale)) args = tuple(map(asarray, args)) cond = self._argcheck(*args) & (scale > 0) & (loc == loc) output = [] default = valarray(shape(cond), self.badvalue) # Use only entries that are valid in calculation if np.any(cond): goodargs = argsreduce(cond, *(args+(scale, loc))) scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] if self._stats_has_moments: mu, mu2, g1, g2 = self._stats(*goodargs, **{'moments': moments}) else: mu, mu2, g1, g2 = self._stats(*goodargs) if g1 is None: mu3 = None else: if mu2 is None: mu2 = self._munp(2, *goodargs) if g2 is None: # (mu2**1.5) breaks down for nan and inf mu3 = g1 * np.power(mu2, 1.5) if 'm' in moments: if mu is None: mu = self._munp(1, *goodargs) out0 = default.copy() place(out0, cond, mu * scale + loc) output.append(out0) if 'v' in moments: if mu2 is None: mu2p = self._munp(2, *goodargs) if mu is None: mu = self._munp(1, *goodargs) mu2 = mu2p - mu * mu if np.isinf(mu): # if mean is inf then var is also inf mu2 = np.inf out0 = default.copy() place(out0, cond, mu2 * scale * scale) output.append(out0) if 's' in moments: if g1 is None: mu3p = self._munp(3, *goodargs) if mu is None: mu = self._munp(1, *goodargs) if mu2 is None: mu2p = self._munp(2, *goodargs) mu2 = mu2p - mu * mu with np.errstate(invalid='ignore'): mu3 = mu3p - 3 * mu * mu2 - mu**3 g1 = mu3 / np.power(mu2, 1.5) out0 = default.copy() place(out0, cond, g1) output.append(out0) if 'k' in moments: if g2 is None: mu4p = self._munp(4, *goodargs) if mu is None: mu = self._munp(1, *goodargs) if mu2 is None: mu2p = self._munp(2, *goodargs) mu2 = mu2p - mu * mu if mu3 is None: mu3p = self._munp(3, *goodargs) with np.errstate(invalid='ignore'): mu3 = mu3p - 3 * mu * mu2 - mu**3 with np.errstate(invalid='ignore'): mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4 g2 = mu4 / mu2**2.0 - 3.0 out0 = default.copy() place(out0, cond, g2) output.append(out0) else: # no valid args output = [] for _ in moments: out0 = default.copy() output.append(out0) if len(output) == 1: return output[0] else: return tuple(output) def entropy(self, *args, **kwds): """ Differential entropy of the RV. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional (continuous distributions only). Scale parameter (default=1). Notes ----- Entropy is defined base `e`: >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5))) >>> np.allclose(drv.entropy(), np.log(2.0)) True """ args, loc, scale = self._parse_args(*args, **kwds) # NB: for discrete distributions scale=1 by construction in _parse_args args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) output = zeros(shape(cond0), 'd') place(output, (1-cond0), self.badvalue) goodargs = argsreduce(cond0, *args) place(output, cond0, self.vecentropy(*goodargs) + log(scale)) return output def moment(self, n, *args, **kwds): """ n-th order non-central moment of distribution. Parameters ---------- n : int, n >= 1 Order of moment. arg1, arg2, arg3,... : float The shape parameter(s) for the distribution (see docstring of the instance object for more information). 
loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) """ args, loc, scale = self._parse_args(*args, **kwds) if not (self._argcheck(*args) and (scale > 0)): return nan if (floor(n) != n): raise ValueError("Moment must be an integer.") if (n < 0): raise ValueError("Moment must be positive.") mu, mu2, g1, g2 = None, None, None, None if (n > 0) and (n < 5): if self._stats_has_moments: mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]} else: mdict = {} mu, mu2, g1, g2 = self._stats(*args, **mdict) val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args) # Convert to transformed X = L + S*Y # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n) if loc == 0: return scale**n * val else: result = 0 fac = float(scale) / float(loc) for k in range(n): valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args) result += comb(n, k, exact=True)*(fac**k) * valk result += fac**n * val return result * loc**n def median(self, *args, **kwds): """ Median of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional Location parameter, Default is 0. scale : array_like, optional Scale parameter, Default is 1. Returns ------- median : float The median of the distribution. See Also -------- stats.distributions.rv_discrete.ppf Inverse of the CDF """ return self.ppf(0.5, *args, **kwds) def mean(self, *args, **kwds): """ Mean of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- mean : float the mean of the distribution """ kwds['moments'] = 'm' res = self.stats(*args, **kwds) if isinstance(res, ndarray) and res.ndim == 0: return res[()] return res def var(self, *args, **kwds): """ Variance of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- var : float the variance of the distribution """ kwds['moments'] = 'v' res = self.stats(*args, **kwds) if isinstance(res, ndarray) and res.ndim == 0: return res[()] return res def std(self, *args, **kwds): """ Standard deviation of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- std : float standard deviation of the distribution """ kwds['moments'] = 'v' res = sqrt(self.stats(*args, **kwds)) return res def interval(self, alpha, *args, **kwds): """ Confidence interval with equal areas around the median. Parameters ---------- alpha : array_like of float Probability that an rv will be drawn from the returned range. Each value should be in the range [0, 1]. arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional location parameter, Default is 0. 
scale : array_like, optional scale parameter, Default is 1. Returns ------- a, b : ndarray of float end-points of range that contain ``100 * alpha %`` of the rv's possible values. """ alpha = asarray(alpha) if np.any((alpha > 1) | (alpha < 0)): raise ValueError("alpha must be between 0 and 1 inclusive") q1 = (1.0-alpha)/2 q2 = (1.0+alpha)/2 a = self.ppf(q1, *args, **kwds) b = self.ppf(q2, *args, **kwds) return a, b ## continuous random variables: implement maybe later ## ## hf --- Hazard Function (PDF / SF) ## chf --- Cumulative hazard function (-log(SF)) ## psf --- Probability sparsity function (reciprocal of the pdf) in ## units of percent-point-function (as a function of q). ## Also, the derivative of the percent-point function. class rv_continuous(rv_generic): """ A generic continuous random variable class meant for subclassing. `rv_continuous` is a base class to construct specific distribution classes and instances for continuous random variables. It cannot be used directly as a distribution. Parameters ---------- momtype : int, optional The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf. a : float, optional Lower bound of the support of the distribution, default is minus infinity. b : float, optional Upper bound of the support of the distribution, default is plus infinity. xtol : float, optional The tolerance for fixed point calculation for generic ppf. badvalue : float, optional The value in a result arrays that indicates a value that for which some argument restriction is violated, default is np.nan. name : str, optional The name of the instance. This string is used to construct the default example for distributions. longname : str, optional This string is used as part of the first line of the docstring returned when a subclass has no docstring of its own. Note: `longname` exists for backwards compatibility, do not use for new subclasses. shapes : str, optional The shape of the distribution. For example ``"m, n"`` for a distribution that takes two integers as the two shape arguments for all its methods. If not provided, shape parameters will be inferred from the signature of the private methods, ``_pdf`` and ``_cdf`` of the instance. extradoc : str, optional, deprecated This string is used as the last part of the docstring returned when a subclass has no docstring of its own. Note: `extradoc` exists for backwards compatibility, do not use for new subclasses. seed : None or int or ``numpy.random.RandomState`` instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance. Default is None. Methods ------- rvs pdf logpdf cdf logcdf sf logsf ppf isf moment stats entropy expect median mean std var interval __call__ fit fit_loc_scale nnlf Notes ----- Public methods of an instance of a distribution class (e.g., ``pdf``, ``cdf``) check their arguments and pass valid arguments to private, computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid if it is within the support of a distribution, ``self.a <= x <= self.b``. Whether a shape parameter is valid is decided by an ``_argcheck`` method (which defaults to checking that its arguments are strictly positive.) **Subclassing** New random variables can be defined by subclassing the `rv_continuous` class and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized to location 0 and scale 1). 
If positive argument checking is not correct for your RV then you will also need to re-define the ``_argcheck`` method. Correct, but potentially slow defaults exist for the remaining methods but for speed and/or accuracy you can over-ride:: _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could. **Methods that can be overwritten by subclasses** :: _rvs _pdf _cdf _sf _ppf _isf _stats _munp _entropy _argcheck There are additional (internal and private) generic methods that can be useful for cross-checking and for debugging, but might work in all cases when directly called. A note on ``shapes``: subclasses need not specify them explicitly. In this case, `shapes` will be automatically deduced from the signatures of the overridden methods (`pdf`, `cdf` etc). If, for some reason, you prefer to avoid relying on introspection, you can specify ``shapes`` explicitly as an argument to the instance constructor. **Frozen Distributions** Normally, you must provide shape parameters (and, optionally, location and scale parameters to each call of a method of a distribution. Alternatively, the object may be called (as a function) to fix the shape, location, and scale parameters returning a "frozen" continuous RV object: rv = generic(<shape(s)>, loc=0, scale=1) frozen RV object with the same methods but holding the given shape, location, and scale fixed **Statistics** Statistics are computed using numerical integration by default. For speed you can redefine this using ``_stats``: - take shape parameters and return mu, mu2, g1, g2 - If you can't compute one of these, return it as None - Can also be defined with a keyword argument ``moments``, which is a string composed of "m", "v", "s", and/or "k". Only the components appearing in string should be computed and returned in the order "m", "v", "s", or "k" with missing values returned as None. Alternatively, you can override ``_munp``, which takes ``n`` and shape parameters and returns the n-th non-central moment of the distribution. Examples -------- To create a new Gaussian distribution, we would do the following: >>> from scipy.stats import rv_continuous >>> class gaussian_gen(rv_continuous): ... "Gaussian distribution" ... def _pdf(self, x): ... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi) >>> gaussian = gaussian_gen(name='gaussian') ``scipy.stats`` distributions are *instances*, so here we subclass `rv_continuous` and create an instance. With this, we now have a fully functional distribution with all relevant methods automagically generated by the framework. Note that above we defined a standard normal distribution, with zero mean and unit variance. Shifting and scaling of the distribution can be done by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)`` essentially computes ``y = (x - loc) / scale`` and ``gaussian._pdf(y) / scale``. 
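A distribution with a shape parameter is defined the same way; the
parameter is deduced from the signature of ``_pdf``.  A minimal sketch
(the class and name below are purely illustrative):

>>> class weibull_like_gen(rv_continuous):
...     "Weibull-type distribution with shape parameter c"
...     def _pdf(self, x, c):
...         return c * x**(c - 1.) * np.exp(-x**c)
>>> weibull_like = weibull_like_gen(a=0., name='weibull_like')
>>> np.allclose(weibull_like.pdf(1., 2.), 2. / np.e)
True

Here ``a=0.`` sets the lower bound of the support, and the shape
parameter ``c`` becomes the first argument of every public method.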
""" def __init__(self, momtype=1, a=None, b=None, xtol=1e-14, badvalue=None, name=None, longname=None, shapes=None, extradoc=None, seed=None): super(rv_continuous, self).__init__(seed) # save the ctor parameters, cf generic freeze self._ctor_param = dict( momtype=momtype, a=a, b=b, xtol=xtol, badvalue=badvalue, name=name, longname=longname, shapes=shapes, extradoc=extradoc, seed=seed) if badvalue is None: badvalue = nan if name is None: name = 'Distribution' self.badvalue = badvalue self.name = name self.a = a self.b = b if a is None: self.a = -inf if b is None: self.b = inf self.xtol = xtol self.moment_type = momtype self.shapes = shapes self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf], locscale_in='loc=0, scale=1', locscale_out='loc, scale') # nin correction self._ppfvec = vectorize(self._ppf_single, otypes='d') self._ppfvec.nin = self.numargs + 1 self.vecentropy = vectorize(self._entropy, otypes='d') self._cdfvec = vectorize(self._cdf_single, otypes='d') self._cdfvec.nin = self.numargs + 1 self.extradoc = extradoc if momtype == 0: self.generic_moment = vectorize(self._mom0_sc, otypes='d') else: self.generic_moment = vectorize(self._mom1_sc, otypes='d') # Because of the *args argument of _mom0_sc, vectorize cannot count the # number of arguments correctly. self.generic_moment.nin = self.numargs + 1 if longname is None: if name[0] in ['aeiouAEIOU']: hstr = "An " else: hstr = "A " longname = hstr + name if sys.flags.optimize < 2: # Skip adding docstrings if interpreter is run with -OO if self.__doc__ is None: self._construct_default_doc(longname=longname, extradoc=extradoc, docdict=docdict, discrete='continuous') else: dct = dict(distcont) self._construct_doc(docdict, dct.get(self.name)) def _updated_ctor_param(self): """ Return the current version of _ctor_param, possibly updated by user. Used by freezing and pickling. Keep this in sync with the signature of __init__. """ dct = self._ctor_param.copy() dct['a'] = self.a dct['b'] = self.b dct['xtol'] = self.xtol dct['badvalue'] = self.badvalue dct['name'] = self.name dct['shapes'] = self.shapes dct['extradoc'] = self.extradoc return dct def _ppf_to_solve(self, x, q, *args): return self.cdf(*(x, )+args)-q def _ppf_single(self, q, *args): left = right = None if self.a > -np.inf: left = self.a if self.b < np.inf: right = self.b factor = 10. if not left: # i.e. self.a = -inf left = -1.*factor while self._ppf_to_solve(left, q, *args) > 0.: right = left left *= factor # left is now such that cdf(left) < q if not right: # i.e. 
self.b = inf right = factor while self._ppf_to_solve(right, q, *args) < 0.: left = right right *= factor # right is now such that cdf(right) > q return optimize.brentq(self._ppf_to_solve, left, right, args=(q,)+args, xtol=self.xtol) # moment from definition def _mom_integ0(self, x, m, *args): return x**m * self.pdf(x, *args) def _mom0_sc(self, m, *args): return integrate.quad(self._mom_integ0, self.a, self.b, args=(m,)+args)[0] # moment calculated using ppf def _mom_integ1(self, q, m, *args): return (self.ppf(q, *args))**m def _mom1_sc(self, m, *args): return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0] def _pdf(self, x, *args): return derivative(self._cdf, x, dx=1e-5, args=args, order=5) ## Could also define any of these def _logpdf(self, x, *args): return log(self._pdf(x, *args)) def _cdf_single(self, x, *args): return integrate.quad(self._pdf, self.a, x, args=args)[0] def _cdf(self, x, *args): return self._cdfvec(x, *args) ## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined ## in rv_generic def pdf(self, x, *args, **kwds): """ Probability density function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- pdf : ndarray Probability density function evaluated at x """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._support_mask(x) & (scale > 0) cond = cond0 & cond1 output = zeros(shape(cond), dtyp) putmask(output, (1-cond0)+np.isnan(x), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((x,)+args+(scale,))) scale, goodargs = goodargs[-1], goodargs[:-1] place(output, cond, self._pdf(*goodargs) / scale) if output.ndim == 0: return output[()] return output def logpdf(self, x, *args, **kwds): """ Log of the probability density function at x of the given RV. This uses a more numerically accurate calculation if available. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logpdf : array_like Log of the probability density function evaluated at x """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._support_mask(x) & (scale > 0) cond = cond0 & cond1 output = empty(shape(cond), dtyp) output.fill(NINF) putmask(output, (1-cond0)+np.isnan(x), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((x,)+args+(scale,))) scale, goodargs = goodargs[-1], goodargs[:-1] place(output, cond, self._logpdf(*goodargs) - log(scale)) if output.ndim == 0: return output[()] return output def cdf(self, x, *args, **kwds): """ Cumulative distribution function of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- cdf : ndarray Cumulative distribution function evaluated at `x` """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x) & (scale > 0) cond2 = (x >= self.b) & cond0 cond = cond0 & cond1 output = zeros(shape(cond), dtyp) place(output, (1-cond0)+np.isnan(x), self.badvalue) place(output, cond2, 1.0) if np.any(cond): # call only if at least 1 entry goodargs = argsreduce(cond, *((x,)+args)) place(output, cond, self._cdf(*goodargs)) if output.ndim == 0: return output[()] return output def logcdf(self, x, *args, **kwds): """ Log of the cumulative distribution function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at x """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x) & (scale > 0) cond2 = (x >= self.b) & cond0 cond = cond0 & cond1 output = empty(shape(cond), dtyp) output.fill(NINF) place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue) place(output, cond2, 0.0) if np.any(cond): # call only if at least 1 entry goodargs = argsreduce(cond, *((x,)+args)) place(output, cond, self._logcdf(*goodargs)) if output.ndim == 0: return output[()] return output def sf(self, x, *args, **kwds): """ Survival function (1 - `cdf`) at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- sf : array_like Survival function evaluated at x """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x) & (scale > 0) cond2 = cond0 & (x <= self.a) cond = cond0 & cond1 output = zeros(shape(cond), dtyp) place(output, (1-cond0)+np.isnan(x), self.badvalue) place(output, cond2, 1.0) if np.any(cond): goodargs = argsreduce(cond, *((x,)+args)) place(output, cond, self._sf(*goodargs)) if output.ndim == 0: return output[()] return output def logsf(self, x, *args, **kwds): """ Log of the survival function of the given RV. Returns the log of the "survival function," defined as (1 - `cdf`), evaluated at `x`. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logsf : ndarray Log of the survival function evaluated at `x`. """ args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) dtyp = np.find_common_type([x.dtype, np.float64], []) x = np.asarray((x - loc)/scale, dtype=dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x) & (scale > 0) cond2 = cond0 & (x <= self.a) cond = cond0 & cond1 output = empty(shape(cond), dtyp) output.fill(NINF) place(output, (1-cond0)+np.isnan(x), self.badvalue) place(output, cond2, 0.0) if np.any(cond): goodargs = argsreduce(cond, *((x,)+args)) place(output, cond, self._logsf(*goodargs)) if output.ndim == 0: return output[()] return output def ppf(self, q, *args, **kwds): """ Percent point function (inverse of `cdf`) at q of the given RV. Parameters ---------- q : array_like lower tail probability arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- x : array_like quantile corresponding to the lower tail probability q. """ args, loc, scale = self._parse_args(*args, **kwds) q, loc, scale = map(asarray, (q, loc, scale)) args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) cond1 = (0 < q) & (q < 1) cond2 = cond0 & (q == 0) cond3 = cond0 & (q == 1) cond = cond0 & cond1 output = valarray(shape(cond), value=self.badvalue) lower_bound = self.a * scale + loc upper_bound = self.b * scale + loc place(output, cond2, argsreduce(cond2, lower_bound)[0]) place(output, cond3, argsreduce(cond3, upper_bound)[0]) if np.any(cond): # call only if at least 1 entry goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] place(output, cond, self._ppf(*goodargs) * scale + loc) if output.ndim == 0: return output[()] return output def isf(self, q, *args, **kwds): """ Inverse survival function (inverse of `sf`) at q of the given RV. Parameters ---------- q : array_like upper tail probability arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- x : ndarray or scalar Quantile corresponding to the upper tail probability q. 
""" args, loc, scale = self._parse_args(*args, **kwds) q, loc, scale = map(asarray, (q, loc, scale)) args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc) cond1 = (0 < q) & (q < 1) cond2 = cond0 & (q == 1) cond3 = cond0 & (q == 0) cond = cond0 & cond1 output = valarray(shape(cond), value=self.badvalue) lower_bound = self.a * scale + loc upper_bound = self.b * scale + loc place(output, cond2, argsreduce(cond2, lower_bound)[0]) place(output, cond3, argsreduce(cond3, upper_bound)[0]) if np.any(cond): goodargs = argsreduce(cond, *((q,)+args+(scale, loc))) scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] place(output, cond, self._isf(*goodargs) * scale + loc) if output.ndim == 0: return output[()] return output def _nnlf(self, x, *args): return -np.sum(self._logpdf(x, *args), axis=0) def _unpack_loc_scale(self, theta): try: loc = theta[-2] scale = theta[-1] args = tuple(theta[:-2]) except IndexError: raise ValueError("Not enough input arguments.") return loc, scale, args def nnlf(self, theta, x): '''Return negative loglikelihood function. Notes ----- This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the parameters (including loc and scale). ''' loc, scale, args = self._unpack_loc_scale(theta) if not self._argcheck(*args) or scale <= 0: return inf x = asarray((x-loc) / scale) n_log_scale = len(x) * log(scale) if np.any(~self._support_mask(x)): return inf return self._nnlf(x, *args) + n_log_scale def _nnlf_and_penalty(self, x, args): cond0 = ~self._support_mask(x) n_bad = np.count_nonzero(cond0, axis=0) if n_bad > 0: x = argsreduce(~cond0, x)[0] logpdf = self._logpdf(x, *args) finite_logpdf = np.isfinite(logpdf) n_bad += np.sum(~finite_logpdf, axis=0) if n_bad > 0: penalty = n_bad * log(_XMAX) * 100 return -np.sum(logpdf[finite_logpdf], axis=0) + penalty return -np.sum(logpdf, axis=0) def _penalized_nnlf(self, theta, x): ''' Return penalized negative loglikelihood function, i.e., - sum (log pdf(x, theta), axis=0) + penalty where theta are the parameters (including loc and scale) ''' loc, scale, args = self._unpack_loc_scale(theta) if not self._argcheck(*args) or scale <= 0: return inf x = asarray((x-loc) / scale) n_log_scale = len(x) * log(scale) return self._nnlf_and_penalty(x, args) + n_log_scale # return starting point for fit (shape arguments + loc + scale) def _fitstart(self, data, args=None): if args is None: args = (1.0,)*self.numargs loc, scale = self._fit_loc_scale_support(data, *args) return args + (loc, scale) # Return the (possibly reduced) function to optimize in order to find MLE # estimates for the .fit method def _reduce_func(self, args, kwds): # First of all, convert fshapes params to fnum: eg for stats.beta, # shapes='a, b'. To fix `a`, can specify either `f1` or `fa`. # Convert the latter into the former. if self.shapes: shapes = self.shapes.replace(',', ' ').split() for j, s in enumerate(shapes): val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None) if val is not None: key = 'f%d' % j if key in kwds: raise ValueError("Duplicate entry for %s." % key) else: kwds[key] = val args = list(args) Nargs = len(args) fixedn = [] names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] x0 = [] for n, key in enumerate(names): if key in kwds: fixedn.append(n) args[n] = kwds.pop(key) else: x0.append(args[n]) if len(fixedn) == 0: func = self._penalized_nnlf restore = None else: if len(fixedn) == Nargs: raise ValueError( "All parameters fixed. 
There is nothing to optimize.") def restore(args, theta): # Replace with theta for all numbers not in fixedn # This allows the non-fixed values to vary, but # we still call self.nnlf with all parameters. i = 0 for n in range(Nargs): if n not in fixedn: args[n] = theta[i] i += 1 return args def func(theta, x): newtheta = restore(args[:], theta) return self._penalized_nnlf(newtheta, x) return x0, func, restore, args def fit(self, data, *args, **kwds): """ Return MLEs for shape (if applicable), location, and scale parameters from data. MLE stands for Maximum Likelihood Estimate. Starting estimates for the fit are given by input arguments; for any arguments not provided with starting estimates, ``self._fitstart(data)`` is called to generate such. One can hold some parameters fixed to specific values by passing in keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters) and ``floc`` and ``fscale`` (for location and scale parameters, respectively). Parameters ---------- data : array_like Data to use in calculating the MLEs. args : floats, optional Starting value(s) for any shape-characterizing arguments (those not provided will be determined by a call to ``_fitstart(data)``). No default value. kwds : floats, optional Starting values for the location and scale parameters; no default. Special keyword arguments are recognized as holding certain parameters fixed: - f0...fn : hold respective shape parameters fixed. Alternatively, shape parameters to fix can be specified by name. For example, if ``self.shapes == "a, b"``, ``fa``and ``fix_a`` are equivalent to ``f0``, and ``fb`` and ``fix_b`` are equivalent to ``f1``. - floc : hold location parameter fixed to specified value. - fscale : hold scale parameter fixed to specified value. - optimizer : The optimizer to use. The optimizer must take ``func``, and starting position as the first two arguments, plus ``args`` (for extra arguments to pass to the function to be optimized) and ``disp=0`` to suppress output as keyword arguments. Returns ------- mle_tuple : tuple of floats MLEs for any shape parameters (if applicable), followed by those for location and scale. For most random variables, shape statistics will be returned, but there are exceptions (e.g. ``norm``). Notes ----- This fit is computed by maximizing a log-likelihood function, with penalty applied for samples outside of range of the distribution. The returned answer is not guaranteed to be the globally optimal MLE, it may only be locally optimal, or the optimization may fail altogether. Examples -------- Generate some data to fit: draw random variates from the `beta` distribution >>> from scipy.stats import beta >>> a, b = 1., 2. >>> x = beta.rvs(a, b, size=1000) Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``): >>> a1, b1, loc1, scale1 = beta.fit(x) We can also use some prior knowledge about the dataset: let's keep ``loc`` and ``scale`` fixed: >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1) >>> loc1, scale1 (0, 1) We can also keep shape parameters fixed by using ``f``-keywords. To keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or, equivalently, ``fa=1``: >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1) >>> a1 1 Not all distributions return estimates for the shape parameters. 
``norm`` for example just returns estimates for location and scale: >>> from scipy.stats import norm >>> x = norm.rvs(a, b, size=1000, random_state=123) >>> loc1, scale1 = norm.fit(x) >>> loc1, scale1 (0.92087172783841631, 2.0015750750324668) """ Narg = len(args) if Narg > self.numargs: raise TypeError("Too many input arguments.") start = [None]*2 if (Narg < self.numargs) or not ('loc' in kwds and 'scale' in kwds): # get distribution specific starting locations start = self._fitstart(data) args += start[Narg:-2] loc = kwds.pop('loc', start[-2]) scale = kwds.pop('scale', start[-1]) args += (loc, scale) x0, func, restore, args = self._reduce_func(args, kwds) optimizer = kwds.pop('optimizer', optimize.fmin) # convert string to function in scipy.optimize if not callable(optimizer) and isinstance(optimizer, string_types): if not optimizer.startswith('fmin_'): optimizer = "fmin_"+optimizer if optimizer == 'fmin_': optimizer = 'fmin' try: optimizer = getattr(optimize, optimizer) except AttributeError: raise ValueError("%s is not a valid optimizer" % optimizer) # by now kwds must be empty, since everybody took what they needed if kwds: raise TypeError("Unknown arguments: %s." % kwds) vals = optimizer(func, x0, args=(ravel(data),), disp=0) if restore is not None: vals = restore(args, vals) vals = tuple(vals) return vals def _fit_loc_scale_support(self, data, *args): """ Estimate loc and scale parameters from data accounting for support. Parameters ---------- data : array_like Data to fit. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- Lhat : float Estimated location parameter for the data. Shat : float Estimated scale parameter for the data. """ data = np.asarray(data) # Estimate location and scale according to the method of moments. loc_hat, scale_hat = self.fit_loc_scale(data, *args) # Compute the support according to the shape parameters. self._argcheck(*args) a, b = self.a, self.b support_width = b - a # If the support is empty then return the moment-based estimates. if support_width <= 0: return loc_hat, scale_hat # Compute the proposed support according to the loc and scale estimates. a_hat = loc_hat + a * scale_hat b_hat = loc_hat + b * scale_hat # Use the moment-based estimates if they are compatible with the data. data_a = np.min(data) data_b = np.max(data) if a_hat < data_a and data_b < b_hat: return loc_hat, scale_hat # Otherwise find other estimates that are compatible with the data. data_width = data_b - data_a rel_margin = 0.1 margin = data_width * rel_margin # For a finite interval, both the location and scale # should have interesting values. if support_width < np.inf: loc_hat = (data_a - a) - margin scale_hat = (data_width + 2 * margin) / support_width return loc_hat, scale_hat # For a one-sided interval, use only an interesting location parameter. if a > -np.inf: return (data_a - a) - margin, 1 elif b < np.inf: return (data_b - b) + margin, 1 else: raise RuntimeError def fit_loc_scale(self, data, *args): """ Estimate loc and scale parameters from data using 1st and 2nd moments. Parameters ---------- data : array_like Data to fit. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- Lhat : float Estimated location parameter for the data. Shat : float Estimated scale parameter for the data. 
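Notes
-----
These are method-of-moments estimates: with ``mu`` and ``mu2`` the
distribution's mean and variance for the given shape parameters,
``Shat = sqrt(var(data) / mu2)`` and ``Lhat = mean(data) - Shat * mu``,
with non-finite results replaced by 1 and 0 respectively.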
""" mu, mu2 = self.stats(*args, **{'moments': 'mv'}) tmp = asarray(data) muhat = tmp.mean() mu2hat = tmp.var() Shat = sqrt(mu2hat / mu2) Lhat = muhat - Shat*mu if not np.isfinite(Lhat): Lhat = 0 if not (np.isfinite(Shat) and (0 < Shat)): Shat = 1 return Lhat, Shat def _entropy(self, *args): def integ(x): val = self._pdf(x, *args) return entr(val) # upper limit is often inf, so suppress warnings when integrating olderr = np.seterr(over='ignore') h = integrate.quad(integ, self.a, self.b)[0] np.seterr(**olderr) if not np.isnan(h): return h else: # try with different limits if integration problems low, upp = self.ppf([1e-10, 1. - 1e-10], *args) if np.isinf(self.b): upper = upp else: upper = self.b if np.isinf(self.a): lower = low else: lower = self.a return integrate.quad(integ, lower, upper)[0] def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds): """Calculate expected value of a function with respect to the distribution. The expected value of a function ``f(x)`` with respect to a distribution ``dist`` is defined as:: ubound E[x] = Integral(f(x) * dist.pdf(x)) lbound Parameters ---------- func : callable, optional Function for which integral is calculated. Takes only one argument. The default is the identity mapping f(x) = x. args : tuple, optional Shape parameters of the distribution. loc : float, optional Location parameter (default=0). scale : float, optional Scale parameter (default=1). lb, ub : scalar, optional Lower and upper bound for integration. Default is set to the support of the distribution. conditional : bool, optional If True, the integral is corrected by the conditional probability of the integration interval. The return value is the expectation of the function, conditional on being in the given interval. Default is False. Additional keyword arguments are passed to the integration routine. Returns ------- expect : float The calculated expected value. Notes ----- The integration behavior of this function is inherited from `integrate.quad`. """ lockwds = {'loc': loc, 'scale': scale} self._argcheck(*args) if func is None: def fun(x, *args): return x * self.pdf(x, *args, **lockwds) else: def fun(x, *args): return func(x) * self.pdf(x, *args, **lockwds) if lb is None: lb = loc + self.a * scale if ub is None: ub = loc + self.b * scale if conditional: invfac = (self.sf(lb, *args, **lockwds) - self.sf(ub, *args, **lockwds)) else: invfac = 1.0 kwds['args'] = args # Silence floating point warnings from integration. 
olderr = np.seterr(all='ignore') vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac np.seterr(**olderr) return vals # Helpers for the discrete distributions def _drv2_moment(self, n, *args): """Non-central moment of discrete distribution.""" def fun(x): return np.power(x, n) * self._pmf(x, *args) return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc) def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm b = self.b a = self.a if isinf(b): # Be sure ending point is > q b = int(max(100*q, 10)) while 1: if b >= self.b: qb = 1.0 break qb = self._cdf(b, *args) if (qb < q): b += 10 else: break else: qb = 1.0 if isinf(a): # be sure starting point < q a = int(min(-100*q, -10)) while 1: if a <= self.a: qb = 0.0 break qa = self._cdf(a, *args) if (qa > q): a -= 10 else: break else: qa = self._cdf(a, *args) while 1: if (qa == q): return a if (qb == q): return b if b <= a+1: # testcase: return wrong number at lower index # python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong # python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)" # python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)" if qa > q: return a else: return b c = int((a+b)/2.0) qc = self._cdf(c, *args) if (qc < q): if a != c: a = c else: raise RuntimeError('updating stopped, endless loop') qa = qc elif (qc > q): if b != c: b = c else: raise RuntimeError('updating stopped, endless loop') qb = qc else: return c def entropy(pk, qk=None, base=None): """Calculate the entropy of a distribution for given probability values. If only probabilities `pk` are given, the entropy is calculated as ``S = -sum(pk * log(pk), axis=0)``. If `qk` is not None, then compute the Kullback-Leibler divergence ``S = sum(pk * log(pk / qk), axis=0)``. This routine will normalize `pk` and `qk` if they don't sum to 1. Parameters ---------- pk : sequence Defines the (discrete) distribution. ``pk[i]`` is the (possibly unnormalized) probability of event ``i``. qk : sequence, optional Sequence against which the relative entropy is computed. Should be in the same format as `pk`. base : float, optional The logarithmic base to use, defaults to ``e`` (natural logarithm). Returns ------- S : float The calculated entropy. """ pk = asarray(pk) pk = 1.0*pk / np.sum(pk, axis=0) if qk is None: vec = entr(pk) else: qk = asarray(qk) if len(qk) != len(pk): raise ValueError("qk and pk must have same length.") qk = 1.0*qk / np.sum(qk, axis=0) vec = rel_entr(pk, qk) S = np.sum(vec, axis=0) if base is not None: S /= log(base) return S # Must over-ride one of _pmf or _cdf or pass in # x_k, p(x_k) lists in initialization class rv_discrete(rv_generic): """ A generic discrete random variable class meant for subclassing. `rv_discrete` is a base class to construct specific distribution classes and instances for discrete random variables. It can also be used to construct an arbitrary distribution defined by a list of support points and corresponding probabilities. Parameters ---------- a : float, optional Lower bound of the support of the distribution, default: 0 b : float, optional Upper bound of the support of the distribution, default: plus infinity moment_tol : float, optional The tolerance for the generic calculation of moments. values : tuple of two array_like, optional ``(xk, pk)`` where ``xk`` are integers with non-zero probabilities ``pk`` with ``sum(pk) = 1``. inc : integer, optional Increment for the support of the distribution. Default is 1. 
(other values have not been tested) badvalue : float, optional The value in a result arrays that indicates a value that for which some argument restriction is violated, default is np.nan. name : str, optional The name of the instance. This string is used to construct the default example for distributions. longname : str, optional This string is used as part of the first line of the docstring returned when a subclass has no docstring of its own. Note: `longname` exists for backwards compatibility, do not use for new subclasses. shapes : str, optional The shape of the distribution. For example "m, n" for a distribution that takes two integers as the two shape arguments for all its methods If not provided, shape parameters will be inferred from the signatures of the private methods, ``_pmf`` and ``_cdf`` of the instance. extradoc : str, optional This string is used as the last part of the docstring returned when a subclass has no docstring of its own. Note: `extradoc` exists for backwards compatibility, do not use for new subclasses. seed : None or int or ``numpy.random.RandomState`` instance, optional This parameter defines the RandomState object to use for drawing random variates. If None, the global np.random state is used. If integer, it is used to seed the local RandomState instance. Default is None. Methods ------- rvs pmf logpmf cdf logcdf sf logsf ppf isf moment stats entropy expect median mean std var interval __call__ Notes ----- This class is similar to `rv_continuous`. Whether a shape parameter is valid is decided by an ``_argcheck`` method (which defaults to checking that its arguments are strictly positive.) The main differences are: - the support of the distribution is a set of integers - instead of the probability density function, ``pdf`` (and the corresponding private ``_pdf``), this class defines the *probability mass function*, `pmf` (and the corresponding private ``_pmf``.) - scale parameter is not defined. To create a new discrete distribution, we would do the following: >>> from scipy.stats import rv_discrete >>> class poisson_gen(rv_discrete): ... "Poisson distribution" ... def _pmf(self, k, mu): ... return exp(-mu) * mu**k / factorial(k) and create an instance:: >>> poisson = poisson_gen(name="poisson") Note that above we defined the Poisson distribution in the standard form. Shifting the distribution can be done by providing the ``loc`` parameter to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)`` delegates the work to ``poisson._pmf(x-loc, mu)``. **Discrete distributions from a list of probabilities** Alternatively, you can construct an arbitrary discrete rv defined on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the ``values`` keyword argument to the `rv_discrete` constructor. 
Examples -------- Custom made discrete distribution: >>> from scipy import stats >>> xk = np.arange(7) >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2) >>> custm = stats.rv_discrete(name='custm', values=(xk, pk)) >>> >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 1) >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r') >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4) >>> plt.show() Random number generation: >>> R = custm.rvs(size=100) """ def __new__(cls, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8, values=None, inc=1, longname=None, shapes=None, extradoc=None, seed=None): if values is not None: # dispatch to a subclass return super(rv_discrete, cls).__new__(rv_sample) else: # business as usual return super(rv_discrete, cls).__new__(cls) def __init__(self, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8, values=None, inc=1, longname=None, shapes=None, extradoc=None, seed=None): super(rv_discrete, self).__init__(seed) # cf generic freeze self._ctor_param = dict( a=a, b=b, name=name, badvalue=badvalue, moment_tol=moment_tol, values=values, inc=inc, longname=longname, shapes=shapes, extradoc=extradoc, seed=seed) if badvalue is None: badvalue = nan self.badvalue = badvalue self.a = a self.b = b self.moment_tol = moment_tol self.inc = inc self._cdfvec = vectorize(self._cdf_single, otypes='d') self.vecentropy = vectorize(self._entropy) self.shapes = shapes if values is not None: raise ValueError("rv_discrete.__init__(..., values != None, ...)") self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf], locscale_in='loc=0', # scale=1 for discrete RVs locscale_out='loc, 1') # nin correction needs to be after we know numargs # correct nin for generic moment vectorization _vec_generic_moment = vectorize(_drv2_moment, otypes='d') _vec_generic_moment.nin = self.numargs + 2 self.generic_moment = instancemethod(_vec_generic_moment, self, rv_discrete) # correct nin for ppf vectorization _vppf = vectorize(_drv2_ppfsingle, otypes='d') _vppf.nin = self.numargs + 2 self._ppfvec = instancemethod(_vppf, self, rv_discrete) # now that self.numargs is defined, we can adjust nin self._cdfvec.nin = self.numargs + 1 self._construct_docstrings(name, longname, extradoc) def _construct_docstrings(self, name, longname, extradoc): if name is None: name = 'Distribution' self.name = name self.extradoc = extradoc # generate docstring for subclass instances if longname is None: if name[0] in ['aeiouAEIOU']: hstr = "An " else: hstr = "A " longname = hstr + name if sys.flags.optimize < 2: # Skip adding docstrings if interpreter is run with -OO if self.__doc__ is None: self._construct_default_doc(longname=longname, extradoc=extradoc, docdict=docdict_discrete, discrete='discrete') else: dct = dict(distdiscrete) self._construct_doc(docdict_discrete, dct.get(self.name)) # discrete RV do not have the scale parameter, remove it self.__doc__ = self.__doc__.replace( '\n scale : array_like, ' 'optional\n scale parameter (default=1)', '') def _updated_ctor_param(self): """ Return the current version of _ctor_param, possibly updated by user. Used by freezing and pickling. Keep this in sync with the signature of __init__. 
""" dct = self._ctor_param.copy() dct['a'] = self.a dct['b'] = self.b dct['badvalue'] = self.badvalue dct['moment_tol'] = self.moment_tol dct['inc'] = self.inc dct['name'] = self.name dct['shapes'] = self.shapes dct['extradoc'] = self.extradoc return dct def _nonzero(self, k, *args): return floor(k) == k def _pmf(self, k, *args): return self._cdf(k, *args) - self._cdf(k-1, *args) def _logpmf(self, k, *args): return log(self._pmf(k, *args)) def _cdf_single(self, k, *args): m = arange(int(self.a), k+1) return np.sum(self._pmf(m, *args), axis=0) def _cdf(self, x, *args): k = floor(x) return self._cdfvec(k, *args) # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic def rvs(self, *args, **kwargs): """ Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). size : int or tuple of ints, optional Defining number of random variates (Default is 1). Note that `size` has to be given as keyword, not as positional argument. random_state : None or int or ``np.random.RandomState`` instance, optional If int or RandomState, use it for drawing the random variates. If None, rely on ``self.random_state``. Default is None. Returns ------- rvs : ndarray or scalar Random variates of given `size`. """ kwargs['discrete'] = True return super(rv_discrete, self).rvs(*args, **kwargs) def pmf(self, k, *args, **kwds): """ Probability mass function at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional Location parameter (default=0). Returns ------- pmf : array_like Probability mass function evaluated at k """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args) cond = cond0 & cond1 output = zeros(shape(cond), 'd') place(output, (1-cond0) + np.isnan(k), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._pmf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logpmf(self, k, *args, **kwds): """ Log of the probability mass function at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter. Default is 0. Returns ------- logpmf : array_like Log of the probability mass function evaluated at k. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args) cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(NINF) place(output, (1-cond0) + np.isnan(k), self.badvalue) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logpmf(*goodargs)) if output.ndim == 0: return output[()] return output def cdf(self, k, *args, **kwds): """ Cumulative distribution function of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- cdf : ndarray Cumulative distribution function evaluated at `k`. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k >= self.b) cond = cond0 & cond1 output = zeros(shape(cond), 'd') place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2*(cond0 == cond0), 1.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._cdf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logcdf(self, k, *args, **kwds): """ Log of the cumulative distribution function at k of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at k. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k >= self.b) cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(NINF) place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2*(cond0 == cond0), 0.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logcdf(*goodargs)) if output.ndim == 0: return output[()] return output def sf(self, k, *args, **kwds): """ Survival function (1 - `cdf`) at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- sf : array_like Survival function evaluated at k. """ args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k < self.a) & cond0 cond = cond0 & cond1 output = zeros(shape(cond), 'd') place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2, 1.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, np.clip(self._sf(*goodargs), 0, 1)) if output.ndim == 0: return output[()] return output def logsf(self, k, *args, **kwds): """ Log of the survival function of the given RV. Returns the log of the "survival function," defined as 1 - `cdf`, evaluated at `k`. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logsf : ndarray Log of the survival function evaluated at `k`. 
""" args, loc, _ = self._parse_args(*args, **kwds) k, loc = map(asarray, (k, loc)) args = tuple(map(asarray, args)) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k < self.a) & cond0 cond = cond0 & cond1 output = empty(shape(cond), 'd') output.fill(NINF) place(output, (1-cond0) + np.isnan(k), self.badvalue) place(output, cond2, 0.0) if np.any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output, cond, self._logsf(*goodargs)) if output.ndim == 0: return output[()] return output def ppf(self, q, *args, **kwds): """ Percent point function (inverse of `cdf`) at q of the given RV. Parameters ---------- q : array_like Lower tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : array_like Quantile corresponding to the lower tail probability, q. """ args, loc, _ = self._parse_args(*args, **kwds) q, loc = map(asarray, (q, loc)) args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (loc == loc) cond1 = (q > 0) & (q < 1) cond2 = (q == 1) & cond0 cond = cond0 & cond1 output = valarray(shape(cond), value=self.badvalue, typecode='d') # output type 'd' to handle nin and inf place(output, (q == 0)*(cond == cond), self.a-1) place(output, cond2, self.b) if np.any(cond): goodargs = argsreduce(cond, *((q,)+args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] place(output, cond, self._ppf(*goodargs) + loc) if output.ndim == 0: return output[()] return output def isf(self, q, *args, **kwds): """ Inverse survival function (inverse of `sf`) at q of the given RV. Parameters ---------- q : array_like Upper tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : ndarray or scalar Quantile corresponding to the upper tail probability, q. """ args, loc, _ = self._parse_args(*args, **kwds) q, loc = map(asarray, (q, loc)) args = tuple(map(asarray, args)) cond0 = self._argcheck(*args) & (loc == loc) cond1 = (q > 0) & (q < 1) cond2 = (q == 1) & cond0 cond = cond0 & cond1 # same problem as with ppf; copied from ppf and changed output = valarray(shape(cond), value=self.badvalue, typecode='d') # output type 'd' to handle nin and inf place(output, (q == 0)*(cond == cond), self.b) place(output, cond2, self.a-1) # call place only if at least 1 valid argument if np.any(cond): goodargs = argsreduce(cond, *((q,)+args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] # PB same as ticket 766 place(output, cond, self._isf(*goodargs) + loc) if output.ndim == 0: return output[()] return output def _entropy(self, *args): if hasattr(self, 'pk'): return entropy(self.pk) else: return _expect(lambda x: entr(self.pmf(x, *args)), self.a, self.b, self.ppf(0.5, *args), self.inc) def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32): """ Calculate expected value of a function with respect to the distribution for discrete distribution. Parameters ---------- func : callable, optional Function for which the expectation value is calculated. Takes only one argument. The default is the identity mapping f(k) = k. args : tuple, optional Shape parameters of the distribution. loc : float, optional Location parameter. Default is 0. 
lb, ub : int, optional Lower and upper bound for the summation, default is set to the support of the distribution, inclusive (``ul <= k <= ub``). conditional : bool, optional If true then the expectation is corrected by the conditional probability of the summation interval. The return value is the expectation of the function, `func`, conditional on being in the given interval (k such that ``ul <= k <= ub``). Default is False. maxcount : int, optional Maximal number of terms to evaluate (to avoid an endless loop for an infinite sum). Default is 1000. tolerance : float, optional Absolute tolerance for the summation. Default is 1e-10. chunksize : int, optional Iterate over the support of a distributions in chunks of this size. Default is 32. Returns ------- expect : float Expected value. Notes ----- For heavy-tailed distributions, the expected value may or may not exist, depending on the function, `func`. If it does exist, but the sum converges slowly, the accuracy of the result may be rather low. For instance, for ``zipf(4)``, accuracy for mean, variance in example is only 1e-5. increasing `maxcount` and/or `chunksize` may improve the result, but may also make zipf very slow. The function is not vectorized. """ if func is None: def fun(x): # loc and args from outer scope return (x+loc)*self._pmf(x, *args) else: def fun(x): # loc and args from outer scope return func(x+loc)*self._pmf(x, *args) # used pmf because _pmf does not check support in randint and there # might be problems(?) with correct self.a, self.b at this stage maybe # not anymore, seems to work now with _pmf self._argcheck(*args) # (re)generate scalar self.a and self.b if lb is None: lb = self.a else: lb = lb - loc # convert bound for standardized distribution if ub is None: ub = self.b else: ub = ub - loc # convert bound for standardized distribution if conditional: invfac = self.sf(lb-1, *args) - self.sf(ub, *args) else: invfac = 1.0 # iterate over the support, starting from the median x0 = self.ppf(0.5, *args) res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize) return res / invfac def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10, chunksize=32): """Helper for computing the expectation value of `fun`.""" # short-circuit if the support size is small enough if (ub - lb) <= chunksize: supp = np.arange(lb, ub+1, inc) vals = fun(supp) return np.sum(vals) # otherwise, iterate starting from x0 if x0 < lb: x0 = lb if x0 > ub: x0 = ub count, tot = 0, 0. # iterate over [x0, ub] inclusive for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc): count += x.size delta = np.sum(fun(x)) tot += delta if abs(delta) < tolerance * x.size: break if count > maxcount: warnings.warn('expect(): sum did not converge', RuntimeWarning) return tot # iterate over [lb, x0) for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc): count += x.size delta = np.sum(fun(x)) tot += delta if abs(delta) < tolerance * x.size: break if count > maxcount: warnings.warn('expect(): sum did not converge', RuntimeWarning) break return tot def _iter_chunked(x0, x1, chunksize=4, inc=1): """Iterate from x0 to x1 in chunks of chunksize and steps inc. x0 must be finite, x1 need not be. In the latter case, the iterator is infinite. Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards (make sure to set inc < 0.) 
>>> [x for x in _iter_chunked(2, 5, inc=2)] [array([2, 4])] >>> [x for x in _iter_chunked(2, 11, inc=2)] [array([2, 4, 6, 8]), array([10])] >>> [x for x in _iter_chunked(2, -5, inc=-2)] [array([ 2, 0, -2, -4])] >>> [x for x in _iter_chunked(2, -9, inc=-2)] [array([ 2, 0, -2, -4]), array([-6, -8])] """ if inc == 0: raise ValueError('Cannot increment by zero.') if chunksize <= 0: raise ValueError('Chunk size must be positive; got %s.' % chunksize) s = 1 if inc > 0 else -1 stepsize = abs(chunksize * inc) x = x0 while (x - x1) * inc < 0: delta = min(stepsize, abs(x - x1)) step = delta * s supp = np.arange(x, x + step, inc) x += step yield supp class rv_sample(rv_discrete): """A 'sample' discrete distribution defined by the support and values. The ctor ignores most of the arguments, only needs the `values` argument. """ def __init__(self, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8, values=None, inc=1, longname=None, shapes=None, extradoc=None, seed=None): super(rv_discrete, self).__init__(seed) if values is None: raise ValueError("rv_sample.__init__(..., values=None,...)") # cf generic freeze self._ctor_param = dict( a=a, b=b, name=name, badvalue=badvalue, moment_tol=moment_tol, values=values, inc=inc, longname=longname, shapes=shapes, extradoc=extradoc, seed=seed) if badvalue is None: badvalue = nan self.badvalue = badvalue self.moment_tol = moment_tol self.inc = inc self.shapes = shapes self.vecentropy = self._entropy xk, pk = values if len(xk) != len(pk): raise ValueError("xk and pk need to have the same length.") if not np.allclose(np.sum(pk), 1): raise ValueError("The sum of provided pk is not 1.") indx = np.argsort(np.ravel(xk)) self.xk = np.take(np.ravel(xk), indx, 0) self.pk = np.take(np.ravel(pk), indx, 0) self.a = self.xk[0] self.b = self.xk[-1] self.qvals = np.cumsum(self.pk, axis=0) self.shapes = ' ' # bypass inspection self._construct_argparser(meths_to_inspect=[self._pmf], locscale_in='loc=0', # scale=1 for discrete RVs locscale_out='loc, 1') self._construct_docstrings(name, longname, extradoc) def _pmf(self, x): return np.select([x == k for k in self.xk], [np.broadcast_arrays(p, x)[0] for p in self.pk], 0) def _cdf(self, x): xx, xxk = np.broadcast_arrays(x[:, None], self.xk) indx = np.argmax(xxk > xx, axis=-1) - 1 return self.qvals[indx] def _ppf(self, q): qq, sqq = np.broadcast_arrays(q[..., None], self.qvals) indx = argmax(sqq >= qq, axis=-1) return self.xk[indx] def _rvs(self): # Need to define it explicitly, otherwise .rvs() with size=None # fails due to explicit broadcasting in _ppf U = self._random_state.random_sample(self._size) if self._size is None: U = np.array(U, ndmin=1) Y = self._ppf(U)[0] else: Y = self._ppf(U) return Y def _entropy(self): return entropy(self.pk) def generic_moment(self, n): n = asarray(n) return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0) def get_distribution_names(namespace_pairs, rv_base_class): """ Collect names of statistical distributions and their generators. Parameters ---------- namespace_pairs : sequence A snapshot of (name, value) pairs in the namespace of a module. rv_base_class : class The base class of random variable generator classes in a module. Returns ------- distn_names : list of strings Names of the statistical distributions. distn_gen_names : list of strings Names of the generators of the statistical distributions. Note that these are not simply the names of the statistical distributions, with a _gen suffix added. 
""" distn_names = [] distn_gen_names = [] for name, value in namespace_pairs: if name.startswith('_'): continue if name.endswith('_gen') and issubclass(value, rv_base_class): distn_gen_names.append(name) if isinstance(value, rv_base_class): distn_names.append(name) return distn_names, distn_gen_names
118,557
33.777941
103
py
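The file above supplies the generic fitting, expectation, and discrete-distribution machinery used throughout scipy.stats. A minimal usage sketch follows; the gamma distribution, the fixed-loc choice, and the small custom discrete variable are illustrative assumptions, not part of the original source.

import numpy as np
from scipy import stats

# Maximum-likelihood fit with the location held fixed at 0
# (exercises rv_continuous.fit and _reduce_func from the file above).
data = stats.gamma.rvs(2.5, loc=0, scale=1.3, size=2000, random_state=12345)
a_hat, loc_hat, scale_hat = stats.gamma.fit(data, floc=0)

# Expected value of x**2 under the fitted distribution via rv_continuous.expect.
second_moment = stats.gamma.expect(lambda x: x**2, args=(a_hat,),
                                   loc=loc_hat, scale=scale_hat)

# An arbitrary discrete distribution from support points and probabilities
# (dispatches to the rv_sample subclass defined above).
xk = np.arange(5)
pk = (0.1, 0.2, 0.4, 0.2, 0.1)
custom = stats.rv_discrete(name='custom', values=(xk, pk))

print(a_hat, scale_hat, second_moment, custom.mean(), custom.entropy())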
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_constants.py
"""
Statistics-related constants.

"""
from __future__ import division, print_function, absolute_import

import numpy as np


# The smallest representable positive number such that 1.0 + _EPS != 1.0.
_EPS = np.finfo(float).eps

# The largest [in magnitude] usable floating value.
_XMAX = np.finfo(float).max

# The log of the largest usable floating value; useful for knowing
# when exp(something) will overflow
_LOGXMAX = np.log(_XMAX)

# The smallest [in magnitude] usable floating value.
_XMIN = np.finfo(float).tiny

# -special.psi(1)
_EULER = 0.577215664901532860606512090082402431042

# special.zeta(3, 1)  Apery's constant
_ZETA3 = 1.202056903159594285399738161511449990765
681
23.357143
73
py
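A small, illustrative consistency check relating the hard-coded constants above to the special functions named in their comments; the import of the private scipy.stats._constants module is shown only for demonstration.

import numpy as np
from scipy import special
from scipy.stats._constants import _EPS, _XMAX, _EULER, _ZETA3

# _EULER is -psi(1) (the Euler-Mascheroni constant) and _ZETA3 is zeta(3)
# (Apery's constant); both should agree with scipy.special to double precision.
assert np.isclose(-special.psi(1), _EULER)
assert np.isclose(special.zeta(3, 1), _ZETA3)

# _EPS and _XMAX come directly from the float info structure.
assert _EPS == np.finfo(float).eps
assert _XMAX == np.finfo(float).max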
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/_discrete_distns.py
# # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # from __future__ import division, print_function, absolute_import from scipy import special from scipy.special import entr, logsumexp, betaln, gammaln as gamln from scipy._lib._numpy_compat import broadcast_to from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh import numpy as np from ._distn_infrastructure import ( rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names) class binom_gen(rv_discrete): r"""A binomial discrete random variable. %(before_notes)s Notes ----- The probability mass function for `binom` is: .. math:: f(k) = \binom{n}{k} p^k (1-p)^{n-k} for ``k`` in ``{0, 1,..., n}``. `binom` takes ``n`` and ``p`` as shape parameters. %(after_notes)s %(example)s """ def _rvs(self, n, p): return self._random_state.binomial(n, p, self._size) def _argcheck(self, n, p): self.b = n return (n >= 0) & (p >= 0) & (p <= 1) def _logpmf(self, x, n, p): k = floor(x) combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1))) return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p) def _pmf(self, x, n, p): # binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k) return exp(self._logpmf(x, n, p)) def _cdf(self, x, n, p): k = floor(x) vals = special.bdtr(k, n, p) return vals def _sf(self, x, n, p): k = floor(x) return special.bdtrc(k, n, p) def _ppf(self, q, n, p): vals = ceil(special.bdtrik(q, n, p)) vals1 = np.maximum(vals - 1, 0) temp = special.bdtr(vals1, n, p) return np.where(temp >= q, vals1, vals) def _stats(self, n, p, moments='mv'): q = 1.0 - p mu = n * p var = n * p * q g1, g2 = None, None if 's' in moments: g1 = (q - p) / sqrt(var) if 'k' in moments: g2 = (1.0 - 6*p*q) / var return mu, var, g1, g2 def _entropy(self, n, p): k = np.r_[0:n + 1] vals = self._pmf(k, n, p) return np.sum(entr(vals), axis=0) binom = binom_gen(name='binom') class bernoulli_gen(binom_gen): r"""A Bernoulli discrete random variable. %(before_notes)s Notes ----- The probability mass function for `bernoulli` is: .. math:: f(k) = \begin{cases}1-p &\text{if } k = 0\\ p &\text{if } k = 1\end{cases} for :math:`k` in :math:`\{0, 1\}`. `bernoulli` takes :math:`p` as shape parameter. %(after_notes)s %(example)s """ def _rvs(self, p): return binom_gen._rvs(self, 1, p) def _argcheck(self, p): return (p >= 0) & (p <= 1) def _logpmf(self, x, p): return binom._logpmf(x, 1, p) def _pmf(self, x, p): # bernoulli.pmf(k) = 1-p if k = 0 # = p if k = 1 return binom._pmf(x, 1, p) def _cdf(self, x, p): return binom._cdf(x, 1, p) def _sf(self, x, p): return binom._sf(x, 1, p) def _ppf(self, q, p): return binom._ppf(q, 1, p) def _stats(self, p): return binom._stats(1, p) def _entropy(self, p): return entr(p) + entr(1-p) bernoulli = bernoulli_gen(b=1, name='bernoulli') class nbinom_gen(rv_discrete): r"""A negative binomial discrete random variable. %(before_notes)s Notes ----- Negative binomial distribution describes a sequence of i.i.d. Bernoulli trials, repeated until a predefined, non-random number of successes occurs. The probability mass function of the number of failures for `nbinom` is: .. math:: f(k) = \binom{k+n-1}{n-1} p^n (1-p)^k for :math:`k \ge 0`. `nbinom` takes :math:`n` and :math:`p` as shape parameters where n is the number of successes, whereas p is the probability of a single success. 
%(after_notes)s %(example)s """ def _rvs(self, n, p): return self._random_state.negative_binomial(n, p, self._size) def _argcheck(self, n, p): return (n > 0) & (p >= 0) & (p <= 1) def _pmf(self, x, n, p): # nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k return exp(self._logpmf(x, n, p)) def _logpmf(self, x, n, p): coeff = gamln(n+x) - gamln(x+1) - gamln(n) return coeff + n*log(p) + special.xlog1py(x, -p) def _cdf(self, x, n, p): k = floor(x) return special.betainc(n, k+1, p) def _sf_skip(self, x, n, p): # skip because special.nbdtrc doesn't work for 0<n<1 k = floor(x) return special.nbdtrc(k, n, p) def _ppf(self, q, n, p): vals = ceil(special.nbdtrik(q, n, p)) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, n, p) return np.where(temp >= q, vals1, vals) def _stats(self, n, p): Q = 1.0 / p P = Q - 1.0 mu = n*P var = n*P*Q g1 = (Q+P)/sqrt(n*P*Q) g2 = (1.0 + 6*P*Q) / (n*P*Q) return mu, var, g1, g2 nbinom = nbinom_gen(name='nbinom') class geom_gen(rv_discrete): r"""A geometric discrete random variable. %(before_notes)s Notes ----- The probability mass function for `geom` is: .. math:: f(k) = (1-p)^{k-1} p for :math:`k \ge 1`. `geom` takes :math:`p` as shape parameter. %(after_notes)s %(example)s """ def _rvs(self, p): return self._random_state.geometric(p, size=self._size) def _argcheck(self, p): return (p <= 1) & (p >= 0) def _pmf(self, k, p): # geom.pmf(k) = (1-p)**(k-1)*p return np.power(1-p, k-1) * p def _logpmf(self, k, p): return special.xlog1py(k - 1, -p) + log(p) def _cdf(self, x, p): k = floor(x) return -expm1(log1p(-p)*k) def _sf(self, x, p): return np.exp(self._logsf(x, p)) def _logsf(self, x, p): k = floor(x) return k*log1p(-p) def _ppf(self, q, p): vals = ceil(log(1.0-q)/log(1-p)) temp = self._cdf(vals-1, p) return np.where((temp >= q) & (vals > 0), vals-1, vals) def _stats(self, p): mu = 1.0/p qr = 1.0-p var = qr / p / p g1 = (2.0-p) / sqrt(qr) g2 = np.polyval([1, -6, 6], p)/(1.0-p) return mu, var, g1, g2 geom = geom_gen(a=1, name='geom', longname="A geometric") class hypergeom_gen(rv_discrete): r"""A hypergeometric discrete random variable. The hypergeometric distribution models drawing objects from a bin. `M` is the total number of objects, `n` is total number of Type I objects. The random variate represents the number of Type I objects in `N` drawn without replacement from the total population. %(before_notes)s Notes ----- The symbols used to denote the shape parameters (`M`, `n`, and `N`) are not universally accepted. See the Examples for a clarification of the definitions used here. The probability mass function is defined as, .. math:: p(k, M, n, N) = \frac{\binom{n}{k} \binom{M - n}{N - k}} {\binom{M}{N}} for :math:`k \in [\max(0, N - M + n), \min(n, N)]`, where the binomial coefficients are defined as, .. math:: \binom{n}{k} \equiv \frac{n!}{k! (n - k)!}. %(after_notes)s Examples -------- >>> from scipy.stats import hypergeom >>> import matplotlib.pyplot as plt Suppose we have a collection of 20 animals, of which 7 are dogs. 
Then if we want to know the probability of finding a given number of dogs if we choose at random 12 of the 20 animals, we can initialize a frozen distribution and plot the probability mass function: >>> [M, n, N] = [20, 7, 12] >>> rv = hypergeom(M, n, N) >>> x = np.arange(0, n+1) >>> pmf_dogs = rv.pmf(x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, pmf_dogs, 'bo') >>> ax.vlines(x, 0, pmf_dogs, lw=2) >>> ax.set_xlabel('# of dogs in our group of chosen animals') >>> ax.set_ylabel('hypergeom PMF') >>> plt.show() Instead of using a frozen distribution we can also use `hypergeom` methods directly. To for example obtain the cumulative distribution function, use: >>> prb = hypergeom.cdf(x, M, n, N) And to generate random numbers: >>> R = hypergeom.rvs(M, n, N, size=10) """ def _rvs(self, M, n, N): return self._random_state.hypergeometric(n, M-n, N, size=self._size) def _argcheck(self, M, n, N): cond = (M > 0) & (n >= 0) & (N >= 0) cond &= (n <= M) & (N <= M) self.a = np.maximum(N-(M-n), 0) self.b = np.minimum(n, N) return cond def _logpmf(self, k, M, n, N): tot, good = M, n bad = tot - good return betaln(good+1, 1) + betaln(bad+1,1) + betaln(tot-N+1, N+1)\ - betaln(k+1, good-k+1) - betaln(N-k+1,bad-N+k+1)\ - betaln(tot+1, 1) def _pmf(self, k, M, n, N): # same as the following but numerically more precise # return comb(good, k) * comb(bad, N-k) / comb(tot, N) return exp(self._logpmf(k, M, n, N)) def _stats(self, M, n, N): # tot, good, sample_size = M, n, N # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n') M, n, N = 1.*M, 1.*n, 1.*N m = M - n p = n/M mu = N*p var = m*n*N*(M - N)*1.0/(M*M*(M-1)) g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N))) g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m g2 *= (M-1)*M*M g2 += 6.*n*N*(M-N)*m*(5.*M-6) g2 /= n * N * (M-N) * m * (M-2.) * (M-3.) return mu, var, g1, g2 def _entropy(self, M, n, N): k = np.r_[N - (M - n):min(n, N) + 1] vals = self.pmf(k, M, n, N) return np.sum(entr(vals), axis=0) def _sf(self, k, M, n, N): """More precise calculation, 1 - cdf doesn't cut it.""" # This for loop is needed because `k` can be an array. If that's the # case, the sf() method makes M, n and N arrays of the same shape. We # therefore unpack all inputs args, so we can do the manual # integration. res = [] for quant, tot, good, draw in zip(k, M, n, N): # Manual integration over probability mass function. More accurate # than integrate.quad. k2 = np.arange(quant + 1, draw + 1) res.append(np.sum(self._pmf(k2, tot, good, draw))) return np.asarray(res) def _logsf(self, k, M, n, N): """ More precise calculation than log(sf) """ res = [] for quant, tot, good, draw in zip(k, M, n, N): # Integration over probability mass function using logsumexp k2 = np.arange(quant + 1, draw + 1) res.append(logsumexp(self._logpmf(k2, tot, good, draw))) return np.asarray(res) hypergeom = hypergeom_gen(name='hypergeom') # FIXME: Fails _cdfvec class logser_gen(rv_discrete): r"""A Logarithmic (Log-Series, Series) discrete random variable. %(before_notes)s Notes ----- The probability mass function for `logser` is: .. math:: f(k) = - \frac{p^k}{k \log(1-p)} for :math:`k \ge 1`. `logser` takes :math:`p` as shape parameter. 
%(after_notes)s %(example)s """ def _rvs(self, p): # looks wrong for p>0.5, too few k=1 # trying to use generic is worse, no k=1 at all return self._random_state.logseries(p, size=self._size) def _argcheck(self, p): return (p > 0) & (p < 1) def _pmf(self, k, p): # logser.pmf(k) = - p**k / (k*log(1-p)) return -np.power(p, k) * 1.0 / k / special.log1p(-p) def _stats(self, p): r = special.log1p(-p) mu = p / (p - 1.0) / r mu2p = -p / r / (p - 1.0)**2 var = mu2p - mu*mu mu3p = -p / r * (1.0+p) / (1.0 - p)**3 mu3 = mu3p - 3*mu*mu2p + 2*mu**3 g1 = mu3 / np.power(var, 1.5) mu4p = -p / r * ( 1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4) mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 g2 = mu4 / var**2 - 3.0 return mu, var, g1, g2 logser = logser_gen(a=1, name='logser', longname='A logarithmic') class poisson_gen(rv_discrete): r"""A Poisson discrete random variable. %(before_notes)s Notes ----- The probability mass function for `poisson` is: .. math:: f(k) = \exp(-\mu) \frac{mu^k}{k!} for :math:`k \ge 0`. `poisson` takes :math:`\mu` as shape parameter. %(after_notes)s %(example)s """ # Override rv_discrete._argcheck to allow mu=0. def _argcheck(self, mu): return mu >= 0 def _rvs(self, mu): return self._random_state.poisson(mu, self._size) def _logpmf(self, k, mu): Pk = special.xlogy(k, mu) - gamln(k + 1) - mu return Pk def _pmf(self, k, mu): # poisson.pmf(k) = exp(-mu) * mu**k / k! return exp(self._logpmf(k, mu)) def _cdf(self, x, mu): k = floor(x) return special.pdtr(k, mu) def _sf(self, x, mu): k = floor(x) return special.pdtrc(k, mu) def _ppf(self, q, mu): vals = ceil(special.pdtrik(q, mu)) vals1 = np.maximum(vals - 1, 0) temp = special.pdtr(vals1, mu) return np.where(temp >= q, vals1, vals) def _stats(self, mu): var = mu tmp = np.asarray(mu) mu_nonzero = tmp > 0 g1 = _lazywhere(mu_nonzero, (tmp,), lambda x: sqrt(1.0/x), np.inf) g2 = _lazywhere(mu_nonzero, (tmp,), lambda x: 1.0/x, np.inf) return mu, var, g1, g2 poisson = poisson_gen(name="poisson", longname='A Poisson') class planck_gen(rv_discrete): r"""A Planck discrete exponential random variable. %(before_notes)s Notes ----- The probability mass function for `planck` is: .. math:: f(k) = (1-\exp(-\lambda)) \exp(-\lambda k) for :math:`k \lambda \ge 0`. `planck` takes :math:`\lambda` as shape parameter. %(after_notes)s %(example)s """ def _argcheck(self, lambda_): self.a = np.where(lambda_ > 0, 0, -np.inf) self.b = np.where(lambda_ > 0, np.inf, 0) return lambda_ != 0 def _pmf(self, k, lambda_): # planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k) fact = (1-exp(-lambda_)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_): k = floor(x) return 1-exp(-lambda_*(k+1)) def _sf(self, x, lambda_): return np.exp(self._logsf(x, lambda_)) def _logsf(self, x, lambda_): k = floor(x) return -lambda_*(k+1) def _ppf(self, q, lambda_): vals = ceil(-1.0/lambda_ * log1p(-q)-1) vals1 = (vals-1).clip(self.a, np.inf) temp = self._cdf(vals1, lambda_) return np.where(temp >= q, vals1, vals) def _stats(self, lambda_): mu = 1/(exp(lambda_)-1) var = exp(-lambda_)/(expm1(-lambda_))**2 g1 = 2*cosh(lambda_/2.0) g2 = 4+2*cosh(lambda_) return mu, var, g1, g2 def _entropy(self, lambda_): l = lambda_ C = (1-exp(-l)) return l*exp(-l)/C - log(C) planck = planck_gen(name='planck', longname='A discrete exponential ') class boltzmann_gen(rv_discrete): r"""A Boltzmann (Truncated Discrete Exponential) random variable. %(before_notes)s Notes ----- The probability mass function for `boltzmann` is: .. 
math:: f(k) = (1-\exp(-\lambda) \exp(-\lambda k)/(1-\exp(-\lambda N)) for :math:`k = 0,..., N-1`. `boltzmann` takes :math:`\lambda` and :math:`N` as shape parameters. %(after_notes)s %(example)s """ def _pmf(self, k, lambda_, N): # boltzmann.pmf(k) = # (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N)) fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_, N): k = floor(x) return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) def _ppf(self, q, lambda_, N): qnew = q*(1-exp(-lambda_*N)) vals = ceil(-1.0/lambda_ * log(1-qnew)-1) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, lambda_, N) return np.where(temp >= q, vals1, vals) def _stats(self, lambda_, N): z = exp(-lambda_) zN = exp(-lambda_*N) mu = z/(1.0-z)-N*zN/(1-zN) var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2 trm = (1-zN)/(1-z) trm2 = (z*trm**2 - N*N*zN) g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) g1 = g1 / trm2**(1.5) g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) g2 = g2 / trm2 / trm2 return mu, var, g1, g2 boltzmann = boltzmann_gen(name='boltzmann', longname='A truncated discrete exponential ') class randint_gen(rv_discrete): r"""A uniform discrete random variable. %(before_notes)s Notes ----- The probability mass function for `randint` is: .. math:: f(k) = \frac{1}{high - low} for ``k = low, ..., high - 1``. `randint` takes ``low`` and ``high`` as shape parameters. %(after_notes)s %(example)s """ def _argcheck(self, low, high): self.a = low self.b = high - 1 return (high > low) def _pmf(self, k, low, high): # randint.pmf(k) = 1./(high - low) p = np.ones_like(k) / (high - low) return np.where((k >= low) & (k < high), p, 0.) def _cdf(self, x, low, high): k = floor(x) return (k - low + 1.) / (high - low) def _ppf(self, q, low, high): vals = ceil(q * (high - low) + low) - 1 vals1 = (vals - 1).clip(low, high) temp = self._cdf(vals1, low, high) return np.where(temp >= q, vals1, vals) def _stats(self, low, high): m2, m1 = np.asarray(high), np.asarray(low) mu = (m2 + m1 - 1.0) / 2 d = m2 - m1 var = (d*d - 1) / 12.0 g1 = 0.0 g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0) return mu, var, g1, g2 def _rvs(self, low, high): """An array of *size* random integers >= ``low`` and < ``high``.""" if self._size is not None: # Numpy's RandomState.randint() doesn't broadcast its arguments. # Use `broadcast_to()` to extend the shapes of low and high # up to self._size. Then we can use the numpy.vectorize'd # randint without needing to pass it a `size` argument. low = broadcast_to(low, self._size) high = broadcast_to(high, self._size) randint = np.vectorize(self._random_state.randint, otypes=[np.int_]) return randint(low, high) def _entropy(self, low, high): return log(high - low) randint = randint_gen(name='randint', longname='A discrete uniform ' '(random integer)') # FIXME: problems sampling. class zipf_gen(rv_discrete): r"""A Zipf discrete random variable. %(before_notes)s Notes ----- The probability mass function for `zipf` is: .. math:: f(k, a) = \frac{1}{\zeta(a) k^a} for :math:`k \ge 1`. `zipf` takes :math:`a` as shape parameter. 
%(after_notes)s %(example)s """ def _rvs(self, a): return self._random_state.zipf(a, size=self._size) def _argcheck(self, a): return a > 1 def _pmf(self, k, a): # zipf.pmf(k, a) = 1/(zeta(a) * k**a) Pk = 1.0 / special.zeta(a, 1) / k**a return Pk def _munp(self, n, a): return _lazywhere( a > n + 1, (a, n), lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1), np.inf) zipf = zipf_gen(a=1, name='zipf', longname='A Zipf') class dlaplace_gen(rv_discrete): r"""A Laplacian discrete random variable. %(before_notes)s Notes ----- The probability mass function for `dlaplace` is: .. math:: f(k) = \tanh(a/2) \exp(-a |k|) for :math:`a > 0`. `dlaplace` takes :math:`a` as shape parameter. %(after_notes)s %(example)s """ def _pmf(self, k, a): # dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k)) return tanh(a/2.0) * exp(-a * abs(k)) def _cdf(self, x, a): k = floor(x) f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1) f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1) return _lazywhere(k >= 0, (k, a), f=f, f2=f2) def _ppf(self, q, a): const = 1 + exp(a) vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1, -log((1-q) * const) / a)) vals1 = vals - 1 return np.where(self._cdf(vals1, a) >= q, vals1, vals) def _stats(self, a): ea = exp(a) mu2 = 2.*ea/(ea-1.)**2 mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4 return 0., mu2, 0., mu4/mu2**2 - 3. def _entropy(self, a): return a / sinh(a) - log(tanh(a/2.0)) dlaplace = dlaplace_gen(a=-np.inf, name='dlaplace', longname='A discrete Laplacian') class skellam_gen(rv_discrete): r"""A Skellam discrete random variable. %(before_notes)s Notes ----- Probability distribution of the difference of two correlated or uncorrelated Poisson random variables. Let :math:`k_1` and :math:`k_2` be two Poisson-distributed r.v. with expected values lam1 and lam2. Then, :math:`k_1 - k_2` follows a Skellam distribution with parameters :math:`\mu_1 = \lambda_1 - \rho \sqrt{\lambda_1 \lambda_2}` and :math:`\mu_2 = \lambda_2 - \rho \sqrt{\lambda_1 \lambda_2}`, where :math:`\rho` is the correlation coefficient between :math:`k_1` and :math:`k_2`. If the two Poisson-distributed r.v. are independent then :math:`\rho = 0`. Parameters :math:`\mu_1` and :math:`\mu_2` must be strictly positive. For details see: http://en.wikipedia.org/wiki/Skellam_distribution `skellam` takes :math:`\mu_1` and :math:`\mu_2` as shape parameters. %(after_notes)s %(example)s """ def _rvs(self, mu1, mu2): n = self._size return (self._random_state.poisson(mu1, n) - self._random_state.poisson(mu2, n)) def _pmf(self, x, mu1, mu2): px = np.where(x < 0, _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2, _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2) # ncx2.pdf() returns nan's for extremely low probabilities return px def _cdf(self, x, mu1, mu2): x = floor(x) px = np.where(x < 0, _ncx2_cdf(2*mu2, -2*x, 2*mu1), 1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2)) return px def _stats(self, mu1, mu2): mean = mu1 - mu2 var = mu1 + mu2 g1 = mean / sqrt((var)**3) g2 = 1 / var return mean, var, g1, g2 skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam') # Collect names of classes and objects in this module. pairs = list(globals().items()) _distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete) __all__ = _distn_names + _distn_gen_names
23,447
25.435175
79
py
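An illustrative sketch exercising a few of the discrete distributions defined in the file above; the parameter values are arbitrary and chosen only for demonstration.

from scipy import stats

# Binomial with n=10 trials and success probability p=0.4:
# P(X = 3) and the upper tail P(X > 3).
p3 = stats.binom.pmf(3, 10, 0.4)
tail = stats.binom.sf(3, 10, 0.4)

# Poisson with mu=3.5: mean and variance are both equal to mu.
mu, var = stats.poisson.stats(3.5, moments='mv')

# Hypergeometric pmf(k, M, n, N): probability of drawing exactly 2 type-I
# objects when sampling N=12 from M=20 objects of which n=7 are type I.
p_two = stats.hypergeom.pmf(2, 20, 7, 12)

# Skellam (difference of two independent Poisson variables with mu1, mu2).
mean_diff = stats.skellam.mean(4.0, 2.5)

print(p3, tail, mu, var, p_two, mean_diff)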
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/contingency.py
"""Some functions for working with contingency tables (i.e. cross tabulations). """ from __future__ import division, print_function, absolute_import from functools import reduce import numpy as np from .stats import power_divergence __all__ = ['margins', 'expected_freq', 'chi2_contingency'] def margins(a): """Return a list of the marginal sums of the array `a`. Parameters ---------- a : ndarray The array for which to compute the marginal sums. Returns ------- margsums : list of ndarrays A list of length `a.ndim`. `margsums[k]` is the result of summing `a` over all axes except `k`; it has the same number of dimensions as `a`, but the length of each axis except axis `k` will be 1. Examples -------- >>> a = np.arange(12).reshape(2, 6) >>> a array([[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11]]) >>> m0, m1 = margins(a) >>> m0 array([[15], [51]]) >>> m1 array([[ 6, 8, 10, 12, 14, 16]]) >>> b = np.arange(24).reshape(2,3,4) >>> m0, m1, m2 = margins(b) >>> m0 array([[[ 66]], [[210]]]) >>> m1 array([[[ 60], [ 92], [124]]]) >>> m2 array([[[60, 66, 72, 78]]]) """ margsums = [] ranged = list(range(a.ndim)) for k in ranged: marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k]) margsums.append(marg) return margsums def expected_freq(observed): """ Compute the expected frequencies from a contingency table. Given an n-dimensional contingency table of observed frequencies, compute the expected frequencies for the table based on the marginal sums under the assumption that the groups associated with each dimension are independent. Parameters ---------- observed : array_like The table of observed frequencies. (While this function can handle a 1-D array, that case is trivial. Generally `observed` is at least 2-D.) Returns ------- expected : ndarray of float64 The expected frequencies, based on the marginal sums of the table. Same shape as `observed`. Examples -------- >>> observed = np.array([[10, 10, 20],[20, 20, 20]]) >>> from scipy.stats import expected_freq >>> expected_freq(observed) array([[ 12., 12., 16.], [ 18., 18., 24.]]) """ # Typically `observed` is an integer array. If `observed` has a large # number of dimensions or holds large values, some of the following # computations may overflow, so we first switch to floating point. observed = np.asarray(observed, dtype=np.float64) # Create a list of the marginal sums. margsums = margins(observed) # Create the array of expected frequencies. The shapes of the # marginal sums returned by apply_over_axes() are just what we # need for broadcasting in the following product. d = observed.ndim expected = reduce(np.multiply, margsums) / observed.sum() ** (d - 1) return expected def chi2_contingency(observed, correction=True, lambda_=None): """Chi-square test of independence of variables in a contingency table. This function computes the chi-square statistic and p-value for the hypothesis test of independence of the observed frequencies in the contingency table [1]_ `observed`. The expected frequencies are computed based on the marginal sums under the assumption of independence; see `scipy.stats.contingency.expected_freq`. The number of degrees of freedom is (expressed using numpy functions and attributes):: dof = observed.size - sum(observed.shape) + observed.ndim - 1 Parameters ---------- observed : array_like The contingency table. The table contains the observed frequencies (i.e. number of occurrences) in each category. In the two-dimensional case, the table is often described as an "R x C table". 
correction : bool, optional If True, *and* the degrees of freedom is 1, apply Yates' correction for continuity. The effect of the correction is to adjust each observed value by 0.5 towards the corresponding expected value. lambda_ : float or str, optional. By default, the statistic computed in this test is Pearson's chi-squared statistic [2]_. `lambda_` allows a statistic from the Cressie-Read power divergence family [3]_ to be used instead. See `power_divergence` for details. Returns ------- chi2 : float The test statistic. p : float The p-value of the test dof : int Degrees of freedom expected : ndarray, same shape as `observed` The expected frequencies, based on the marginal sums of the table. See Also -------- contingency.expected_freq fisher_exact chisquare power_divergence Notes ----- An often quoted guideline for the validity of this calculation is that the test should be used only if the observed and expected frequencies in each cell are at least 5. This is a test for the independence of different categories of a population. The test is only meaningful when the dimension of `observed` is two or more. Applying the test to a one-dimensional table will always result in `expected` equal to `observed` and a chi-square statistic equal to 0. This function does not handle masked arrays, because the calculation does not make sense with missing values. Like stats.chisquare, this function computes a chi-square statistic; the convenience this function provides is to figure out the expected frequencies and degrees of freedom from the given contingency table. If these were already known, and if the Yates' correction was not required, one could use stats.chisquare. That is, if one calls:: chi2, p, dof, ex = chi2_contingency(obs, correction=False) then the following is true:: (chi2, p) == stats.chisquare(obs.ravel(), f_exp=ex.ravel(), ddof=obs.size - 1 - dof) The `lambda_` argument was added in version 0.13.0 of scipy. References ---------- .. [1] "Contingency table", http://en.wikipedia.org/wiki/Contingency_table .. [2] "Pearson's chi-squared test", http://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test .. [3] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984), pp. 440-464. Examples -------- A two-way example (2 x 3): >>> from scipy.stats import chi2_contingency >>> obs = np.array([[10, 10, 20], [20, 20, 20]]) >>> chi2_contingency(obs) (2.7777777777777777, 0.24935220877729619, 2, array([[ 12., 12., 16.], [ 18., 18., 24.]])) Perform the test using the log-likelihood ratio (i.e. the "G-test") instead of Pearson's chi-squared statistic. >>> g, p, dof, expctd = chi2_contingency(obs, lambda_="log-likelihood") >>> g, p (2.7688587616781319, 0.25046668010954165) A four-way example (2 x 2 x 2 x 2): >>> obs = np.array( ... [[[[12, 17], ... [11, 16]], ... [[11, 12], ... [15, 16]]], ... [[[23, 15], ... [30, 22]], ... [[14, 17], ... 
[15, 16]]]]) >>> chi2_contingency(obs) (8.7584514426741897, 0.64417725029295503, 11, array([[[[ 14.15462386, 14.15462386], [ 16.49423111, 16.49423111]], [[ 11.2461395 , 11.2461395 ], [ 13.10500554, 13.10500554]]], [[[ 19.5591166 , 19.5591166 ], [ 22.79202844, 22.79202844]], [[ 15.54012004, 15.54012004], [ 18.10873492, 18.10873492]]]])) """ observed = np.asarray(observed) if np.any(observed < 0): raise ValueError("All values in `observed` must be nonnegative.") if observed.size == 0: raise ValueError("No data; `observed` has size 0.") expected = expected_freq(observed) if np.any(expected == 0): # Include one of the positions where expected is zero in # the exception message. zeropos = list(zip(*np.where(expected == 0)))[0] raise ValueError("The internally computed table of expected " "frequencies has a zero element at %s." % (zeropos,)) # The degrees of freedom dof = expected.size - sum(expected.shape) + expected.ndim - 1 if dof == 0: # Degenerate case; this occurs when `observed` is 1D (or, more # generally, when it has only one nontrivial dimension). In this # case, we also have observed == expected, so chi2 is 0. chi2 = 0.0 p = 1.0 else: if dof == 1 and correction: # Adjust `observed` according to Yates' correction for continuity. observed = observed + 0.5 * np.sign(expected - observed) chi2, p = power_divergence(observed, expected, ddof=observed.size - 1 - dof, axis=None, lambda_=lambda_) return chi2, p, dof, expected
9,324
33.032847
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_binned_statistic.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_allclose from scipy.stats import (binned_statistic, binned_statistic_2d, binned_statistic_dd) from scipy._lib.six import u from .common_tests import check_named_results class TestBinnedStatistic(object): @classmethod def setup_class(cls): np.random.seed(9865) cls.x = np.random.random(100) cls.y = np.random.random(100) cls.v = np.random.random(100) cls.X = np.random.random((100, 3)) cls.w = np.random.random(100) def test_1d_count(self): x = self.x v = self.v count1, edges1, bc = binned_statistic(x, v, 'count', bins=10) count2, edges2 = np.histogram(x, bins=10) assert_allclose(count1, count2) assert_allclose(edges1, edges2) def test_gh5927(self): # smoke test for gh5927 - binned_statistic was using `is` for string # comparison x = self.x v = self.v statistics = [u'mean', u'median', u'count', u'sum'] for statistic in statistics: res = binned_statistic(x, v, statistic, bins=10) def test_1d_result_attributes(self): x = self.x v = self.v res = binned_statistic(x, v, 'count', bins=10) attributes = ('statistic', 'bin_edges', 'binnumber') check_named_results(res, attributes) def test_1d_sum(self): x = self.x v = self.v sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10) sum2, edges2 = np.histogram(x, bins=10, weights=v) assert_allclose(sum1, sum2) assert_allclose(edges1, edges2) def test_1d_mean(self): x = self.x v = self.v stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10) stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_1d_std(self): x = self.x v = self.v stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10) stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_1d_min(self): x = self.x v = self.v stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10) stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_1d_max(self): x = self.x v = self.v stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10) stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_1d_median(self): x = self.x v = self.v stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10) stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_1d_bincode(self): x = self.x[:20] v = self.v[:20] count1, edges1, bc = binned_statistic(x, v, 'count', bins=3) bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1, 1, 2, 1]) bcount = [(bc == i).sum() for i in np.unique(bc)] assert_allclose(bc, bc2) assert_allclose(bcount, count1) def test_1d_range_keyword(self): # Regression test for gh-3063, range can be (min, max) or [(min, max)] np.random.seed(9865) x = np.arange(30) data = np.random.random(30) mean, bins, _ = binned_statistic(x[:15], data[:15]) mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)]) mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14)) assert_allclose(mean, mean_range) assert_allclose(bins, bins_range) assert_allclose(mean, mean_range2) assert_allclose(bins, bins_range2) def test_1d_multi_values(self): x = self.x v = self.v w = self.w stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10) stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', 
bins=10) stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10) assert_allclose(stat2[0], stat1v) assert_allclose(stat2[1], stat1w) assert_allclose(edges1v, edges2) assert_allclose(bc1v, bc2) def test_2d_count(self): x = self.x y = self.y v = self.v count1, binx1, biny1, bc = binned_statistic_2d( x, y, v, 'count', bins=5) count2, binx2, biny2 = np.histogram2d(x, y, bins=5) assert_allclose(count1, count2) assert_allclose(binx1, binx2) assert_allclose(biny1, biny2) def test_2d_result_attributes(self): x = self.x y = self.y v = self.v res = binned_statistic_2d(x, y, v, 'count', bins=5) attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber') check_named_results(res, attributes) def test_2d_sum(self): x = self.x y = self.y v = self.v sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5) sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v) assert_allclose(sum1, sum2) assert_allclose(binx1, binx2) assert_allclose(biny1, biny2) def test_2d_mean(self): x = self.x y = self.y v = self.v stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5) stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5) assert_allclose(stat1, stat2) assert_allclose(binx1, binx2) assert_allclose(biny1, biny2) def test_2d_mean_unicode(self): x = self.x y = self.y v = self.v stat1, binx1, biny1, bc = binned_statistic_2d( x, y, v, u('mean'), bins=5) stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5) assert_allclose(stat1, stat2) assert_allclose(binx1, binx2) assert_allclose(biny1, biny2) def test_2d_std(self): x = self.x y = self.y v = self.v stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5) stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5) assert_allclose(stat1, stat2) assert_allclose(binx1, binx2) assert_allclose(biny1, biny2) def test_2d_min(self): x = self.x y = self.y v = self.v stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5) stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5) assert_allclose(stat1, stat2) assert_allclose(binx1, binx2) assert_allclose(biny1, biny2) def test_2d_max(self): x = self.x y = self.y v = self.v stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5) stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5) assert_allclose(stat1, stat2) assert_allclose(binx1, binx2) assert_allclose(biny1, biny2) def test_2d_median(self): x = self.x y = self.y v = self.v stat1, binx1, biny1, bc = binned_statistic_2d( x, y, v, 'median', bins=5) stat2, binx2, biny2, bc = binned_statistic_2d( x, y, v, np.median, bins=5) assert_allclose(stat1, stat2) assert_allclose(binx1, binx2) assert_allclose(biny1, biny2) def test_2d_bincode(self): x = self.x[:20] y = self.y[:20] v = self.v[:20] count1, binx1, biny1, bc = binned_statistic_2d( x, y, v, 'count', bins=3) bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16, 6, 11, 16, 6, 6, 11, 8]) bcount = [(bc == i).sum() for i in np.unique(bc)] assert_allclose(bc, bc2) count1adj = count1[count1.nonzero()] assert_allclose(bcount, count1adj) def test_2d_multi_values(self): x = self.x y = self.y v = self.v w = self.w stat1v, binx1v, biny1v, bc1v = binned_statistic_2d( x, y, v, 'mean', bins=8) stat1w, binx1w, biny1w, bc1w = binned_statistic_2d( x, y, w, 'mean', bins=8) stat2, binx2, biny2, bc2 = binned_statistic_2d( x, y, [v, w], 'mean', bins=8) assert_allclose(stat2[0], stat1v) assert_allclose(stat2[1], stat1w) assert_allclose(binx1v, binx2) assert_allclose(biny1w, biny2) 
assert_allclose(bc1v, bc2) def test_2d_binnumbers_unraveled(self): x = self.x y = self.y v = self.v stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20) stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10) stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d( x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True) bcx3 = np.searchsorted(edgesx, x, side='right') bcy3 = np.searchsorted(edgesy, y, side='right') # `numpy.searchsorted` is non-inclusive on right-edge, compensate bcx3[x == x.max()] -= 1 bcy3[y == y.max()] -= 1 assert_allclose(bcx, bc2[0]) assert_allclose(bcy, bc2[1]) assert_allclose(bcx3, bc2[0]) assert_allclose(bcy3, bc2[1]) def test_dd_count(self): X = self.X v = self.v count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3) count2, edges2 = np.histogramdd(X, bins=3) assert_allclose(count1, count2) assert_allclose(edges1, edges2) def test_dd_result_attributes(self): X = self.X v = self.v res = binned_statistic_dd(X, v, 'count', bins=3) attributes = ('statistic', 'bin_edges', 'binnumber') check_named_results(res, attributes) def test_dd_sum(self): X = self.X v = self.v sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3) sum2, edges2 = np.histogramdd(X, bins=3, weights=v) assert_allclose(sum1, sum2) assert_allclose(edges1, edges2) def test_dd_mean(self): X = self.X v = self.v stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3) stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_dd_std(self): X = self.X v = self.v stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3) stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_dd_min(self): X = self.X v = self.v stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3) stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_dd_max(self): X = self.X v = self.v stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3) stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_dd_median(self): X = self.X v = self.v stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3) stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3) assert_allclose(stat1, stat2) assert_allclose(edges1, edges2) def test_dd_bincode(self): X = self.X[:20] v = self.v[:20] count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3) bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92, 32, 36, 91, 43, 87, 81, 81]) bcount = [(bc == i).sum() for i in np.unique(bc)] assert_allclose(bc, bc2) count1adj = count1[count1.nonzero()] assert_allclose(bcount, count1adj) def test_dd_multi_values(self): X = self.X v = self.v w = self.w stat1v, edges1v, bc1v = binned_statistic_dd(X, v, np.std, bins=8) stat1w, edges1w, bc1w = binned_statistic_dd(X, w, np.std, bins=8) stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], np.std, bins=8) assert_allclose(stat2[0], stat1v) assert_allclose(stat2[1], stat1w) assert_allclose(edges1v, edges2) assert_allclose(edges1w, edges2) assert_allclose(bc1v, bc2) def test_dd_binnumbers_unraveled(self): X = self.X v = self.v stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15) stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20) stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10) stat2, edges2, bc2 = binned_statistic_dd( X, v, 
'mean', bins=(15, 20, 10), expand_binnumbers=True) assert_allclose(bcx, bc2[0]) assert_allclose(bcy, bc2[1]) assert_allclose(bcz, bc2[2])
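# --- Editor's sketch (not part of the test module above) ----------------------
# The tests above repeatedly compare `binned_statistic` against `np.histogram`.
# This sketch makes the relationship explicit for the 'mean' statistic: it is
# the per-bin weighted histogram divided by the per-bin count.  It assumes every
# bin receives at least one sample; an empty bin would be NaN on the left-hand
# side and a 0/0 division on the right.
import numpy as np
from scipy.stats import binned_statistic

rng = np.random.RandomState(0)
x = rng.random_sample(1000)
v = rng.random_sample(1000)

means, edges, _ = binned_statistic(x, v, 'mean', bins=10)
sums, _ = np.histogram(x, bins=edges, weights=v)   # per-bin sum of v
counts, _ = np.histogram(x, bins=edges)            # per-bin sample count
assert np.allclose(means, sums / counts)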
13,447
29.703196
79
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_morestats.py
# Author: Travis Oliphant, 2002 # # Further enhancements and tests added by numerous SciPy developers. # from __future__ import division, print_function, absolute_import import warnings import numpy as np from numpy.random import RandomState from numpy.testing import (assert_array_equal, assert_almost_equal, assert_array_less, assert_array_almost_equal, assert_, assert_allclose, assert_equal, assert_warns) import pytest from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings from scipy import stats from .common_tests import check_named_results # Matplotlib is not a scipy dependency but is optionally used in probplot, so # check if it's available try: import matplotlib.pyplot as plt have_matplotlib = True except: have_matplotlib = False g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000] g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988] g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996] g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996] g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996] g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996] g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002] g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006] g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991] g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997] class TestBayes_mvs(object): def test_basic(self): # Expected values in this test simply taken from the function. For # some checks regarding correctness of implementation, see review in # gh-674 data = [6, 9, 12, 7, 8, 8, 13] mean, var, std = stats.bayes_mvs(data) assert_almost_equal(mean.statistic, 9.0) assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467), rtol=1e-14) assert_almost_equal(var.statistic, 10.0) assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018), rtol=1e-09) assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14) assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312), rtol=1e-14) def test_empty_input(self): assert_raises(ValueError, stats.bayes_mvs, []) def test_result_attributes(self): x = np.arange(15) attributes = ('statistic', 'minmax') res = stats.bayes_mvs(x) for i in res: check_named_results(i, attributes) class TestMvsdist(object): def test_basic(self): data = [6, 9, 12, 7, 8, 8, 13] mean, var, std = stats.mvsdist(data) assert_almost_equal(mean.mean(), 9.0) assert_allclose(mean.interval(0.9), (7.1036502226125329, 10.896349777387467), rtol=1e-14) assert_almost_equal(var.mean(), 10.0) assert_allclose(var.interval(0.9), (3.1767242068607087, 24.45910381334018), rtol=1e-09) assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14) assert_allclose(std.interval(0.9), (1.7823367265645145, 4.9456146050146312), rtol=1e-14) def test_empty_input(self): assert_raises(ValueError, stats.mvsdist, []) def test_bad_arg(self): # Raise ValueError if fewer than two data points are given. 
data = [1] assert_raises(ValueError, stats.mvsdist, data) def test_warns(self): # regression test for gh-5270 # make sure there are no spurious divide-by-zero warnings with warnings.catch_warnings(): warnings.simplefilter('error', RuntimeWarning) [x.mean() for x in stats.mvsdist([1, 2, 3])] [x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])] class TestShapiro(object): def test_basic(self): x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46, 4.43, 0.21, 4.75, 0.71, 1.52, 3.24, 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66] w, pw = stats.shapiro(x1) assert_almost_equal(w, 0.90047299861907959, 6) assert_almost_equal(pw, 0.042089745402336121, 6) x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11, 3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69, 0.08, 3.67, 2.81, 3.49] w, pw = stats.shapiro(x2) assert_almost_equal(w, 0.9590270, 6) assert_almost_equal(pw, 0.52460, 3) # Verified against R np.random.seed(12345678) x3 = stats.norm.rvs(loc=5, scale=3, size=100) w, pw = stats.shapiro(x3) assert_almost_equal(w, 0.9772805571556091, decimal=6) assert_almost_equal(pw, 0.08144091814756393, decimal=3) # Extracted from original paper x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614, 0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206, 3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351] W_expected = 0.83467 p_expected = 0.000914 w, pw = stats.shapiro(x4) assert_almost_equal(w, W_expected, decimal=4) assert_almost_equal(pw, p_expected, decimal=5) def test_2d(self): x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46, 4.43, 0.21, 4.75], [0.71, 1.52, 3.24, 0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]] w, pw = stats.shapiro(x1) assert_almost_equal(w, 0.90047299861907959, 6) assert_almost_equal(pw, 0.042089745402336121, 6) x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11, 3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69, 0.08, 3.67, 2.81, 3.49]] w, pw = stats.shapiro(x2) assert_almost_equal(w, 0.9590270, 6) assert_almost_equal(pw, 0.52460, 3) def test_empty_input(self): assert_raises(ValueError, stats.shapiro, []) assert_raises(ValueError, stats.shapiro, [[], [], []]) def test_not_enough_values(self): assert_raises(ValueError, stats.shapiro, [1, 2]) assert_raises(ValueError, stats.shapiro, [[], [2]]) def test_bad_arg(self): # Length of x is less than 3. x = [1] assert_raises(ValueError, stats.shapiro, x) def test_nan_input(self): x = np.arange(10.) x[9] = np.nan w, pw = stats.shapiro(x) assert_equal(w, np.nan) assert_almost_equal(pw, 1.0) class TestAnderson(object): def test_normal(self): rs = RandomState(1234567890) x1 = rs.standard_exponential(size=50) x2 = rs.standard_normal(size=50) A, crit, sig = stats.anderson(x1) assert_array_less(crit[:-1], A) A, crit, sig = stats.anderson(x2) assert_array_less(A, crit[-2:]) v = np.ones(10) v[0] = 0 A, crit, sig = stats.anderson(v) # The expected statistic 3.208057 was computed independently of scipy. # For example, in R: # > library(nortest) # > v <- rep(1, 10) # > v[1] <- 0 # > result <- ad.test(v) # > result$statistic # A # 3.208057 assert_allclose(A, 3.208057) def test_expon(self): rs = RandomState(1234567890) x1 = rs.standard_exponential(size=50) x2 = rs.standard_normal(size=50) A, crit, sig = stats.anderson(x1, 'expon') assert_array_less(A, crit[-2:]) olderr = np.seterr(all='ignore') try: A, crit, sig = stats.anderson(x2, 'expon') finally: np.seterr(**olderr) assert_(A > crit[-1]) def test_gumbel(self): # Regression test for gh-6306. Before that issue was fixed, # this case would return a2=inf. 
v = np.ones(100) v[0] = 0.0 a2, crit, sig = stats.anderson(v, 'gumbel') # A brief reimplementation of the calculation of the statistic. n = len(v) xbar, s = stats.gumbel_l.fit(v) logcdf = stats.gumbel_l.logcdf(v, xbar, s) logsf = stats.gumbel_l.logsf(v, xbar, s) i = np.arange(1, n+1) expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1])) assert_allclose(a2, expected_a2) def test_bad_arg(self): assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp') def test_result_attributes(self): rs = RandomState(1234567890) x = rs.standard_exponential(size=50) res = stats.anderson(x) attributes = ('statistic', 'critical_values', 'significance_level') check_named_results(res, attributes) def test_gumbel_l(self): # gh-2592, gh-6337 # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist. rs = RandomState(1234567890) x = rs.gumbel(size=100) A1, crit1, sig1 = stats.anderson(x, 'gumbel') A2, crit2, sig2 = stats.anderson(x, 'gumbel_l') assert_allclose(A2, A1) def test_gumbel_r(self): # gh-2592, gh-6337 # Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist. rs = RandomState(1234567890) x1 = rs.gumbel(size=100) x2 = np.ones(100) A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r') A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r') assert_array_less(A1, crit1[-2:]) assert_(A2 > crit2[-1]) class TestAndersonKSamp(object): def test_example1a(self): # Example data from Scholz & Stephens (1987), originally # published in Lehmann (1995, Nonparametrics, Statistical # Methods Based on Ranks, p. 309) # Pass a mixture of lists and arrays t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0] t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4), midrank=False) with suppress_warnings() as sup: sup.filter(UserWarning, message='approximate p-value') Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False) assert_almost_equal(Tk, 4.449, 3) assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459], tm, 4) assert_almost_equal(p, 0.0021, 4) def test_example1b(self): # Example data from Scholz & Stephens (1987), originally # published in Lehmann (1995, Nonparametrics, Statistical # Methods Based on Ranks, p. 
309) # Pass arrays t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]) t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) with suppress_warnings() as sup: sup.filter(UserWarning, message='approximate p-value') Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True) assert_almost_equal(Tk, 4.480, 3) assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459], tm, 4) assert_almost_equal(p, 0.0020, 4) def test_example2a(self): # Example data taken from an earlier technical report of # Scholz and Stephens # Pass lists instead of arrays t1 = [194, 15, 41, 29, 33, 181] t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118] t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34] t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29, 118, 25, 156, 310, 76, 26, 44, 23, 62] t5 = [130, 208, 70, 101, 208] t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27] t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33] t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95] t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82, 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24] t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36, 22, 139, 210, 97, 30, 23, 13, 14] t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438] t12 = [50, 254, 5, 283, 35, 12] t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130] t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66, 61, 34] with suppress_warnings() as sup: sup.filter(UserWarning, message='approximate p-value') Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14), midrank=False) assert_almost_equal(Tk, 3.288, 3) assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009], tm, 4) assert_almost_equal(p, 0.0041, 4) def test_example2b(self): # Example data taken from an earlier technical report of # Scholz and Stephens t1 = [194, 15, 41, 29, 33, 181] t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118] t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34] t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29, 118, 25, 156, 310, 76, 26, 44, 23, 62] t5 = [130, 208, 70, 101, 208] t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27] t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33] t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95] t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82, 54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24] t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36, 22, 139, 210, 97, 30, 23, 13, 14] t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438] t12 = [50, 254, 5, 283, 35, 12] t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130] t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66, 61, 34] with suppress_warnings() as sup: sup.filter(UserWarning, message='approximate p-value') Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14), midrank=True) assert_almost_equal(Tk, 3.294, 3) assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009], tm, 4) assert_almost_equal(p, 0.0041, 4) def test_not_enough_samples(self): assert_raises(ValueError, stats.anderson_ksamp, np.ones(5)) def test_no_distinct_observations(self): 
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), np.ones(5))) def test_empty_sample(self): assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), [])) def test_result_attributes(self): # Example data from Scholz & Stephens (1987), originally # published in Lehmann (1995, Nonparametrics, Statistical # Methods Based on Ranks, p. 309) # Pass a mixture of lists and arrays t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0] t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8]) t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0]) t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8]) with suppress_warnings() as sup: sup.filter(UserWarning, message='approximate p-value') res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False) attributes = ('statistic', 'critical_values', 'significance_level') check_named_results(res, attributes) def test_overflow(self): # when significance_level approximation overflows, should still return with suppress_warnings() as sup: sup.filter(UserWarning, message='approximate p-value') res = stats.anderson_ksamp([[-20, -10] * 100, [-10, 40, 12] * 100]) assert_almost_equal(res[0], 272.796, 3) class TestAnsari(object): def test_small(self): x = [1, 2, 3, 3, 4] y = [3, 2, 6, 1, 6, 1, 4, 1] with suppress_warnings() as sup: sup.filter(UserWarning, "Ties preclude use of exact statistic.") W, pval = stats.ansari(x, y) assert_almost_equal(W, 23.5, 11) assert_almost_equal(pval, 0.13499256881897437, 11) def test_approx(self): ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99, 101, 96, 97, 102, 107, 113, 116, 113, 110, 98)) parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104, 100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99)) with suppress_warnings() as sup: sup.filter(UserWarning, "Ties preclude use of exact statistic.") W, pval = stats.ansari(ramsay, parekh) assert_almost_equal(W, 185.5, 11) assert_almost_equal(pval, 0.18145819972867083, 11) def test_exact(self): W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12]) assert_almost_equal(W, 10.0, 11) assert_almost_equal(pval, 0.533333333333333333, 7) def test_bad_arg(self): assert_raises(ValueError, stats.ansari, [], [1]) assert_raises(ValueError, stats.ansari, [1], []) def test_result_attributes(self): x = [1, 2, 3, 3, 4] y = [3, 2, 6, 1, 6, 1, 4, 1] with suppress_warnings() as sup: sup.filter(UserWarning, "Ties preclude use of exact statistic.") res = stats.ansari(x, y) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) class TestBartlett(object): def test_data(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] T, pval = stats.bartlett(*args) assert_almost_equal(T, 20.78587342806484, 7) assert_almost_equal(pval, 0.0136358632781, 7) def test_bad_arg(self): # Too few args raises ValueError. assert_raises(ValueError, stats.bartlett, [1]) def test_result_attributes(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] res = stats.bartlett(*args) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) def test_empty_arg(self): args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, []) assert_equal((np.nan, np.nan), stats.bartlett(*args)) class TestLevene(object): def test_data(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] W, pval = stats.levene(*args) assert_almost_equal(W, 1.7059176930008939, 7) assert_almost_equal(pval, 0.0990829755522, 7) def test_trimmed1(self): # Test that center='trimmed' gives the same result as center='mean' # when proportiontocut=0. 
W1, pval1 = stats.levene(g1, g2, g3, center='mean') W2, pval2 = stats.levene(g1, g2, g3, center='trimmed', proportiontocut=0.0) assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_trimmed2(self): x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] np.random.seed(1234) x2 = np.random.permutation(x) # Use center='trimmed' W0, pval0 = stats.levene(x, y, center='trimmed', proportiontocut=0.125) W1, pval1 = stats.levene(x2, y, center='trimmed', proportiontocut=0.125) # Trim the data here, and use center='mean' W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean') # Result should be the same. assert_almost_equal(W0, W2) assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_equal_mean_median(self): x = np.linspace(-1, 1, 21) np.random.seed(1234) x2 = np.random.permutation(x) y = x**3 W1, pval1 = stats.levene(x, y, center='mean') W2, pval2 = stats.levene(x2, y, center='median') assert_almost_equal(W1, W2) assert_almost_equal(pval1, pval2) def test_bad_keyword(self): x = np.linspace(-1, 1, 21) assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1) def test_bad_center_value(self): x = np.linspace(-1, 1, 21) assert_raises(ValueError, stats.levene, x, x, center='trim') def test_too_few_args(self): assert_raises(ValueError, stats.levene, [1]) def test_result_attributes(self): args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10] res = stats.levene(*args) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) class TestBinomP(object): def test_data(self): pval = stats.binom_test(100, 250) assert_almost_equal(pval, 0.0018833009350757682, 11) pval = stats.binom_test(201, 405) assert_almost_equal(pval, 0.92085205962670713, 11) pval = stats.binom_test([682, 243], p=3.0/4) assert_almost_equal(pval, 0.38249155957481695, 11) def test_bad_len_x(self): # Length of x must be 1 or 2. assert_raises(ValueError, stats.binom_test, [1, 2, 3]) def test_bad_n(self): # len(x) is 1, but n is invalid. # Missing n assert_raises(ValueError, stats.binom_test, [100]) # n less than x[0] assert_raises(ValueError, stats.binom_test, [100], n=50) def test_bad_p(self): assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0) def test_alternatives(self): res = stats.binom_test(51, 235, p=1./6, alternative='less') assert_almost_equal(res, 0.982022657605858) res = stats.binom_test(51, 235, p=1./6, alternative='greater') assert_almost_equal(res, 0.02654424571169085) res = stats.binom_test(51, 235, p=1./6, alternative='two-sided') assert_almost_equal(res, 0.0437479701823997) class TestFligner(object): def test_data(self): # numbers from R: fligner.test in package stats x1 = np.arange(5) assert_array_almost_equal(stats.fligner(x1, x1**2), (3.2282229927203536, 0.072379187848207877), 11) def test_trimmed1(self): # Perturb input to break ties in the transformed data # See https://github.com/scipy/scipy/pull/8042 for more details rs = np.random.RandomState(123) _perturb = lambda g: (np.asarray(g) + 1e-10*rs.randn(len(g))).tolist() g1_ = _perturb(g1) g2_ = _perturb(g2) g3_ = _perturb(g3) # Test that center='trimmed' gives the same result as center='mean' # when proportiontocut=0. 
Xsq1, pval1 = stats.fligner(g1_, g2_, g3_, center='mean') Xsq2, pval2 = stats.fligner(g1_, g2_, g3_, center='trimmed', proportiontocut=0.0) assert_almost_equal(Xsq1, Xsq2) assert_almost_equal(pval1, pval2) def test_trimmed2(self): x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0] y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0] # Use center='trimmed' Xsq1, pval1 = stats.fligner(x, y, center='trimmed', proportiontocut=0.125) # Trim the data here, and use center='mean' Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean') # Result should be the same. assert_almost_equal(Xsq1, Xsq2) assert_almost_equal(pval1, pval2) # The following test looks reasonable at first, but fligner() uses the # function stats.rankdata(), and in one of the cases in this test, # there are ties, while in the other (because of normal rounding # errors) there are not. This difference leads to differences in the # third significant digit of W. # #def test_equal_mean_median(self): # x = np.linspace(-1,1,21) # y = x**3 # W1, pval1 = stats.fligner(x, y, center='mean') # W2, pval2 = stats.fligner(x, y, center='median') # assert_almost_equal(W1, W2) # assert_almost_equal(pval1, pval2) def test_bad_keyword(self): x = np.linspace(-1, 1, 21) assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1) def test_bad_center_value(self): x = np.linspace(-1, 1, 21) assert_raises(ValueError, stats.fligner, x, x, center='trim') def test_bad_num_args(self): # Too few args raises ValueError. assert_raises(ValueError, stats.fligner, [1]) def test_empty_arg(self): x = np.arange(5) assert_equal((np.nan, np.nan), stats.fligner(x, x**2, [])) class TestMood(object): def test_mood(self): # numbers from R: mood.test in package stats x1 = np.arange(5) assert_array_almost_equal(stats.mood(x1, x1**2), (-1.3830857299399906, 0.16663858066771478), 11) def test_mood_order_of_args(self): # z should change sign when the order of arguments changes, pvalue # should not change np.random.seed(1234) x1 = np.random.randn(10, 1) x2 = np.random.randn(15, 1) z1, p1 = stats.mood(x1, x2) z2, p2 = stats.mood(x2, x1) assert_array_almost_equal([z1, p1], [-z2, p2]) def test_mood_with_axis_none(self): # Test with axis = None, compare with results from R x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047, 1.59528080213779, 0.329507771815361, -0.820468384118015, 0.487429052428485, 0.738324705129217, 0.575781351653492, -0.305388387156356, 1.51178116845085, 0.389843236411431, -0.621240580541804, -2.2146998871775, 1.12493091814311, -0.0449336090152309, -0.0161902630989461, 0.943836210685299, 0.821221195098089, 0.593901321217509] x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882, -1.13037567424629, -0.0802517565509893, 0.132420284381094, 0.707954729271733, -0.23969802417184, 1.98447393665293, -0.138787012119665, 0.417650750792556, 0.981752777463662, -0.392695355503813, -1.03966897694891, 1.78222896030858, -2.31106908460517, 0.878604580921265, 0.035806718015226, 1.01282869212708, 0.432265154539617, 2.09081920524915, -1.19992581964387, 1.58963820029007, 1.95465164222325, 0.00493777682814261, -2.45170638784613, 0.477237302613617, -0.596558168631403, 0.792203270299649, 0.289636710177348] x1 = np.array(x1) x2 = np.array(x2) x1.shape = (10, 2) x2.shape = (15, 2) assert_array_almost_equal(stats.mood(x1, x2, axis=None), [-1.31716607555, 0.18778296257]) def test_mood_2d(self): # Test if the results of mood test in 2-D case are consistent with the # R result for the same inputs. Numbers from R mood.test(). 
ny = 5 np.random.seed(1234) x1 = np.random.randn(10, ny) x2 = np.random.randn(15, ny) z_vectest, pval_vectest = stats.mood(x1, x2) for j in range(ny): assert_array_almost_equal([z_vectest[j], pval_vectest[j]], stats.mood(x1[:, j], x2[:, j])) # inverse order of dimensions x1 = x1.transpose() x2 = x2.transpose() z_vectest, pval_vectest = stats.mood(x1, x2, axis=1) for i in range(ny): # check axis handling is self consistent assert_array_almost_equal([z_vectest[i], pval_vectest[i]], stats.mood(x1[i, :], x2[i, :])) def test_mood_3d(self): shape = (10, 5, 6) np.random.seed(1234) x1 = np.random.randn(*shape) x2 = np.random.randn(*shape) for axis in range(3): z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis) # Tests that result for 3-D arrays is equal to that for the # same calculation on a set of 1-D arrays taken from the # 3-D array axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis for i in range(shape[axes_idx[axis][0]]): for j in range(shape[axes_idx[axis][1]]): if axis == 0: slice1 = x1[:, i, j] slice2 = x2[:, i, j] elif axis == 1: slice1 = x1[i, :, j] slice2 = x2[i, :, j] else: slice1 = x1[i, j, :] slice2 = x2[i, j, :] assert_array_almost_equal([z_vectest[i, j], pval_vectest[i, j]], stats.mood(slice1, slice2)) def test_mood_bad_arg(self): # Raise ValueError when the sum of the lengths of the args is # less than 3 assert_raises(ValueError, stats.mood, [1], []) class TestProbplot(object): def test_basic(self): np.random.seed(12345) x = stats.norm.rvs(size=20) osm, osr = stats.probplot(x, fit=False) osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575, -0.73908135, -0.5857176, -0.44506467, -0.31273668, -0.18568928, -0.06158146, 0.06158146, 0.18568928, 0.31273668, 0.44506467, 0.5857176, 0.73908135, 0.91222575, 1.11829229, 1.38768012, 1.8241636] assert_allclose(osr, np.sort(x)) assert_allclose(osm, osm_expected) res, res_fit = stats.probplot(x, fit=True) res_fit_expected = [1.05361841, 0.31297795, 0.98741609] assert_allclose(res_fit, res_fit_expected) def test_sparams_keyword(self): np.random.seed(123456) x = stats.norm.rvs(size=100) # Check that None, () and 0 (loc=0, for normal distribution) all work # and give the same results osm1, osr1 = stats.probplot(x, sparams=None, fit=False) osm2, osr2 = stats.probplot(x, sparams=0, fit=False) osm3, osr3 = stats.probplot(x, sparams=(), fit=False) assert_allclose(osm1, osm2) assert_allclose(osm1, osm3) assert_allclose(osr1, osr2) assert_allclose(osr1, osr3) # Check giving (loc, scale) params for normal distribution osm, osr = stats.probplot(x, sparams=(), fit=False) def test_dist_keyword(self): np.random.seed(12345) x = stats.norm.rvs(size=20) osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,)) osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,)) assert_allclose(osm1, osm2) assert_allclose(osr1, osr2) assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name') assert_raises(AttributeError, stats.probplot, x, dist=[]) class custom_dist(object): """Some class that looks just enough like a distribution.""" def ppf(self, q): return stats.norm.ppf(q, loc=2) osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False) osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False) assert_allclose(osm1, osm2) assert_allclose(osr1, osr2) @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") def test_plot_kwarg(self): np.random.seed(7654321) fig = plt.figure() fig.add_subplot(111) x = stats.t.rvs(3, size=100) res1, fitres1 = stats.probplot(x, plot=plt) plt.close() res2, fitres2 = 
stats.probplot(x, plot=None) res3 = stats.probplot(x, fit=False, plot=plt) plt.close() res4 = stats.probplot(x, fit=False, plot=None) # Check that results are consistent between combinations of `fit` and # `plot` keywords. assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2) assert_allclose(res1, res2) assert_allclose(res1, res3) assert_allclose(res1, res4) assert_allclose(fitres1, fitres2) # Check that a Matplotlib Axes object is accepted fig = plt.figure() ax = fig.add_subplot(111) stats.probplot(x, fit=False, plot=ax) plt.close() def test_probplot_bad_args(self): # Raise ValueError when given an invalid distribution. assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp") def test_empty(self): assert_equal(stats.probplot([], fit=False), (np.array([]), np.array([]))) assert_equal(stats.probplot([], fit=True), ((np.array([]), np.array([])), (np.nan, np.nan, 0.0))) def test_array_of_size_one(self): with np.errstate(invalid='ignore'): assert_equal(stats.probplot([1], fit=True), ((np.array([0.]), np.array([1])), (np.nan, np.nan, 0.0))) def test_wilcoxon_bad_arg(): # Raise ValueError when two args of different lengths are given or # zero_method is unknown. assert_raises(ValueError, stats.wilcoxon, [1], [1, 2]) assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy") def test_wilcoxon_arg_type(): # Should be able to accept list as arguments. # Address issue 6070. arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2] _ = stats.wilcoxon(arr, zero_method="pratt") _ = stats.wilcoxon(arr, zero_method="zsplit") _ = stats.wilcoxon(arr, zero_method="wilcox") class TestKstat(object): def test_moments_normal_distribution(self): np.random.seed(32149) data = np.random.randn(12345) moments = [] for n in [1, 2, 3, 4]: moments.append(stats.kstat(data, n)) expected = [0.011315, 1.017931, 0.05811052, 0.0754134] assert_allclose(moments, expected, rtol=1e-4) # test equivalence with `stats.moment` m1 = stats.moment(data, moment=1) m2 = stats.moment(data, moment=2) m3 = stats.moment(data, moment=3) assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2) def test_empty_input(self): assert_raises(ValueError, stats.kstat, []) def test_nan_input(self): data = np.arange(10.) data[6] = np.nan assert_equal(stats.kstat(data), np.nan) def test_kstat_bad_arg(self): # Raise ValueError if n > 4 or n < 1. data = np.arange(10) for n in [0, 4.001]: assert_raises(ValueError, stats.kstat, data, n=n) class TestKstatVar(object): def test_empty_input(self): assert_raises(ValueError, stats.kstatvar, []) def test_nan_input(self): data = np.arange(10.) data[6] = np.nan assert_equal(stats.kstat(data), np.nan) def test_bad_arg(self): # Raise ValueError is n is not 1 or 2. data = [1] n = 10 assert_raises(ValueError, stats.kstatvar, data, n=n) class TestPpccPlot(object): def setup_method(self): np.random.seed(7654321) self.x = stats.loggamma.rvs(5, size=500) + 5 def test_basic(self): N = 5 svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N) ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182, 0.93519298] assert_allclose(svals, np.linspace(-10, 10, num=N)) assert_allclose(ppcc, ppcc_expected) def test_dist(self): # Test that we can specify distributions both by name and as objects. 
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda') svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10, dist=stats.tukeylambda) assert_allclose(svals1, svals2, rtol=1e-20) assert_allclose(ppcc1, ppcc2, rtol=1e-20) # Test that 'tukeylambda' is the default dist svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10) assert_allclose(svals1, svals3, rtol=1e-20) assert_allclose(ppcc1, ppcc3, rtol=1e-20) @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") def test_plot_kwarg(self): # Check with the matplotlib.pyplot module fig = plt.figure() ax = fig.add_subplot(111) stats.ppcc_plot(self.x, -20, 20, plot=plt) fig.delaxes(ax) # Check that a Matplotlib Axes object is accepted ax = fig.add_subplot(111) stats.ppcc_plot(self.x, -20, 20, plot=ax) plt.close() def test_invalid_inputs(self): # `b` has to be larger than `a` assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0) # Raise ValueError when given an invalid distribution. assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1, dist="plate_of_shrimp") def test_empty(self): # For consistency with probplot return for one empty array, # ppcc contains all zeros and svals is the same as for normal array # input. svals, ppcc = stats.ppcc_plot([], 0, 1) assert_allclose(svals, np.linspace(0, 1, num=80)) assert_allclose(ppcc, np.zeros(80, dtype=float)) class TestPpccMax(object): def test_ppcc_max_bad_arg(self): # Raise ValueError when given an invalid distribution. data = [1] assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp") def test_ppcc_max_basic(self): np.random.seed(1234567) x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7 # it is accurate up to 16 decimals assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5) def test_dist(self): np.random.seed(1234567) x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 # Test that we can specify distributions both by name and as objects. max1 = stats.ppcc_max(x, dist='tukeylambda') max2 = stats.ppcc_max(x, dist=stats.tukeylambda) assert_almost_equal(max1, -0.71215366521264145, decimal=5) assert_almost_equal(max2, -0.71215366521264145, decimal=5) # Test that 'tukeylambda' is the default dist max3 = stats.ppcc_max(x) assert_almost_equal(max3, -0.71215366521264145, decimal=5) def test_brack(self): np.random.seed(1234567) x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5)) # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7 # it is accurate up to 16 decimals assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)), -0.71215366521264145, decimal=5) # On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7 # it is accurate up to 16 decimals assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)), -0.71215366521264145, decimal=5) class TestBoxcox_llf(object): def test_basic(self): np.random.seed(54321) x = stats.norm.rvs(size=10000, loc=10) lmbda = 1 llf = stats.boxcox_llf(lmbda, x) llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2)) assert_allclose(llf, llf_expected) def test_array_like(self): np.random.seed(54321) x = stats.norm.rvs(size=100, loc=10) lmbda = 1 llf = stats.boxcox_llf(lmbda, x) llf2 = stats.boxcox_llf(lmbda, list(x)) assert_allclose(llf, llf2, rtol=1e-12) def test_2d_input(self): # Note: boxcox_llf() was already working with 2-D input (sort of), so # keep it like that. 
boxcox() doesn't work with 2-D input though, due # to brent() returning a scalar. np.random.seed(54321) x = stats.norm.rvs(size=100, loc=10) lmbda = 1 llf = stats.boxcox_llf(lmbda, x) llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T) assert_allclose([llf, llf], llf2, rtol=1e-12) def test_empty(self): assert_(np.isnan(stats.boxcox_llf(1, []))) class TestBoxcox(object): def test_fixed_lmbda(self): np.random.seed(12345) x = stats.loggamma.rvs(5, size=50) + 5 xt = stats.boxcox(x, lmbda=1) assert_allclose(xt, x - 1) xt = stats.boxcox(x, lmbda=-1) assert_allclose(xt, 1 - 1/x) xt = stats.boxcox(x, lmbda=0) assert_allclose(xt, np.log(x)) # Also test that array_like input works xt = stats.boxcox(list(x), lmbda=0) assert_allclose(xt, np.log(x)) def test_lmbda_None(self): np.random.seed(1234567) # Start from normal rv's, do inverse transform to check that # optimization function gets close to the right answer. np.random.seed(1245) lmbda = 2.5 x = stats.norm.rvs(loc=10, size=50000) x_inv = (x * lmbda + 1)**(-lmbda) xt, maxlog = stats.boxcox(x_inv) assert_almost_equal(maxlog, -1 / lmbda, decimal=2) def test_alpha(self): np.random.seed(1234) x = stats.loggamma.rvs(5, size=50) + 5 # Some regular values for alpha, on a small sample size _, _, interval = stats.boxcox(x, alpha=0.75) assert_allclose(interval, [4.004485780226041, 5.138756355035744]) _, _, interval = stats.boxcox(x, alpha=0.05) assert_allclose(interval, [1.2138178554857557, 8.209033272375663]) # Try some extreme values, see we don't hit the N=500 limit x = stats.loggamma.rvs(7, size=500) + 15 _, _, interval = stats.boxcox(x, alpha=0.001) assert_allclose(interval, [0.3988867, 11.40553131]) _, _, interval = stats.boxcox(x, alpha=0.999) assert_allclose(interval, [5.83316246, 5.83735292]) def test_boxcox_bad_arg(self): # Raise ValueError if any data value is negative. 
x = np.array([-1]) assert_raises(ValueError, stats.boxcox, x) def test_empty(self): assert_(stats.boxcox([]).shape == (0,)) class TestBoxcoxNormmax(object): def setup_method(self): np.random.seed(12345) self.x = stats.loggamma.rvs(5, size=50) + 5 def test_pearsonr(self): maxlog = stats.boxcox_normmax(self.x) assert_allclose(maxlog, 1.804465, rtol=1e-6) def test_mle(self): maxlog = stats.boxcox_normmax(self.x, method='mle') assert_allclose(maxlog, 1.758101, rtol=1e-6) # Check that boxcox() uses 'mle' _, maxlog_boxcox = stats.boxcox(self.x) assert_allclose(maxlog_boxcox, maxlog) def test_all(self): maxlog_all = stats.boxcox_normmax(self.x, method='all') assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6) class TestBoxcoxNormplot(object): def setup_method(self): np.random.seed(7654321) self.x = stats.loggamma.rvs(5, size=500) + 5 def test_basic(self): N = 5 lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N) ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057, 0.95843297] assert_allclose(lmbdas, np.linspace(-10, 10, num=N)) assert_allclose(ppcc, ppcc_expected) @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib") def test_plot_kwarg(self): # Check with the matplotlib.pyplot module fig = plt.figure() ax = fig.add_subplot(111) stats.boxcox_normplot(self.x, -20, 20, plot=plt) fig.delaxes(ax) # Check that a Matplotlib Axes object is accepted ax = fig.add_subplot(111) stats.boxcox_normplot(self.x, -20, 20, plot=ax) plt.close() def test_invalid_inputs(self): # `lb` has to be larger than `la` assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0) # `x` can not contain negative values assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1) def test_empty(self): assert_(stats.boxcox_normplot([], 0, 1).size == 0) class TestCircFuncs(object): def test_circfuncs(self): x = np.array([355, 5, 2, 359, 10, 350]) M = stats.circmean(x, high=360) Mval = 0.167690146 assert_allclose(M, Mval, rtol=1e-7) V = stats.circvar(x, high=360) Vval = 42.51955609 assert_allclose(V, Vval, rtol=1e-7) S = stats.circstd(x, high=360) Sval = 6.520702116 assert_allclose(S, Sval, rtol=1e-7) def test_circfuncs_small(self): x = np.array([20, 21, 22, 18, 19, 20.5, 19.2]) M1 = x.mean() M2 = stats.circmean(x, high=360) assert_allclose(M2, M1, rtol=1e-5) V1 = x.var() V2 = stats.circvar(x, high=360) assert_allclose(V2, V1, rtol=1e-4) S1 = x.std() S2 = stats.circstd(x, high=360) assert_allclose(S2, S1, rtol=1e-4) def test_circmean_axis(self): x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]]) M1 = stats.circmean(x, high=360) M2 = stats.circmean(x.ravel(), high=360) assert_allclose(M1, M2, rtol=1e-14) M1 = stats.circmean(x, high=360, axis=1) M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])] assert_allclose(M1, M2, rtol=1e-14) M1 = stats.circmean(x, high=360, axis=0) M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])] assert_allclose(M1, M2, rtol=1e-14) def test_circvar_axis(self): x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]]) V1 = stats.circvar(x, high=360) V2 = stats.circvar(x.ravel(), high=360) assert_allclose(V1, V2, rtol=1e-11) V1 = stats.circvar(x, high=360, axis=1) V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])] assert_allclose(V1, V2, rtol=1e-11) V1 = stats.circvar(x, high=360, axis=0) V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])] assert_allclose(V1, V2, rtol=1e-11) def test_circstd_axis(self): x = np.array([[355, 5, 2, 359, 
10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]]) S1 = stats.circstd(x, high=360) S2 = stats.circstd(x.ravel(), high=360) assert_allclose(S1, S2, rtol=1e-11) S1 = stats.circstd(x, high=360, axis=1) S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])] assert_allclose(S1, S2, rtol=1e-11) S1 = stats.circstd(x, high=360, axis=0) S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])] assert_allclose(S1, S2, rtol=1e-11) def test_circfuncs_array_like(self): x = [355, 5, 2, 359, 10, 350] assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7) assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7) assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7) def test_empty(self): assert_(np.isnan(stats.circmean([]))) assert_(np.isnan(stats.circstd([]))) assert_(np.isnan(stats.circvar([]))) def test_circmean_scalar(self): x = 1. M1 = x M2 = stats.circmean(x) assert_allclose(M2, M1, rtol=1e-5) def test_circmean_range(self): # regression test for gh-6420: circmean(..., high, low) must be # between `high` and `low` m = stats.circmean(np.arange(0, 2, 0.1), np.pi, -np.pi) assert_(m < np.pi) assert_(m > -np.pi) def test_circfuncs_unit8(self): # regression test for gh-7255: overflow when working with # numpy uint8 data type x = np.array([150, 10], dtype='uint8') assert_equal(stats.circmean(x, high=180), 170.0) assert_allclose(stats.circvar(x, high=180), 437.45871686, rtol=1e-7) assert_allclose(stats.circstd(x, high=180), 20.91551378, rtol=1e-7) def test_accuracy_wilcoxon(): freq = [1, 4, 16, 15, 8, 4, 5, 1, 2] nums = range(-4, 5) x = np.concatenate([[u] * v for u, v in zip(nums, freq)]) y = np.zeros(x.size) T, p = stats.wilcoxon(x, y, "pratt") assert_allclose(T, 423) assert_allclose(p, 0.00197547303533107) T, p = stats.wilcoxon(x, y, "zsplit") assert_allclose(T, 441) assert_allclose(p, 0.0032145343172473055) T, p = stats.wilcoxon(x, y, "wilcox") assert_allclose(T, 327) assert_allclose(p, 0.00641346115861) # Test the 'correction' option, using values computed in R with: # > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE}) x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) T, p = stats.wilcoxon(x, y, correction=False) assert_equal(T, 34) assert_allclose(p, 0.6948866, rtol=1e-6) T, p = stats.wilcoxon(x, y, correction=True) assert_equal(T, 34) assert_allclose(p, 0.7240817, rtol=1e-6) def test_wilcoxon_result_attributes(): x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112]) y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187]) res = stats.wilcoxon(x, y, correction=False) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) def test_wilcoxon_tie(): # Regression test for gh-2391. # Corresponding R code is: # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE) # > result$p.value # [1] 0.001565402 # > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE) # > result$p.value # [1] 0.001904195 stat, p = stats.wilcoxon([0.1] * 10) expected_p = 0.001565402 assert_equal(stat, 0) assert_allclose(p, expected_p, rtol=1e-6) stat, p = stats.wilcoxon([0.1] * 10, correction=True) expected_p = 0.001904195 assert_equal(stat, 0) assert_allclose(p, expected_p, rtol=1e-6) class TestMedianTest(object): def test_bad_n_samples(self): # median_test requires at least two samples. 
assert_raises(ValueError, stats.median_test, [1, 2, 3]) def test_empty_sample(self): # Each sample must contain at least one value. assert_raises(ValueError, stats.median_test, [], [1, 2, 3]) def test_empty_when_ties_ignored(self): # The grand median is 1, and all values in the first argument are # equal to the grand median. With ties="ignore", those values are # ignored, which results in the first sample being (in effect) empty. # This should raise a ValueError. assert_raises(ValueError, stats.median_test, [1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore") def test_empty_contingency_row(self): # The grand median is 1, and with the default ties="below", all the # values in the samples are counted as being below the grand median. # This would result a row of zeros in the contingency table, which is # an error. assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1]) # With ties="above", all the values are counted as above the # grand median. assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1], ties="above") def test_bad_ties(self): assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], ties="foo") def test_bad_nan_policy(self): assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar') def test_bad_keyword(self): assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5], foo="foo") def test_simple(self): x = [1, 2, 3] y = [1, 2, 3] stat, p, med, tbl = stats.median_test(x, y) # The median is floating point, but this equality test should be safe. assert_equal(med, 2.0) assert_array_equal(tbl, [[1, 1], [2, 2]]) # The expected values of the contingency table equal the contingency # table, so the statistic should be 0 and the p-value should be 1. assert_equal(stat, 0) assert_equal(p, 1) def test_ties_options(self): # Test the contingency table calculation. x = [1, 2, 3, 4] y = [5, 6] z = [7, 8, 9] # grand median is 5. # Default 'ties' option is "below". stat, p, m, tbl = stats.median_test(x, y, z) assert_equal(m, 5) assert_equal(tbl, [[0, 1, 3], [4, 1, 0]]) stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore") assert_equal(m, 5) assert_equal(tbl, [[0, 1, 3], [4, 0, 0]]) stat, p, m, tbl = stats.median_test(x, y, z, ties="above") assert_equal(m, 5) assert_equal(tbl, [[0, 2, 3], [4, 0, 0]]) def test_nan_policy_options(self): x = [1, 2, np.nan] y = [4, 5, 6] mt1 = stats.median_test(x, y, nan_policy='propagate') s, p, m, t = stats.median_test(x, y, nan_policy='omit') assert_equal(mt1, (np.nan, np.nan, np.nan, None)) assert_allclose(s, 0.31250000000000006) assert_allclose(p, 0.57615012203057869) assert_equal(m, 4.0) assert_equal(t, np.array([[0, 2],[2, 1]])) assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise') def test_basic(self): # median_test calls chi2_contingency to compute the test statistic # and p-value. Make sure it hasn't screwed up the call... 
x = [1, 2, 3, 4, 5] y = [2, 4, 6, 8] stat, p, m, tbl = stats.median_test(x, y) assert_equal(m, 4) assert_equal(tbl, [[1, 2], [4, 2]]) exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl) assert_allclose(stat, exp_stat) assert_allclose(p, exp_p) stat, p, m, tbl = stats.median_test(x, y, lambda_=0) assert_equal(m, 4) assert_equal(tbl, [[1, 2], [4, 2]]) exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0) assert_allclose(stat, exp_stat) assert_allclose(p, exp_p) stat, p, m, tbl = stats.median_test(x, y, correction=False) assert_equal(m, 4) assert_equal(tbl, [[1, 2], [4, 2]]) exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False) assert_allclose(stat, exp_stat) assert_allclose(p, exp_p)
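# --- Editor's sketch (not part of the test module above) ----------------------
# TestCircFuncs above checks `circmean` on angles that straddle the 0/360 degree
# wrap-around.  This sketch shows why a circular mean is needed there: the
# ordinary arithmetic mean of the same angles lands near 180 degrees, far from
# where the data actually cluster.
import numpy as np
from scipy import stats

angles = np.array([355, 5, 2, 359, 10, 350])      # clustered around 0/360
naive = angles.mean()                              # ~180.2, misleading
wrapped = stats.circmean(angles, high=360)         # ~0.168, as asserted above
assert naive > 100
assert np.isclose(wrapped, 0.167690146)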
55,766
37.889121
92
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_tukeylambda_stats.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_allclose, assert_equal from scipy.stats._tukeylambda_stats import (tukeylambda_variance, tukeylambda_kurtosis) def test_tukeylambda_stats_known_exact(): """Compare results with some known exact formulas.""" # Some exact values of the Tukey Lambda variance and kurtosis: # lambda var kurtosis # 0 pi**2/3 6/5 (logistic distribution) # 0.5 4 - pi (5/3 - pi/2)/(pi/4 - 1)**2 - 3 # 1 1/3 -6/5 (uniform distribution on (-1,1)) # 2 1/12 -6/5 (uniform distribution on (-1/2, 1/2)) # lambda = 0 var = tukeylambda_variance(0) assert_allclose(var, np.pi**2 / 3, atol=1e-12) kurt = tukeylambda_kurtosis(0) assert_allclose(kurt, 1.2, atol=1e-10) # lambda = 0.5 var = tukeylambda_variance(0.5) assert_allclose(var, 4 - np.pi, atol=1e-12) kurt = tukeylambda_kurtosis(0.5) desired = (5./3 - np.pi/2) / (np.pi/4 - 1)**2 - 3 assert_allclose(kurt, desired, atol=1e-10) # lambda = 1 var = tukeylambda_variance(1) assert_allclose(var, 1.0 / 3, atol=1e-12) kurt = tukeylambda_kurtosis(1) assert_allclose(kurt, -1.2, atol=1e-10) # lambda = 2 var = tukeylambda_variance(2) assert_allclose(var, 1.0 / 12, atol=1e-12) kurt = tukeylambda_kurtosis(2) assert_allclose(kurt, -1.2, atol=1e-10) def test_tukeylambda_stats_mpmath(): """Compare results with some values that were computed using mpmath.""" a10 = dict(atol=1e-10, rtol=0) a12 = dict(atol=1e-12, rtol=0) data = [ # lambda variance kurtosis [-0.1, 4.78050217874253547, 3.78559520346454510], [-0.0649, 4.16428023599895777, 2.52019675947435718], [-0.05, 3.93672267890775277, 2.13129793057777277], [-0.001, 3.30128380390964882, 1.21452460083542988], [0.001, 3.27850775649572176, 1.18560634779287585], [0.03125, 2.95927803254615800, 0.804487555161819980], [0.05, 2.78281053405464501, 0.611604043886644327], [0.0649, 2.65282386754100551, 0.476834119532774540], [1.2, 0.242153920578588346, -1.23428047169049726], [10.0, 0.00095237579757703597, 2.37810697355144933], [20.0, 0.00012195121951131043, 7.37654321002709531], ] for lam, var_expected, kurt_expected in data: var = tukeylambda_variance(lam) assert_allclose(var, var_expected, **a12) kurt = tukeylambda_kurtosis(lam) assert_allclose(kurt, kurt_expected, **a10) # Test with vector arguments (most of the other tests are for single # values). lam, var_expected, kurt_expected = zip(*data) var = tukeylambda_variance(lam) assert_allclose(var, var_expected, **a12) kurt = tukeylambda_kurtosis(lam) assert_allclose(kurt, kurt_expected, **a10) def test_tukeylambda_stats_invalid(): """Test values of lambda outside the domains of the functions.""" lam = [-1.0, -0.5] var = tukeylambda_variance(lam) assert_equal(var, np.array([np.nan, np.inf])) lam = [-1.0, -0.25] kurt = tukeylambda_kurtosis(lam) assert_equal(kurt, np.array([np.nan, np.inf]))
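# --- Editor's sketch (not part of the test module above) ----------------------
# The exact-value table in `test_tukeylambda_stats_known_exact` says the Tukey
# lambda distribution with lambda = 2 is uniform on (-1/2, 1/2), with variance
# 1/12.  A loose Monte Carlo cross-check of `tukeylambda_variance` against draws
# from `scipy.stats.tukeylambda`; the wide rtol only absorbs sampling noise.
import numpy as np
from scipy import stats
from scipy.stats._tukeylambda_stats import tukeylambda_variance

samples = stats.tukeylambda.rvs(2.0, size=200000, random_state=1234)
assert np.isclose(tukeylambda_variance(2.0), 1.0 / 12)
assert np.isclose(samples.var(), tukeylambda_variance(2.0), rtol=2e-2)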
3,298
36.067416
75
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_contingency.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (assert_equal, assert_array_equal, assert_array_almost_equal, assert_approx_equal, assert_allclose) from pytest import raises as assert_raises from scipy.special import xlogy from scipy.stats.contingency import margins, expected_freq, chi2_contingency def test_margins(): a = np.array([1]) m = margins(a) assert_equal(len(m), 1) m0 = m[0] assert_array_equal(m0, np.array([1])) a = np.array([[1]]) m0, m1 = margins(a) expected0 = np.array([[1]]) expected1 = np.array([[1]]) assert_array_equal(m0, expected0) assert_array_equal(m1, expected1) a = np.arange(12).reshape(2, 6) m0, m1 = margins(a) expected0 = np.array([[15], [51]]) expected1 = np.array([[6, 8, 10, 12, 14, 16]]) assert_array_equal(m0, expected0) assert_array_equal(m1, expected1) a = np.arange(24).reshape(2, 3, 4) m0, m1, m2 = margins(a) expected0 = np.array([[[66]], [[210]]]) expected1 = np.array([[[60], [92], [124]]]) expected2 = np.array([[[60, 66, 72, 78]]]) assert_array_equal(m0, expected0) assert_array_equal(m1, expected1) assert_array_equal(m2, expected2) def test_expected_freq(): assert_array_equal(expected_freq([1]), np.array([1.0])) observed = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]]) e = expected_freq(observed) assert_array_equal(e, np.ones_like(observed)) observed = np.array([[10, 10, 20], [20, 20, 20]]) e = expected_freq(observed) correct = np.array([[12., 12., 16.], [18., 18., 24.]]) assert_array_almost_equal(e, correct) def test_chi2_contingency_trivial(): # Some very simple tests for chi2_contingency. # A trivial case obs = np.array([[1, 2], [1, 2]]) chi2, p, dof, expected = chi2_contingency(obs, correction=False) assert_equal(chi2, 0.0) assert_equal(p, 1.0) assert_equal(dof, 1) assert_array_equal(obs, expected) # A *really* trivial case: 1-D data. obs = np.array([1, 2, 3]) chi2, p, dof, expected = chi2_contingency(obs, correction=False) assert_equal(chi2, 0.0) assert_equal(p, 1.0) assert_equal(dof, 0) assert_array_equal(obs, expected) def test_chi2_contingency_R(): # Some test cases that were computed independently, using R. Rcode = \ """ # Data vector. data <- c( 12, 34, 23, 4, 47, 11, 35, 31, 11, 34, 10, 18, 12, 32, 9, 18, 13, 19, 12, 12, 14, 9, 33, 25 ) # Create factor tags:r=rows, c=columns, t=tiers r <- factor(gl(4, 2*3, 2*3*4, labels=c("r1", "r2", "r3", "r4"))) c <- factor(gl(3, 1, 2*3*4, labels=c("c1", "c2", "c3"))) t <- factor(gl(2, 3, 2*3*4, labels=c("t1", "t2"))) # 3-way Chi squared test of independence s = summary(xtabs(data~r+c+t)) print(s) """ Routput = \ """ Call: xtabs(formula = data ~ r + c + t) Number of cases in table: 478 Number of factors: 3 Test for independence of all factors: Chisq = 102.17, df = 17, p-value = 3.514e-14 """ obs = np.array( [[[12, 34, 23], [35, 31, 11], [12, 32, 9], [12, 12, 14]], [[4, 47, 11], [34, 10, 18], [18, 13, 19], [9, 33, 25]]]) chi2, p, dof, expected = chi2_contingency(obs) assert_approx_equal(chi2, 102.17, significant=5) assert_approx_equal(p, 3.514e-14, significant=4) assert_equal(dof, 17) Rcode = \ """ # Data vector. 
data <- c( # 12, 17, 11, 16, # 11, 12, 15, 16, # 23, 15, 30, 22, # 14, 17, 15, 16 ) # Create factor tags:r=rows, c=columns, d=depths(?), t=tiers r <- factor(gl(2, 2, 2*2*2*2, labels=c("r1", "r2"))) c <- factor(gl(2, 1, 2*2*2*2, labels=c("c1", "c2"))) d <- factor(gl(2, 4, 2*2*2*2, labels=c("d1", "d2"))) t <- factor(gl(2, 8, 2*2*2*2, labels=c("t1", "t2"))) # 4-way Chi squared test of independence s = summary(xtabs(data~r+c+d+t)) print(s) """ Routput = \ """ Call: xtabs(formula = data ~ r + c + d + t) Number of cases in table: 262 Number of factors: 4 Test for independence of all factors: Chisq = 8.758, df = 11, p-value = 0.6442 """ obs = np.array( [[[[12, 17], [11, 16]], [[11, 12], [15, 16]]], [[[23, 15], [30, 22]], [[14, 17], [15, 16]]]]) chi2, p, dof, expected = chi2_contingency(obs) assert_approx_equal(chi2, 8.758, significant=4) assert_approx_equal(p, 0.6442, significant=4) assert_equal(dof, 11) def test_chi2_contingency_g(): c = np.array([[15, 60], [15, 90]]) g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=False) assert_allclose(g, 2*xlogy(c, c/e).sum()) g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood', correction=True) c_corr = c + np.array([[-0.5, 0.5], [0.5, -0.5]]) assert_allclose(g, 2*xlogy(c_corr, c_corr/e).sum()) c = np.array([[10, 12, 10], [12, 10, 10]]) g, p, dof, e = chi2_contingency(c, lambda_='log-likelihood') assert_allclose(g, 2*xlogy(c, c/e).sum()) def test_chi2_contingency_bad_args(): # Test that "bad" inputs raise a ValueError. # Negative value in the array of observed frequencies. obs = np.array([[-1, 10], [1, 2]]) assert_raises(ValueError, chi2_contingency, obs) # The zeros in this will result in zeros in the array # of expected frequencies. obs = np.array([[0, 1], [0, 1]]) assert_raises(ValueError, chi2_contingency, obs) # A degenerate case: `observed` has size 0. obs = np.empty((0, 8)) assert_raises(ValueError, chi2_contingency, obs)
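# Illustrative sketch (not part of the original file): the log-likelihood
# ratio ("G") statistic exercised in test_chi2_contingency_g, written out
# explicitly as G = 2 * sum(obs * log(obs / expected)), with the expected
# frequencies built from the row and column margins.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats.contingency import expected_freq, chi2_contingency

    obs = np.array([[15, 60], [15, 90]])
    exp = expected_freq(obs)
    g_manual = 2.0 * np.sum(obs * np.log(obs / exp))
    g_scipy, _, _, _ = chi2_contingency(obs, lambda_='log-likelihood',
                                        correction=False)
    print(g_manual, g_scipy)    # the two values agree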
5,910
28.40796
82
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_continuous_basic.py
from __future__ import division, print_function, absolute_import import numpy as np import numpy.testing as npt import pytest from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings from scipy.integrate import IntegrationWarning from scipy import stats from scipy.special import betainc from. common_tests import (check_normalization, check_moment, check_mean_expect, check_var_expect, check_skew_expect, check_kurt_expect, check_entropy, check_private_entropy, check_edge_support, check_named_args, check_random_state_property, check_meth_dtype, check_ppf_dtype, check_cmplx_deriv, check_pickling, check_rvs_broadcast) from scipy.stats._distr_params import distcont """ Test all continuous distributions. Parameters were chosen for those distributions that pass the Kolmogorov-Smirnov test. This provides safe parameters for each distributions so that we can perform further testing of class methods. These tests currently check only/mostly for serious errors and exceptions, not for numerically exact results. """ # Note that you need to add new distributions you want tested # to _distr_params DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5 # Last four of these fail all around. Need to be checked distcont_extra = [ ['betaprime', (100, 86)], ['fatiguelife', (5,)], ['mielke', (4.6420495492121487, 0.59707419545516938)], ['invweibull', (0.58847112119264788,)], # burr: sample mean test fails still for c<1 ['burr', (0.94839838075366045, 4.3820284068855795)], # genextreme: sample mean test, sf-logsf test fail ['genextreme', (3.3184017469423535,)], ] distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon', 'vonmises', 'vonmises_line', 'mielke', 'semicircular', 'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign'] # distslow are sorted by speed (very slow to slow) # These distributions fail the complex derivative test below. # Here 'fail' mean produce wrong results and/or raise exceptions, depending # on the implementation details of corresponding special functions. # cf https://github.com/scipy/scipy/pull/4979 for a discussion. fails_cmplx = set(['beta', 'betaprime', 'chi', 'chi2', 'dgamma', 'dweibull', 'erlang', 'f', 'gamma', 'gausshyper', 'gengamma', 'gennorm', 'genpareto', 'halfgennorm', 'invgamma', 'ksone', 'kstwobign', 'levy_l', 'loggamma', 'logistic', 'maxwell', 'nakagami', 'ncf', 'nct', 'ncx2', 'norminvgauss', 'pearson3', 'rice', 't', 'skewnorm', 'tukeylambda', 'vonmises', 'vonmises_line', 'rv_histogram_instance']) _h = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8) histogram_test_instance = stats.rv_histogram(_h) def cases_test_cont_basic(): for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]: if distname == 'levy_stable': continue elif distname in distslow: yield pytest.param(distname, arg, marks=pytest.mark.slow) else: yield distname, arg @pytest.mark.parametrize('distname,arg', cases_test_cont_basic()) def test_cont_basic(distname, arg): # this test skips slow distributions if distname == 'truncnorm': pytest.xfail(reason=distname) try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'rv_histogram_instance' np.random.seed(765456) sn = 500 with suppress_warnings() as sup: # frechet_l and frechet_r are deprecated, so all their # methods generate DeprecationWarnings. 
sup.filter(category=DeprecationWarning, message=".*frechet_") rvs = distfn.rvs(size=sn, *arg) sm = rvs.mean() sv = rvs.var() m, v = distfn.stats(*arg) check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, distname + 'sample mean test') check_cdf_ppf(distfn, arg, distname) check_sf_isf(distfn, arg, distname) check_pdf(distfn, arg, distname) check_pdf_logpdf(distfn, arg, distname) check_cdf_logcdf(distfn, arg, distname) check_sf_logsf(distfn, arg, distname) alpha = 0.01 if distname == 'rv_histogram_instance': check_distribution_rvs(distfn.cdf, arg, alpha, rvs) else: check_distribution_rvs(distname, arg, alpha, rvs) locscale_defaults = (0, 1) meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf, distfn.logsf] # make sure arguments are within support spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5, 'pareto': 1.5, 'tukeylambda': 0.3, 'rv_histogram_instance': 5.0} x = spec_x.get(distname, 0.5) if distname == 'invweibull': arg = (1,) elif distname == 'ksone': arg = (3,) check_named_args(distfn, x, arg, locscale_defaults, meths) check_random_state_property(distfn, arg) check_pickling(distfn, arg) # Entropy if distname not in ['ksone', 'kstwobign']: check_entropy(distfn, arg, distname) if distfn.numargs == 0: check_vecentropy(distfn, arg) if (distfn.__class__._entropy != stats.rv_continuous._entropy and distname != 'vonmises'): check_private_entropy(distfn, arg, stats.rv_continuous) check_edge_support(distfn, arg) check_meth_dtype(distfn, arg, meths) check_ppf_dtype(distfn, arg) if distname not in fails_cmplx: check_cmplx_deriv(distfn, arg) if distname != 'truncnorm': check_ppf_private(distfn, arg, distname) def test_levy_stable_random_state_property(): # levy_stable only implements rvs(), so it is skipped in the # main loop in test_cont_basic(). Here we apply just the test # check_random_state_property to levy_stable. 
check_random_state_property(stats.levy_stable, (0.5, 0.1)) def cases_test_moments(): fail_normalization = set(['vonmises', 'ksone']) fail_higher = set(['vonmises', 'ksone', 'ncf']) for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]: if distname == 'levy_stable': continue cond1 = distname not in fail_normalization cond2 = distname not in fail_higher yield distname, arg, cond1, cond2, False if not cond1 or not cond2: # Run the distributions that have issues twice, once skipping the # not_ok parts, once with the not_ok parts but marked as knownfail yield pytest.param(distname, arg, True, True, True, marks=pytest.mark.xfail) @pytest.mark.slow @pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,is_xfailing', cases_test_moments()) def test_moments(distname, arg, normalization_ok, higher_ok, is_xfailing): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'rv_histogram_instance' with suppress_warnings() as sup: sup.filter(IntegrationWarning, "The integral is probably divergent, or slowly convergent.") sup.filter(category=DeprecationWarning, message=".*frechet_") if is_xfailing: sup.filter(IntegrationWarning) m, v, s, k = distfn.stats(*arg, moments='mvsk') if normalization_ok: check_normalization(distfn, arg, distname) if higher_ok: check_mean_expect(distfn, arg, m, distname) check_skew_expect(distfn, arg, m, v, s, distname) check_var_expect(distfn, arg, m, v, distname) check_kurt_expect(distfn, arg, m, v, k, distname) check_loc_scale(distfn, arg, m, v, distname) check_moment(distfn, arg, m, v, distname) @pytest.mark.parametrize('dist,shape_args', distcont) def test_rvs_broadcast(dist, shape_args): if dist in ['gausshyper', 'genexpon']: pytest.skip("too slow") # If shape_only is True, it means the _rvs method of the # distribution uses more than one random number to generate a random # variate. That means the result of using rvs with broadcasting or # with a nontrivial size will not necessarily be the same as using the # numpy.vectorize'd version of rvs(), so we can only compare the shapes # of the results, not the values. # Whether or not a distribution is in the following list is an # implementation detail of the distribution, not a requirement. If # the implementation the rvs() method of a distribution changes, this # test might also have to be changed. shape_only = dist in ['betaprime', 'dgamma', 'exponnorm', 'norminvgauss', 'nct', 'dweibull', 'rice', 'levy_stable', 'skewnorm'] distfunc = getattr(stats, dist) loc = np.zeros(2) scale = np.ones((3, 1)) nargs = distfunc.numargs allargs = [] bshape = [3, 2] # Generate shape parameter arguments... for k in range(nargs): shp = (k + 4,) + (1,)*(k + 2) allargs.append(shape_args[k]*np.ones(shp)) bshape.insert(0, k + 4) allargs.extend([loc, scale]) # bshape holds the expected shape when loc, scale, and the shape # parameters are all broadcast together. check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd') def test_rvs_gh2069_regression(): # Regression tests for gh-2069. In scipy 0.17 and earlier, # these tests would fail. 
# # A typical example of the broken behavior: # >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5)) # array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705]) np.random.seed(123) vals = stats.norm.rvs(loc=np.zeros(5), scale=1) d = np.diff(vals) npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!") vals = stats.norm.rvs(loc=0, scale=np.ones(5)) d = np.diff(vals) npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!") vals = stats.norm.rvs(loc=np.zeros(5), scale=np.ones(5)) d = np.diff(vals) npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!") vals = stats.norm.rvs(loc=np.array([[0], [0]]), scale=np.ones(5)) d = np.diff(vals.ravel()) npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!") assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]], [[1, 1], [1, 1]], 1) assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2)) assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0], [[1], [2]], (4,)) def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg): # this did not work, skipped silently by nose if np.isfinite(m): check_sample_mean(sm, sv, sn, m) if np.isfinite(v): check_sample_var(sv, sn, v) def check_sample_mean(sm, v, n, popmean): # from stats.stats.ttest_1samp(a, popmean): # Calculates the t-obtained for the independent samples T-test on ONE group # of scores a, given a population mean. # # Returns: t-value, two-tailed prob df = n-1 svar = ((n-1)*v) / float(df) # looks redundant t = (sm-popmean) / np.sqrt(svar*(1.0/n)) prob = betainc(0.5*df, 0.5, df/(df + t*t)) # return t,prob npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' % (t, prob, popmean, sm)) def check_sample_var(sv, n, popvar): # two-sided chisquare test for sample variance equal to # hypothesized variance df = n-1 chi2 = (n-1)*popvar/float(popvar) pval = stats.distributions.chi2.sf(chi2, df) * 2 npt.assert_(pval > 0.01, 'var fail, t, pval = %f, %f, v, sv=%f, %f' % (chi2, pval, popvar, sv)) def check_cdf_ppf(distfn, arg, msg): values = [0.001, 0.5, 0.999] npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg), values, decimal=DECIMAL, err_msg=msg + ' - cdf-ppf roundtrip') def check_sf_isf(distfn, arg, msg): npt.assert_almost_equal(distfn.sf(distfn.isf([0.1, 0.5, 0.9], *arg), *arg), [0.1, 0.5, 0.9], decimal=DECIMAL, err_msg=msg + ' - sf-isf roundtrip') npt.assert_almost_equal(distfn.cdf([0.1, 0.9], *arg), 1.0 - distfn.sf([0.1, 0.9], *arg), decimal=DECIMAL, err_msg=msg + ' - cdf-sf relationship') def check_pdf(distfn, arg, msg): # compares pdf at median with numerical derivative of cdf median = distfn.ppf(0.5, *arg) eps = 1e-6 pdfv = distfn.pdf(median, *arg) if (pdfv < 1e-4) or (pdfv > 1e4): # avoid checking a case where pdf is close to zero or # huge (singularity) median = median + 0.1 pdfv = distfn.pdf(median, *arg) cdfdiff = (distfn.cdf(median + eps, *arg) - distfn.cdf(median - eps, *arg))/eps/2.0 # replace with better diff and better test (more points), # actually, this works pretty well msg += ' - cdf-pdf relationship' npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL, err_msg=msg) def check_pdf_logpdf(distfn, args, msg): # compares pdf at several points with the log of the pdf points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) vals = distfn.ppf(points, *args) pdf = distfn.pdf(vals, *args) logpdf = distfn.logpdf(vals, *args) pdf = pdf[pdf != 0] logpdf = logpdf[np.isfinite(logpdf)] msg += " - logpdf-log(pdf) relationship" 
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg) def check_sf_logsf(distfn, args, msg): # compares sf at several points with the log of the sf points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) vals = distfn.ppf(points, *args) sf = distfn.sf(vals, *args) logsf = distfn.logsf(vals, *args) sf = sf[sf != 0] logsf = logsf[np.isfinite(logsf)] msg += " - logsf-log(sf) relationship" npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg) def check_cdf_logcdf(distfn, args, msg): # compares cdf at several points with the log of the cdf points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) vals = distfn.ppf(points, *args) cdf = distfn.cdf(vals, *args) logcdf = distfn.logcdf(vals, *args) cdf = cdf[cdf != 0] logcdf = logcdf[np.isfinite(logcdf)] msg += " - logcdf-log(cdf) relationship" npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg) def check_distribution_rvs(dist, args, alpha, rvs): # test from scipy.stats.tests # this version reuses existing random variables D, pval = stats.kstest(rvs, dist, args=args, N=1000) if (pval < alpha): D, pval = stats.kstest(dist, '', args=args, N=1000) npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) + "; alpha = " + str(alpha) + "\nargs = " + str(args)) def check_vecentropy(distfn, args): npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args)) def check_loc_scale(distfn, arg, m, v, msg): loc, scale = 10.0, 10.0 mt, vt = distfn.stats(loc=loc, scale=scale, *arg) npt.assert_allclose(m*scale + loc, mt) npt.assert_allclose(v*scale*scale, vt) def check_ppf_private(distfn, arg, msg): # fails by design for truncnorm self.nb not defined ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg) npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
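# Illustrative sketch (not part of the original file): the incomplete beta
# expression in check_sample_mean above is the standard two-sided t-test
# p-value, i.e. betainc(df/2, 1/2, df/(df + t*t)) equals 2 * t.sf(|t|, df).
if __name__ == "__main__":
    from scipy import stats
    from scipy.special import betainc

    t_val, df = 1.7, 24
    p_beta = betainc(0.5*df, 0.5, df/(df + t_val*t_val))
    p_t = 2 * stats.t.sf(abs(t_val), df)
    print(p_beta, p_t)    # should agree to near machine precision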
16,009
37.671498
91
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/common_tests.py
from __future__ import division, print_function, absolute_import import pickle import numpy as np import numpy.testing as npt from numpy.testing import assert_allclose, assert_equal from scipy._lib._numpy_compat import suppress_warnings from pytest import raises as assert_raises import numpy.ma.testutils as ma_npt from scipy._lib._util import getargspec_no_self as _getargspec from scipy import stats def check_named_results(res, attributes, ma=False): for i, attr in enumerate(attributes): if ma: ma_npt.assert_equal(res[i], getattr(res, attr)) else: npt.assert_equal(res[i], getattr(res, attr)) def check_normalization(distfn, args, distname): norm_moment = distfn.moment(0, *args) npt.assert_allclose(norm_moment, 1.0) # this is a temporary plug: either ncf or expect is problematic; # best be marked as a knownfail, but I've no clue how to do it. if distname == "ncf": atol, rtol = 1e-5, 0 else: atol, rtol = 1e-7, 1e-7 normalization_expect = distfn.expect(lambda x: 1, args=args) npt.assert_allclose(normalization_expect, 1.0, atol=atol, rtol=rtol, err_msg=distname, verbose=True) normalization_cdf = distfn.cdf(distfn.b, *args) npt.assert_allclose(normalization_cdf, 1.0) def check_moment(distfn, arg, m, v, msg): m1 = distfn.moment(1, *arg) m2 = distfn.moment(2, *arg) if not np.isinf(m): npt.assert_almost_equal(m1, m, decimal=10, err_msg=msg + ' - 1st moment') else: # or np.isnan(m1), npt.assert_(np.isinf(m1), msg + ' - 1st moment -infinite, m1=%s' % str(m1)) if not np.isinf(v): npt.assert_almost_equal(m2 - m1 * m1, v, decimal=10, err_msg=msg + ' - 2ndt moment') else: # or np.isnan(m2), npt.assert_(np.isinf(m2), msg + ' - 2nd moment -infinite, m2=%s' % str(m2)) def check_mean_expect(distfn, arg, m, msg): if np.isfinite(m): m1 = distfn.expect(lambda x: x, arg) npt.assert_almost_equal(m1, m, decimal=5, err_msg=msg + ' - 1st moment (expect)') def check_var_expect(distfn, arg, m, v, msg): if np.isfinite(v): m2 = distfn.expect(lambda x: x*x, arg) npt.assert_almost_equal(m2, v + m*m, decimal=5, err_msg=msg + ' - 2st moment (expect)') def check_skew_expect(distfn, arg, m, v, s, msg): if np.isfinite(s): m3e = distfn.expect(lambda x: np.power(x-m, 3), arg) npt.assert_almost_equal(m3e, s * np.power(v, 1.5), decimal=5, err_msg=msg + ' - skew') else: npt.assert_(np.isnan(s)) def check_kurt_expect(distfn, arg, m, v, k, msg): if np.isfinite(k): m4e = distfn.expect(lambda x: np.power(x-m, 4), arg) npt.assert_allclose(m4e, (k + 3.) * np.power(v, 2), atol=1e-5, rtol=1e-5, err_msg=msg + ' - kurtosis') else: npt.assert_(np.isnan(k)) def check_entropy(distfn, arg, msg): ent = distfn.entropy(*arg) npt.assert_(not np.isnan(ent), msg + 'test Entropy is nan') def check_private_entropy(distfn, args, superclass): # compare a generic _entropy with the distribution-specific implementation npt.assert_allclose(distfn._entropy(*args), superclass._entropy(distfn, *args)) def check_edge_support(distfn, args): # Make sure that x=self.a and self.b are handled correctly. 
x = [distfn.a, distfn.b] if isinstance(distfn, stats.rv_discrete): x = [distfn.a - 1, distfn.b] npt.assert_equal(distfn.cdf(x, *args), [0.0, 1.0]) npt.assert_equal(distfn.sf(x, *args), [1.0, 0.0]) if distfn.name not in ('skellam', 'dlaplace'): # with a = -inf, log(0) generates warnings npt.assert_equal(distfn.logcdf(x, *args), [-np.inf, 0.0]) npt.assert_equal(distfn.logsf(x, *args), [0.0, -np.inf]) npt.assert_equal(distfn.ppf([0.0, 1.0], *args), x) npt.assert_equal(distfn.isf([0.0, 1.0], *args), x[::-1]) # out-of-bounds for isf & ppf npt.assert_(np.isnan(distfn.isf([-1, 2], *args)).all()) npt.assert_(np.isnan(distfn.ppf([-1, 2], *args)).all()) def check_named_args(distfn, x, shape_args, defaults, meths): ## Check calling w/ named arguments. # check consistency of shapes, numargs and _parse signature signature = _getargspec(distfn._parse_args) npt.assert_(signature.varargs is None) npt.assert_(signature.keywords is None) npt.assert_(list(signature.defaults) == list(defaults)) shape_argnames = signature.args[:-len(defaults)] # a, b, loc=0, scale=1 if distfn.shapes: shapes_ = distfn.shapes.replace(',', ' ').split() else: shapes_ = '' npt.assert_(len(shapes_) == distfn.numargs) npt.assert_(len(shapes_) == len(shape_argnames)) # check calling w/ named arguments shape_args = list(shape_args) vals = [meth(x, *shape_args) for meth in meths] npt.assert_(np.all(np.isfinite(vals))) names, a, k = shape_argnames[:], shape_args[:], {} while names: k.update({names.pop(): a.pop()}) v = [meth(x, *a, **k) for meth in meths] npt.assert_array_equal(vals, v) if 'n' not in k.keys(): # `n` is first parameter of moment(), so can't be used as named arg npt.assert_equal(distfn.moment(1, *a, **k), distfn.moment(1, *shape_args)) # unknown arguments should not go through: k.update({'kaboom': 42}) assert_raises(TypeError, distfn.cdf, x, **k) def check_random_state_property(distfn, args): # check the random_state attribute of a distribution *instance* # This test fiddles with distfn.random_state. This breaks other tests, # hence need to save it and then restore. rndm = distfn.random_state # baseline: this relies on the global state np.random.seed(1234) distfn.random_state = None r0 = distfn.rvs(*args, size=8) # use an explicit instance-level random_state distfn.random_state = 1234 r1 = distfn.rvs(*args, size=8) npt.assert_equal(r0, r1) distfn.random_state = np.random.RandomState(1234) r2 = distfn.rvs(*args, size=8) npt.assert_equal(r0, r2) # can override the instance-level random_state for an individual .rvs call distfn.random_state = 2 orig_state = distfn.random_state.get_state() r3 = distfn.rvs(*args, size=8, random_state=np.random.RandomState(1234)) npt.assert_equal(r0, r3) # ... and that does not alter the instance-level random_state! 
npt.assert_equal(distfn.random_state.get_state(), orig_state) # finally, restore the random_state distfn.random_state = rndm def check_meth_dtype(distfn, arg, meths): q0 = [0.25, 0.5, 0.75] x0 = distfn.ppf(q0, *arg) x_cast = [x0.astype(tp) for tp in (np.int_, np.float16, np.float32, np.float64)] for x in x_cast: # casting may have clipped the values, exclude those distfn._argcheck(*arg) x = x[(distfn.a < x) & (x < distfn.b)] for meth in meths: val = meth(x, *arg) npt.assert_(val.dtype == np.float_) def check_ppf_dtype(distfn, arg): q0 = np.asarray([0.25, 0.5, 0.75]) q_cast = [q0.astype(tp) for tp in (np.float16, np.float32, np.float64)] for q in q_cast: for meth in [distfn.ppf, distfn.isf]: val = meth(q, *arg) npt.assert_(val.dtype == np.float_) def check_cmplx_deriv(distfn, arg): # Distributions allow complex arguments. def deriv(f, x, *arg): x = np.asarray(x) h = 1e-10 return (f(x + h*1j, *arg)/h).imag x0 = distfn.ppf([0.25, 0.51, 0.75], *arg) x_cast = [x0.astype(tp) for tp in (np.int_, np.float16, np.float32, np.float64)] for x in x_cast: # casting may have clipped the values, exclude those distfn._argcheck(*arg) x = x[(distfn.a < x) & (x < distfn.b)] pdf, cdf, sf = distfn.pdf(x, *arg), distfn.cdf(x, *arg), distfn.sf(x, *arg) assert_allclose(deriv(distfn.cdf, x, *arg), pdf, rtol=1e-5) assert_allclose(deriv(distfn.logcdf, x, *arg), pdf/cdf, rtol=1e-5) assert_allclose(deriv(distfn.sf, x, *arg), -pdf, rtol=1e-5) assert_allclose(deriv(distfn.logsf, x, *arg), -pdf/sf, rtol=1e-5) assert_allclose(deriv(distfn.logpdf, x, *arg), deriv(distfn.pdf, x, *arg) / distfn.pdf(x, *arg), rtol=1e-5) def check_pickling(distfn, args): # check that a distribution instance pickles and unpickles # pay special attention to the random_state property # save the random_state (restore later) rndm = distfn.random_state distfn.random_state = 1234 distfn.rvs(*args, size=8) s = pickle.dumps(distfn) r0 = distfn.rvs(*args, size=8) unpickled = pickle.loads(s) r1 = unpickled.rvs(*args, size=8) npt.assert_equal(r0, r1) # also smoke test some methods medians = [distfn.ppf(0.5, *args), unpickled.ppf(0.5, *args)] npt.assert_equal(medians[0], medians[1]) npt.assert_equal(distfn.cdf(medians[0], *args), unpickled.cdf(medians[1], *args)) # restore the random_state distfn.random_state = rndm def check_rvs_broadcast(distfunc, distname, allargs, shape, shape_only, otype): np.random.seed(123) with suppress_warnings() as sup: # frechet_l and frechet_r are deprecated, so all their # methods generate DeprecationWarnings. sup.filter(category=DeprecationWarning, message=".*frechet_") sample = distfunc.rvs(*allargs) assert_equal(sample.shape, shape, "%s: rvs failed to broadcast" % distname) if not shape_only: rvs = np.vectorize(lambda *allargs: distfunc.rvs(*allargs), otypes=otype) np.random.seed(123) expected = rvs(*allargs) assert_allclose(sample, expected, rtol=1e-15)
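# Illustrative sketch (not part of the original file): the complex-step
# differentiation trick behind check_cmplx_deriv.  For an analytic f,
# Im(f(x + i*h)) / h approximates f'(x) without subtractive cancellation,
# so a very small step such as h = 1e-10 still yields full precision.
if __name__ == "__main__":
    import numpy as np

    h = 1e-10
    x = 0.7
    approx = (np.exp(np.sin(x + h*1j)) / h).imag    # complex-step derivative
    exact = np.cos(x) * np.exp(np.sin(x))           # analytic derivative
    print(approx, exact)                            # agree to ~15 digits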
10,081
33.646048
85
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_discrete_basic.py
from __future__ import division, print_function, absolute_import import numpy.testing as npt import numpy as np from scipy._lib.six import xrange import pytest from scipy import stats from .common_tests import (check_normalization, check_moment, check_mean_expect, check_var_expect, check_skew_expect, check_kurt_expect, check_entropy, check_private_entropy, check_edge_support, check_named_args, check_random_state_property, check_pickling, check_rvs_broadcast) from scipy.stats._distr_params import distdiscrete vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4]) distdiscrete += [[stats.rv_discrete(values=vals), ()]] def cases_test_discrete_basic(): seen = set() for distname, arg in distdiscrete: yield distname, arg, distname not in seen seen.add(distname) @pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic()) def test_discrete_basic(distname, arg, first_case): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' np.random.seed(9765456) rvs = distfn.rvs(size=2000, *arg) supp = np.unique(rvs) m, v = distfn.stats(*arg) check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf') check_pmf_cdf(distfn, arg, distname) check_oth(distfn, arg, supp, distname + ' oth') check_edge_support(distfn, arg) alpha = 0.01 check_discrete_chisquare(distfn, arg, rvs, alpha, distname + ' chisquare') if first_case: locscale_defaults = (0,) meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf, distfn.logsf] # make sure arguments are within support spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, } k = spec_k.get(distname, 1) check_named_args(distfn, k, arg, locscale_defaults, meths) if distname != 'sample distribution': check_scale_docstring(distfn) check_random_state_property(distfn, arg) check_pickling(distfn, arg) # Entropy check_entropy(distfn, arg, distname) if distfn.__class__._entropy != stats.rv_discrete._entropy: check_private_entropy(distfn, arg, stats.rv_discrete) @pytest.mark.parametrize('distname,arg', distdiscrete) def test_moments(distname, arg): try: distfn = getattr(stats, distname) except TypeError: distfn = distname distname = 'sample distribution' m, v, s, k = distfn.stats(*arg, moments='mvsk') check_normalization(distfn, arg, distname) # compare `stats` and `moment` methods check_moment(distfn, arg, m, v, distname) check_mean_expect(distfn, arg, m, distname) check_var_expect(distfn, arg, m, v, distname) check_skew_expect(distfn, arg, m, v, s, distname) if distname not in ['zipf']: check_kurt_expect(distfn, arg, m, v, k, distname) # frozen distr moments check_moment_frozen(distfn, arg, m, 1) check_moment_frozen(distfn, arg, v+m*m, 2) @pytest.mark.parametrize('dist,shape_args', distdiscrete) def test_rvs_broadcast(dist, shape_args): # If shape_only is True, it means the _rvs method of the # distribution uses more than one random number to generate a random # variate. That means the result of using rvs with broadcasting or # with a nontrivial size will not necessarily be the same as using the # numpy.vectorize'd version of rvs(), so we can only compare the shapes # of the results, not the values. # Whether or not a distribution is in the following list is an # implementation detail of the distribution, not a requirement. If # the implementation the rvs() method of a distribution changes, this # test might also have to be changed. 
shape_only = dist in ['skellam'] try: distfunc = getattr(stats, dist) except TypeError: distfunc = dist dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk) loc = np.zeros(2) nargs = distfunc.numargs allargs = [] bshape = [] # Generate shape parameter arguments... for k in range(nargs): shp = (k + 3,) + (1,)*(k + 1) param_val = shape_args[k] allargs.append(param_val*np.ones(shp, dtype=np.array(param_val).dtype)) bshape.insert(0, shp[0]) allargs.append(loc) bshape.append(loc.size) # bshape holds the expected shape when loc, scale, and the shape # parameters are all broadcast together. check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_]) def check_cdf_ppf(distfn, arg, supp, msg): # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer} npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg), *arg), supp, msg + '-roundtrip') npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg), supp, msg + '-roundtrip') if not hasattr(distfn, 'xk'): supp1 = supp[supp < distfn.b] npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg), supp1 + distfn.inc, msg + ' ppf-cdf-next') # -1e-8 could cause an error if pmf < 1e-8 def check_pmf_cdf(distfn, arg, distname): if hasattr(distfn, 'xk'): index = distfn.xk else: startind = int(distfn.ppf(0.01, *arg) - 1) index = list(range(startind, startind + 10)) cdfs = distfn.cdf(index, *arg) pmfs_cum = distfn.pmf(index, *arg).cumsum() atol, rtol = 1e-10, 1e-10 if distname == 'skellam': # ncx2 accuracy atol, rtol = 1e-5, 1e-5 npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0], atol=atol, rtol=rtol) def check_moment_frozen(distfn, arg, m, k): npt.assert_allclose(distfn(*arg).moment(k), m, atol=1e-10, rtol=1e-10) def check_oth(distfn, arg, supp, msg): # checking other methods of distfn npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg), atol=1e-10, rtol=1e-10) q = np.linspace(0.01, 0.99, 20) npt.assert_allclose(distfn.isf(q, *arg), distfn.ppf(1. - q, *arg), atol=1e-10, rtol=1e-10) median_sf = distfn.isf(0.5, *arg) npt.assert_(distfn.sf(median_sf - 1, *arg) > 0.5) npt.assert_(distfn.cdf(median_sf + 1, *arg) > 0.5) def check_discrete_chisquare(distfn, arg, rvs, alpha, msg): """Perform chisquare test for random sample of a discrete distribution Parameters ---------- distname : string name of distribution function arg : sequence parameters of distribution alpha : float significance level, threshold for p-value Returns ------- result : bool 0 if test passes, 1 if test fails """ wsupp = 0.05 # construct intervals with minimum mass `wsupp`. 
# intervals are left-half-open as in a cdf difference lo = int(max(distfn.a, -1000)) distsupport = xrange(lo, int(min(distfn.b, 1000)) + 1) last = 0 distsupp = [lo] distmass = [] for ii in distsupport: current = distfn.cdf(ii, *arg) if current - last >= wsupp - 1e-14: distsupp.append(ii) distmass.append(current - last) last = current if current > (1 - wsupp): break if distsupp[-1] < distfn.b: distsupp.append(distfn.b) distmass.append(1 - last) distsupp = np.array(distsupp) distmass = np.array(distmass) # convert intervals to right-half-open as required by histogram histsupp = distsupp + 1e-8 histsupp[0] = distfn.a # find sample frequencies and perform chisquare test freq, hsupp = np.histogram(rvs, histsupp) chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass) npt.assert_(pval > alpha, 'chisquare - test for %s at arg = %s with pval = %s' % (msg, str(arg), str(pval))) def check_scale_docstring(distfn): if distfn.__doc__ is not None: # Docstrings can be stripped if interpreter is run with -OO npt.assert_('scale' not in distfn.__doc__)
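# Illustrative sketch (not part of the original file): the identity behind
# check_pmf_cdf above -- for a discrete distribution the cdf at k is the
# running sum of the pmf over the support up to k, e.g. for a Poisson(2)
# variable.
if __name__ == "__main__":
    import numpy as np
    from scipy import stats

    k = np.arange(10)
    print(np.allclose(stats.poisson.cdf(k, 2.0),
                      stats.poisson.pmf(k, 2.0).cumsum()))    # True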
8,295
34.302128
80
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_kdeoth.py
from __future__ import division, print_function, absolute_import from scipy import stats import numpy as np from numpy.testing import (assert_almost_equal, assert_, assert_array_almost_equal, assert_array_almost_equal_nulp) from pytest import raises as assert_raises def test_kde_1d(): #some basic tests comparing to normal distribution np.random.seed(8765678) n_basesample = 500 xn = np.random.randn(n_basesample) xnmean = xn.mean() xnstd = xn.std(ddof=1) # get kde for original sample gkde = stats.gaussian_kde(xn) # evaluate the density function for the kde for some points xs = np.linspace(-7,7,501) kdepdf = gkde.evaluate(xs) normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd) intervall = xs[1] - xs[0] assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01) prob1 = gkde.integrate_box_1d(xnmean, np.inf) prob2 = gkde.integrate_box_1d(-np.inf, xnmean) assert_almost_equal(prob1, 0.5, decimal=1) assert_almost_equal(prob2, 0.5, decimal=1) assert_almost_equal(gkde.integrate_box(xnmean, np.inf), prob1, decimal=13) assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), prob2, decimal=13) assert_almost_equal(gkde.integrate_kde(gkde), (kdepdf**2).sum()*intervall, decimal=2) assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2), (kdepdf*normpdf).sum()*intervall, decimal=2) def test_kde_2d(): #some basic tests comparing to normal distribution np.random.seed(8765678) n_basesample = 500 mean = np.array([1.0, 3.0]) covariance = np.array([[1.0, 2.0], [2.0, 6.0]]) # Need transpose (shape (2, 500)) for kde xn = np.random.multivariate_normal(mean, covariance, size=n_basesample).T # get kde for original sample gkde = stats.gaussian_kde(xn) # evaluate the density function for the kde for some points x, y = np.mgrid[-7:7:500j, -7:7:500j] grid_coords = np.vstack([x.ravel(), y.ravel()]) kdepdf = gkde.evaluate(grid_coords) kdepdf = kdepdf.reshape(500, 500) normpdf = stats.multivariate_normal.pdf(np.dstack([x, y]), mean=mean, cov=covariance) intervall = y.ravel()[1] - y.ravel()[0] assert_(np.sum((kdepdf - normpdf)**2) * (intervall**2) < 0.01) small = -1e100 large = 1e100 prob1 = gkde.integrate_box([small, mean[1]], [large, large]) prob2 = gkde.integrate_box([small, small], [large, mean[1]]) assert_almost_equal(prob1, 0.5, decimal=1) assert_almost_equal(prob2, 0.5, decimal=1) assert_almost_equal(gkde.integrate_kde(gkde), (kdepdf**2).sum()*(intervall**2), decimal=2) assert_almost_equal(gkde.integrate_gaussian(mean, covariance), (kdepdf*normpdf).sum()*(intervall**2), decimal=2) def test_kde_bandwidth_method(): def scotts_factor(kde_obj): """Same as default, just check that it works.""" return np.power(kde_obj.n, -1./(kde_obj.d+4)) np.random.seed(8765678) n_basesample = 50 xn = np.random.randn(n_basesample) # Default gkde = stats.gaussian_kde(xn) # Supply a callable gkde2 = stats.gaussian_kde(xn, bw_method=scotts_factor) # Supply a scalar gkde3 = stats.gaussian_kde(xn, bw_method=gkde.factor) xs = np.linspace(-7,7,51) kdepdf = gkde.evaluate(xs) kdepdf2 = gkde2.evaluate(xs) assert_almost_equal(kdepdf, kdepdf2) kdepdf3 = gkde3.evaluate(xs) assert_almost_equal(kdepdf, kdepdf3) assert_raises(ValueError, stats.gaussian_kde, xn, bw_method='wrongstring') # Subclasses that should stay working (extracted from various sources). # Unfortunately the earlier design of gaussian_kde made it necessary for users # to create these kinds of subclasses, or call _compute_covariance() directly. 
class _kde_subclass1(stats.gaussian_kde): def __init__(self, dataset): self.dataset = np.atleast_2d(dataset) self.d, self.n = self.dataset.shape self.covariance_factor = self.scotts_factor self._compute_covariance() class _kde_subclass2(stats.gaussian_kde): def __init__(self, dataset): self.covariance_factor = self.scotts_factor super(_kde_subclass2, self).__init__(dataset) class _kde_subclass3(stats.gaussian_kde): def __init__(self, dataset, covariance): self.covariance = covariance stats.gaussian_kde.__init__(self, dataset) def _compute_covariance(self): self.inv_cov = np.linalg.inv(self.covariance) self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \ * self.n class _kde_subclass4(stats.gaussian_kde): def covariance_factor(self): return 0.5 * self.silverman_factor() def test_gaussian_kde_subclassing(): x1 = np.array([-7, -5, 1, 4, 5], dtype=float) xs = np.linspace(-10, 10, num=50) # gaussian_kde itself kde = stats.gaussian_kde(x1) ys = kde(xs) # subclass 1 kde1 = _kde_subclass1(x1) y1 = kde1(xs) assert_array_almost_equal_nulp(ys, y1, nulp=10) # subclass 2 kde2 = _kde_subclass2(x1) y2 = kde2(xs) assert_array_almost_equal_nulp(ys, y2, nulp=10) # subclass 3 kde3 = _kde_subclass3(x1, kde.covariance) y3 = kde3(xs) assert_array_almost_equal_nulp(ys, y3, nulp=10) # subclass 4 kde4 = _kde_subclass4(x1) y4 = kde4(x1) y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017] assert_array_almost_equal(y_expected, y4, decimal=6) # Not a subclass, but check for use of _compute_covariance() kde5 = kde kde5.covariance_factor = lambda: kde.factor kde5._compute_covariance() y5 = kde5(xs) assert_array_almost_equal_nulp(ys, y5, nulp=10) def test_gaussian_kde_covariance_caching(): x1 = np.array([-7, -5, 1, 4, 5], dtype=float) xs = np.linspace(-10, 10, num=5) # These expected values are from scipy 0.10, before some changes to # gaussian_kde. They were not compared with any external reference. y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475] # Set the bandwidth, then reset it to the default. kde = stats.gaussian_kde(x1) kde.set_bandwidth(bw_method=0.5) kde.set_bandwidth(bw_method='scott') y2 = kde(xs) assert_array_almost_equal(y_expected, y2, decimal=7) def test_gaussian_kde_monkeypatch(): """Ugly, but people may rely on this. See scipy pull request 123, specifically the linked ML thread "Width of the Gaussian in stats.kde". If it is necessary to break this later on, that is to be discussed on ML. """ x1 = np.array([-7, -5, 1, 4, 5], dtype=float) xs = np.linspace(-10, 10, num=50) # The old monkeypatched version to get at Silverman's Rule. kde = stats.gaussian_kde(x1) kde.covariance_factor = kde.silverman_factor kde._compute_covariance() y1 = kde(xs) # The new saner version. 
kde2 = stats.gaussian_kde(x1, bw_method='silverman') y2 = kde2(xs) assert_array_almost_equal_nulp(y1, y2, nulp=10) def test_kde_integer_input(): """Regression test for #1181.""" x1 = np.arange(5) kde = stats.gaussian_kde(x1) y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721] assert_array_almost_equal(kde(x1), y_expected, decimal=6) def test_pdf_logpdf(): np.random.seed(1) n_basesample = 50 xn = np.random.randn(n_basesample) # Default gkde = stats.gaussian_kde(xn) xs = np.linspace(-15, 12, 25) pdf = gkde.evaluate(xs) pdf2 = gkde.pdf(xs) assert_almost_equal(pdf, pdf2, decimal=12) logpdf = np.log(pdf) logpdf2 = gkde.logpdf(xs) assert_almost_equal(logpdf, logpdf2, decimal=12) # There are more points than data gkde = stats.gaussian_kde(xs) pdf = np.log(gkde.evaluate(xn)) pdf2 = gkde.logpdf(xn) assert_almost_equal(pdf, pdf2, decimal=12)
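# Illustrative sketch (not part of the original file): the Scott bandwidth
# rule that scotts_factor above re-implements is n**(-1/(d+4)), which is
# also gaussian_kde's default, so kde.factor for 50 one-dimensional points
# equals 50**(-0.2).
if __name__ == "__main__":
    import numpy as np
    from scipy import stats

    np.random.seed(0)
    sample = np.random.randn(50)
    kde = stats.gaussian_kde(sample)
    print(kde.factor, 50 ** (-1.0 / (1 + 4)))    # both ~0.4573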
7,923
31.342857
89
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_distributions.py
""" Test functions for stats module """ from __future__ import division, print_function, absolute_import import warnings import re import sys import pickle from numpy.testing import (assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_allclose, assert_, assert_warns) import pytest from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings import numpy import numpy as np from numpy import typecodes, array from scipy import special from scipy.integrate import IntegrationWarning import scipy.stats as stats from scipy.stats._distn_infrastructure import argsreduce import scipy.stats.distributions from scipy.special import xlogy from .test_continuous_basic import distcont # python -OO strips docstrings DOCSTRINGS_STRIPPED = sys.flags.optimize > 1 # Generate test cases to test cdf and distribution consistency. # Note that this list does not include all distributions. dists = ['uniform', 'norm', 'lognorm', 'expon', 'beta', 'powerlaw', 'bradford', 'burr', 'fisk', 'cauchy', 'halfcauchy', 'foldcauchy', 'gamma', 'gengamma', 'loggamma', 'alpha', 'anglit', 'arcsine', 'betaprime', 'dgamma', 'moyal', 'exponnorm', 'exponweib', 'exponpow', 'frechet_l', 'frechet_r', 'gilbrat', 'f', 'ncf', 'chi2', 'chi', 'nakagami', 'genpareto', 'genextreme', 'genhalflogistic', 'pareto', 'lomax', 'halfnorm', 'halflogistic', 'fatiguelife', 'foldnorm', 'ncx2', 't', 'nct', 'weibull_min', 'weibull_max', 'dweibull', 'maxwell', 'rayleigh', 'genlogistic', 'logistic', 'gumbel_l', 'gumbel_r', 'gompertz', 'hypsecant', 'laplace', 'reciprocal', 'trapz', 'triang', 'tukeylambda', 'vonmises', 'vonmises_line', 'pearson3', 'gennorm', 'halfgennorm', 'rice', 'kappa4', 'kappa3', 'truncnorm', 'argus', 'crystalball'] def _assert_hasattr(a, b, msg=None): if msg is None: msg = '%s does not have attribute %s' % (a, b) assert_(hasattr(a, b), msg=msg) def test_api_regression(): # https://github.com/scipy/scipy/issues/3802 _assert_hasattr(scipy.stats.distributions, 'f_gen') # check function for test generator def check_distribution(dist, args, alpha): with suppress_warnings() as sup: # frechet_l and frechet_r are deprecated, so all their # methods generate DeprecationWarnings. 
sup.filter(category=DeprecationWarning, message=".*frechet_") D, pval = stats.kstest(dist, '', args=args, N=1000) if (pval < alpha): D, pval = stats.kstest(dist, '', args=args, N=1000) assert_(pval > alpha, msg="D = {}; pval = {}; alpha = {}; args = {}".format( D, pval, alpha, args)) def cases_test_all_distributions(): np.random.seed(1234) for dist in dists: distfunc = getattr(stats, dist) nargs = distfunc.numargs alpha = 0.01 if dist == 'fatiguelife': alpha = 0.001 if dist == 'trapz': args = tuple(np.sort(np.random.random(nargs))) elif dist == 'triang': args = tuple(np.random.random(nargs)) elif dist == 'reciprocal' or dist == 'truncnorm': vals = np.random.random(nargs) vals[1] = vals[0] + 1.0 args = tuple(vals) elif dist == 'vonmises': yield dist, (10,), alpha yield dist, (101,), alpha args = tuple(1.0 + np.random.random(nargs)) else: args = tuple(1.0 + np.random.random(nargs)) yield dist, args, alpha @pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions()) def test_all_distributions(dist, args, alpha): check_distribution(dist, args, alpha) def check_vonmises_pdf_periodic(k, l, s, x): vm = stats.vonmises(k, loc=l, scale=s) assert_almost_equal(vm.pdf(x), vm.pdf(x % (2*numpy.pi*s))) def check_vonmises_cdf_periodic(k, l, s, x): vm = stats.vonmises(k, loc=l, scale=s) assert_almost_equal(vm.cdf(x) % 1, vm.cdf(x % (2*numpy.pi*s)) % 1) def test_vonmises_pdf_periodic(): for k in [0.1, 1, 101]: for x in [0, 1, numpy.pi, 10, 100]: check_vonmises_pdf_periodic(k, 0, 1, x) check_vonmises_pdf_periodic(k, 1, 1, x) check_vonmises_pdf_periodic(k, 0, 10, x) check_vonmises_cdf_periodic(k, 0, 1, x) check_vonmises_cdf_periodic(k, 1, 1, x) check_vonmises_cdf_periodic(k, 0, 10, x) def test_vonmises_line_support(): assert_equal(stats.vonmises_line.a, -np.pi) assert_equal(stats.vonmises_line.b, np.pi) def test_vonmises_numerical(): vm = stats.vonmises(800) assert_almost_equal(vm.cdf(0), 0.5) @pytest.mark.parametrize('dist', ['alpha', 'betaprime', 'burr', 'burr12', 'fatiguelife', 'invgamma', 'invgauss', 'invweibull', 'johnsonsb', 'levy', 'levy_l', 'lognorm', 'gilbrat', 'powerlognorm', 'rayleigh', 'wald']) def test_support(dist): """gh-6235""" dct = dict(distcont) args = dct[dist] dist = getattr(stats, dist) assert_almost_equal(dist.pdf(dist.a, *args), 0) assert_equal(dist.logpdf(dist.a, *args), -np.inf) assert_almost_equal(dist.pdf(dist.b, *args), 0) assert_equal(dist.logpdf(dist.b, *args), -np.inf) class TestRandInt(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.randint.rvs(5, 30, size=100) assert_(numpy.all(vals < 30) & numpy.all(vals >= 5)) assert_(len(vals) == 100) vals = stats.randint.rvs(5, 30, size=(2, 50)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.randint.rvs(15, 46) assert_((val >= 15) & (val < 46)) assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val))) val = stats.randint(15, 46).rvs(3) assert_(val.dtype.char in typecodes['AllInteger']) def test_pdf(self): k = numpy.r_[0:36] out = numpy.where((k >= 5) & (k < 30), 1.0/(30-5), 0) vals = stats.randint.pmf(k, 5, 30) assert_array_almost_equal(vals, out) def test_cdf(self): x = numpy.r_[0:36:100j] k = numpy.floor(x) out = numpy.select([k >= 30, k >= 5], [1.0, (k-5.0+1)/(30-5.0)], 0) vals = stats.randint.cdf(x, 5, 30) assert_array_almost_equal(vals, out, decimal=12) class TestBinom(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.binom.rvs(10, 0.75, size=(2, 50)) assert_(numpy.all(vals >= 0) & 
numpy.all(vals <= 10)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.binom.rvs(10, 0.75) assert_(isinstance(val, int)) val = stats.binom(10, 0.75).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_pmf(self): # regression test for Ticket #1842 vals1 = stats.binom.pmf(100, 100, 1) vals2 = stats.binom.pmf(0, 100, 0) assert_allclose(vals1, 1.0, rtol=1e-15, atol=0) assert_allclose(vals2, 1.0, rtol=1e-15, atol=0) def test_entropy(self): # Basic entropy tests. b = stats.binom(2, 0.5) expected_p = np.array([0.25, 0.5, 0.25]) expected_h = -sum(xlogy(expected_p, expected_p)) h = b.entropy() assert_allclose(h, expected_h) b = stats.binom(2, 0.0) h = b.entropy() assert_equal(h, 0.0) b = stats.binom(2, 1.0) h = b.entropy() assert_equal(h, 0.0) def test_warns_p0(self): # no spurious warnigns are generated for p=0; gh-3817 with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) assert_equal(stats.binom(n=2, p=0).mean(), 0) assert_equal(stats.binom(n=2, p=0).std(), 0) class TestBernoulli(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.bernoulli.rvs(0.75, size=(2, 50)) assert_(numpy.all(vals >= 0) & numpy.all(vals <= 1)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.bernoulli.rvs(0.75) assert_(isinstance(val, int)) val = stats.bernoulli(0.75).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_entropy(self): # Simple tests of entropy. b = stats.bernoulli(0.25) expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75) h = b.entropy() assert_allclose(h, expected_h) b = stats.bernoulli(0.0) h = b.entropy() assert_equal(h, 0.0) b = stats.bernoulli(1.0) h = b.entropy() assert_equal(h, 0.0) class TestBradford(object): # gh-6216 def test_cdf_ppf(self): c = 0.1 x = np.logspace(-20, -4) q = stats.bradford.cdf(x, c) xx = stats.bradford.ppf(q, c) assert_allclose(x, xx) class TestNBinom(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.nbinom.rvs(10, 0.75, size=(2, 50)) assert_(numpy.all(vals >= 0)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.nbinom.rvs(10, 0.75) assert_(isinstance(val, int)) val = stats.nbinom(10, 0.75).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_pmf(self): # regression test for ticket 1779 assert_allclose(np.exp(stats.nbinom.logpmf(700, 721, 0.52)), stats.nbinom.pmf(700, 721, 0.52)) # logpmf(0,1,1) shouldn't return nan (regression test for gh-4029) val = scipy.stats.nbinom.logpmf(0, 1, 1) assert_equal(val, 0) class TestNormInvGauss(object): def setup_method(self): np.random.seed(1234) def test_cdf_R(self): # test pdf and cdf vals against R # require("GeneralizedHyperbolic") # x_test <- c(-7, -5, 0, 8, 15) # r_cdf <- GeneralizedHyperbolic::pnig(x_test, mu = 0, a = 1, b = 0.5) # r_pdf <- GeneralizedHyperbolic::dnig(x_test, mu = 0, a = 1, b = 0.5) r_cdf = np.array([8.034920282e-07, 2.512671945e-05, 3.186661051e-01, 9.988650664e-01, 9.999848769e-01]) x_test = np.array([-7, -5, 0, 8, 15]) vals_cdf = stats.norminvgauss.cdf(x_test, a=1, b=0.5) assert_allclose(vals_cdf, r_cdf, atol=1e-9) def test_pdf_R(self): # values from R as defined in test_cdf_R r_pdf = np.array([1.359600783e-06, 4.413878805e-05, 4.555014266e-01, 7.450485342e-04, 8.917889931e-06]) x_test = np.array([-7, -5, 
0, 8, 15]) vals_pdf = stats.norminvgauss.pdf(x_test, a=1, b=0.5) assert_allclose(vals_pdf, r_pdf, atol=1e-9) def test_stats(self): a, b = 1, 0.5 gamma = np.sqrt(a**2 - b**2) v_stats = (b / gamma, a**2 / gamma**3, 3.0 * b / (a * np.sqrt(gamma)), 3.0 * (1 + 4 * b**2 / a**2) / gamma) assert_equal(v_stats, stats.norminvgauss.stats(a, b, moments='mvsk')) def test_ppf(self): a, b = 1, 0.5 x_test = np.array([0.001, 0.5, 0.999]) vals = stats.norminvgauss.ppf(x_test, a, b) assert_allclose(x_test, stats.norminvgauss.cdf(vals, a, b)) class TestGeom(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.geom.rvs(0.75, size=(2, 50)) assert_(numpy.all(vals >= 0)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.geom.rvs(0.75) assert_(isinstance(val, int)) val = stats.geom(0.75).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_pmf(self): vals = stats.geom.pmf([1, 2, 3], 0.5) assert_array_almost_equal(vals, [0.5, 0.25, 0.125]) def test_logpmf(self): # regression test for ticket 1793 vals1 = np.log(stats.geom.pmf([1, 2, 3], 0.5)) vals2 = stats.geom.logpmf([1, 2, 3], 0.5) assert_allclose(vals1, vals2, rtol=1e-15, atol=0) # regression test for gh-4028 val = stats.geom.logpmf(1, 1) assert_equal(val, 0.0) def test_cdf_sf(self): vals = stats.geom.cdf([1, 2, 3], 0.5) vals_sf = stats.geom.sf([1, 2, 3], 0.5) expected = array([0.5, 0.75, 0.875]) assert_array_almost_equal(vals, expected) assert_array_almost_equal(vals_sf, 1-expected) def test_logcdf_logsf(self): vals = stats.geom.logcdf([1, 2, 3], 0.5) vals_sf = stats.geom.logsf([1, 2, 3], 0.5) expected = array([0.5, 0.75, 0.875]) assert_array_almost_equal(vals, np.log(expected)) assert_array_almost_equal(vals_sf, np.log1p(-expected)) def test_ppf(self): vals = stats.geom.ppf([0.5, 0.75, 0.875], 0.5) expected = array([1.0, 2.0, 3.0]) assert_array_almost_equal(vals, expected) class TestPlanck(object): def setup_method(self): np.random.seed(1234) def test_sf(self): vals = stats.planck.sf([1, 2, 3], 5.) expected = array([4.5399929762484854e-05, 3.0590232050182579e-07, 2.0611536224385579e-09]) assert_array_almost_equal(vals, expected) def test_logsf(self): vals = stats.planck.logsf([1000., 2000., 3000.], 1000.) 
expected = array([-1001000., -2001000., -3001000.]) assert_array_almost_equal(vals, expected) class TestGennorm(object): def test_laplace(self): # test against Laplace (special case for beta=1) points = [1, 2, 3] pdf1 = stats.gennorm.pdf(points, 1) pdf2 = stats.laplace.pdf(points) assert_almost_equal(pdf1, pdf2) def test_norm(self): # test against normal (special case for beta=2) points = [1, 2, 3] pdf1 = stats.gennorm.pdf(points, 2) pdf2 = stats.norm.pdf(points, scale=2**-.5) assert_almost_equal(pdf1, pdf2) class TestHalfgennorm(object): def test_expon(self): # test against exponential (special case for beta=1) points = [1, 2, 3] pdf1 = stats.halfgennorm.pdf(points, 1) pdf2 = stats.expon.pdf(points) assert_almost_equal(pdf1, pdf2) def test_halfnorm(self): # test against half normal (special case for beta=2) points = [1, 2, 3] pdf1 = stats.halfgennorm.pdf(points, 2) pdf2 = stats.halfnorm.pdf(points, scale=2**-.5) assert_almost_equal(pdf1, pdf2) def test_gennorm(self): # test against generalized normal points = [1, 2, 3] pdf1 = stats.halfgennorm.pdf(points, .497324) pdf2 = stats.gennorm.pdf(points, .497324) assert_almost_equal(pdf1, 2*pdf2) class TestTruncnorm(object): def setup_method(self): np.random.seed(1234) def test_ppf_ticket1131(self): vals = stats.truncnorm.ppf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1., loc=[3]*7, scale=2) expected = np.array([np.nan, 1, 1.00056419, 3, 4.99943581, 5, np.nan]) assert_array_almost_equal(vals, expected) def test_isf_ticket1131(self): vals = stats.truncnorm.isf([-0.5, 0, 1e-4, 0.5, 1-1e-4, 1, 2], -1., 1., loc=[3]*7, scale=2) expected = np.array([np.nan, 5, 4.99943581, 3, 1.00056419, 1, np.nan]) assert_array_almost_equal(vals, expected) def test_gh_2477_small_values(self): # Check a case that worked in the original issue. low, high = -11, -10 x = stats.truncnorm.rvs(low, high, 0, 1, size=10) assert_(low < x.min() < x.max() < high) # Check a case that failed in the original issue. low, high = 10, 11 x = stats.truncnorm.rvs(low, high, 0, 1, size=10) assert_(low < x.min() < x.max() < high) @pytest.mark.xfail(reason="truncnorm rvs is know to fail at extreme tails") def test_gh_2477_large_values(self): # Check a case that fails because of extreme tailness. low, high = 100, 101 with np.errstate(divide='ignore'): x = stats.truncnorm.rvs(low, high, 0, 1, size=10) assert_(low < x.min() < x.max() < high) def test_gh_1489_trac_962_rvs(self): # Check the original example. 
low, high = 10, 15 x = stats.truncnorm.rvs(low, high, 0, 1, size=10) assert_(low < x.min() < x.max() < high) class TestHypergeom(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.hypergeom.rvs(20, 10, 3, size=(2, 50)) assert_(numpy.all(vals >= 0) & numpy.all(vals <= 3)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.hypergeom.rvs(20, 3, 10) assert_(isinstance(val, int)) val = stats.hypergeom(20, 3, 10).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_precision(self): # comparison number from mpmath M = 2500 n = 50 N = 500 tot = M good = n hgpmf = stats.hypergeom.pmf(2, tot, good, N) assert_almost_equal(hgpmf, 0.0010114963068932233, 11) def test_args(self): # test correct output for corner cases of arguments # see gh-2325 assert_almost_equal(stats.hypergeom.pmf(0, 2, 1, 0), 1.0, 11) assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11) assert_almost_equal(stats.hypergeom.pmf(0, 2, 0, 2), 1.0, 11) assert_almost_equal(stats.hypergeom.pmf(1, 2, 1, 0), 0.0, 11) def test_cdf_above_one(self): # for some values of parameters, hypergeom cdf was >1, see gh-2238 assert_(0 <= stats.hypergeom.cdf(30, 13397950, 4363, 12390) <= 1.0) def test_precision2(self): # Test hypergeom precision for large numbers. See #1218. # Results compared with those from R. oranges = 9.9e4 pears = 1.1e5 fruits_eaten = np.array([3, 3.8, 3.9, 4, 4.1, 4.2, 5]) * 1e4 quantile = 2e4 res = [] for eaten in fruits_eaten: res.append(stats.hypergeom.sf(quantile, oranges + pears, oranges, eaten)) expected = np.array([0, 1.904153e-114, 2.752693e-66, 4.931217e-32, 8.265601e-11, 0.1237904, 1]) assert_allclose(res, expected, atol=0, rtol=5e-7) # Test with array_like first argument quantiles = [1.9e4, 2e4, 2.1e4, 2.15e4] res2 = stats.hypergeom.sf(quantiles, oranges + pears, oranges, 4.2e4) expected2 = [1, 0.1237904, 6.511452e-34, 3.277667e-69] assert_allclose(res2, expected2, atol=0, rtol=5e-7) def test_entropy(self): # Simple tests of entropy. hg = stats.hypergeom(4, 1, 1) h = hg.entropy() expected_p = np.array([0.75, 0.25]) expected_h = -np.sum(xlogy(expected_p, expected_p)) assert_allclose(h, expected_h) hg = stats.hypergeom(1, 1, 1) h = hg.entropy() assert_equal(h, 0.0) def test_logsf(self): # Test logsf for very large numbers. See issue #4982 # Results compare with those from R (v3.2.0): # phyper(k, n, M-n, N, lower.tail=FALSE, log.p=TRUE) # -2239.771 k = 1e4 M = 1e7 n = 1e6 N = 5e4 result = stats.hypergeom.logsf(k, M, n, N) exspected = -2239.771 # From R assert_almost_equal(result, exspected, decimal=3) class TestLoggamma(object): def test_stats(self): # The following precomputed values are from the table in section 2.2 # of "A Statistical Study of Log-Gamma Distribution", by Ping Shing # Chan (thesis, McMaster University, 1993). table = np.array([ # c, mean, var, skew, exc. kurt. 
0.5, -1.9635, 4.9348, -1.5351, 4.0000, 1.0, -0.5772, 1.6449, -1.1395, 2.4000, 12.0, 2.4427, 0.0869, -0.2946, 0.1735, ]).reshape(-1, 5) for c, mean, var, skew, kurt in table: computed = stats.loggamma.stats(c, moments='msvk') assert_array_almost_equal(computed, [mean, var, skew, kurt], decimal=4) class TestLogistic(object): # gh-6226 def test_cdf_ppf(self): x = np.linspace(-20, 20) y = stats.logistic.cdf(x) xx = stats.logistic.ppf(y) assert_allclose(x, xx) def test_sf_isf(self): x = np.linspace(-20, 20) y = stats.logistic.sf(x) xx = stats.logistic.isf(y) assert_allclose(x, xx) def test_extreme_values(self): # p is chosen so that 1 - (1 - p) == p in double precision p = 9.992007221626409e-16 desired = 34.53957599234088 assert_allclose(stats.logistic.ppf(1 - p), desired) assert_allclose(stats.logistic.isf(p), desired) class TestLogser(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.logser.rvs(0.75, size=(2, 50)) assert_(numpy.all(vals >= 1)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.logser.rvs(0.75) assert_(isinstance(val, int)) val = stats.logser(0.75).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_pmf_small_p(self): m = stats.logser.pmf(4, 1e-20) # The expected value was computed using mpmath: # >>> import mpmath # >>> mpmath.mp.dps = 64 # >>> k = 4 # >>> p = mpmath.mpf('1e-20') # >>> float(-(p**k)/k/mpmath.log(1-p)) # 2.5e-61 # It is also clear from noticing that for very small p, # log(1-p) is approximately -p, and the formula becomes # p**(k-1) / k assert_allclose(m, 2.5e-61) def test_mean_small_p(self): m = stats.logser.mean(1e-8) # The expected mean was computed using mpmath: # >>> import mpmath # >>> mpmath.dps = 60 # >>> p = mpmath.mpf('1e-8') # >>> float(-p / ((1 - p)*mpmath.log(1 - p))) # 1.000000005 assert_allclose(m, 1.000000005) class TestPareto(object): def test_stats(self): # Check the stats() method with some simple values. Also check # that the calculations do not trigger RuntimeWarnings. 
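        # For reference, the standard Pareto (Type I) moment formulas that
        # the expected values below follow (listed as a cross-check, not
        # taken from the implementation):
        #   mean      = b/(b - 1)                               for b > 1
        #   variance  = b/((b - 1)**2 * (b - 2))                for b > 2
        #   skewness  = 2*(1 + b)/(b - 3) * sqrt((b - 2)/b)     for b > 3
        #   exc. kurt = 6*(b**3 + b**2 - 6*b - 2)/(b*(b - 3)*(b - 4))
        #                                                       for b > 4
        # Below those thresholds the corresponding moment is inf or nan.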
with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) m, v, s, k = stats.pareto.stats(0.5, moments='mvsk') assert_equal(m, np.inf) assert_equal(v, np.inf) assert_equal(s, np.nan) assert_equal(k, np.nan) m, v, s, k = stats.pareto.stats(1.0, moments='mvsk') assert_equal(m, np.inf) assert_equal(v, np.inf) assert_equal(s, np.nan) assert_equal(k, np.nan) m, v, s, k = stats.pareto.stats(1.5, moments='mvsk') assert_equal(m, 3.0) assert_equal(v, np.inf) assert_equal(s, np.nan) assert_equal(k, np.nan) m, v, s, k = stats.pareto.stats(2.0, moments='mvsk') assert_equal(m, 2.0) assert_equal(v, np.inf) assert_equal(s, np.nan) assert_equal(k, np.nan) m, v, s, k = stats.pareto.stats(2.5, moments='mvsk') assert_allclose(m, 2.5 / 1.5) assert_allclose(v, 2.5 / (1.5*1.5*0.5)) assert_equal(s, np.nan) assert_equal(k, np.nan) m, v, s, k = stats.pareto.stats(3.0, moments='mvsk') assert_allclose(m, 1.5) assert_allclose(v, 0.75) assert_equal(s, np.nan) assert_equal(k, np.nan) m, v, s, k = stats.pareto.stats(3.5, moments='mvsk') assert_allclose(m, 3.5 / 2.5) assert_allclose(v, 3.5 / (2.5*2.5*1.5)) assert_allclose(s, (2*4.5/0.5)*np.sqrt(1.5/3.5)) assert_equal(k, np.nan) m, v, s, k = stats.pareto.stats(4.0, moments='mvsk') assert_allclose(m, 4.0 / 3.0) assert_allclose(v, 4.0 / 18.0) assert_allclose(s, 2*(1+4.0)/(4.0-3) * np.sqrt((4.0-2)/4.0)) assert_equal(k, np.nan) m, v, s, k = stats.pareto.stats(4.5, moments='mvsk') assert_allclose(m, 4.5 / 3.5) assert_allclose(v, 4.5 / (3.5*3.5*2.5)) assert_allclose(s, (2*5.5/1.5) * np.sqrt(2.5/4.5)) assert_allclose(k, 6*(4.5**3 + 4.5**2 - 6*4.5 - 2)/(4.5*1.5*0.5)) def test_sf(self): x = 1e9 b = 2 scale = 1.5 p = stats.pareto.sf(x, b, loc=0, scale=scale) expected = (scale/x)**b # 2.25e-18 assert_allclose(p, expected) class TestGenpareto(object): def test_ab(self): # c >= 0: a, b = [0, inf] for c in [1., 0.]: c = np.asarray(c) stats.genpareto._argcheck(c) # ugh assert_equal(stats.genpareto.a, 0.) assert_(np.isposinf(stats.genpareto.b)) # c < 0: a=0, b=1/|c| c = np.asarray(-2.) stats.genpareto._argcheck(c) assert_allclose([stats.genpareto.a, stats.genpareto.b], [0., 0.5]) def test_c0(self): # with c=0, genpareto reduces to the exponential distribution rv = stats.genpareto(c=0.) x = np.linspace(0, 10., 30) assert_allclose(rv.pdf(x), stats.expon.pdf(x)) assert_allclose(rv.cdf(x), stats.expon.cdf(x)) assert_allclose(rv.sf(x), stats.expon.sf(x)) q = np.linspace(0., 1., 10) assert_allclose(rv.ppf(q), stats.expon.ppf(q)) def test_cm1(self): # with c=-1, genpareto reduces to the uniform distr on [0, 1] rv = stats.genpareto(c=-1.) x = np.linspace(0, 10., 30) assert_allclose(rv.pdf(x), stats.uniform.pdf(x)) assert_allclose(rv.cdf(x), stats.uniform.cdf(x)) assert_allclose(rv.sf(x), stats.uniform.sf(x)) q = np.linspace(0., 1., 10) assert_allclose(rv.ppf(q), stats.uniform.ppf(q)) # logpdf(1., c=-1) should be zero assert_allclose(rv.logpdf(1), 0) def test_x_inf(self): # make sure x=inf is handled gracefully rv = stats.genpareto(c=0.1) assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.]) assert_(np.isneginf(rv.logpdf(np.inf))) rv = stats.genpareto(c=0.) assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.]) assert_(np.isneginf(rv.logpdf(np.inf))) rv = stats.genpareto(c=-1.) 
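        # For c < 0 the support of genpareto is the finite interval
        # [0, -1/c] (here [0, 1]), so x = inf lies past the upper endpoint
        # and the same pdf/cdf/logpdf limits as in the cases above apply.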
assert_allclose([rv.pdf(np.inf), rv.cdf(np.inf)], [0., 1.]) assert_(np.isneginf(rv.logpdf(np.inf))) def test_c_continuity(self): # pdf is continuous at c=0, -1 x = np.linspace(0, 10, 30) for c in [0, -1]: pdf0 = stats.genpareto.pdf(x, c) for dc in [1e-14, -1e-14]: pdfc = stats.genpareto.pdf(x, c + dc) assert_allclose(pdf0, pdfc, atol=1e-12) cdf0 = stats.genpareto.cdf(x, c) for dc in [1e-14, 1e-14]: cdfc = stats.genpareto.cdf(x, c + dc) assert_allclose(cdf0, cdfc, atol=1e-12) def test_c_continuity_ppf(self): q = np.r_[np.logspace(1e-12, 0.01, base=0.1), np.linspace(0.01, 1, 30, endpoint=False), 1. - np.logspace(1e-12, 0.01, base=0.1)] for c in [0., -1.]: ppf0 = stats.genpareto.ppf(q, c) for dc in [1e-14, -1e-14]: ppfc = stats.genpareto.ppf(q, c + dc) assert_allclose(ppf0, ppfc, atol=1e-12) def test_c_continuity_isf(self): q = np.r_[np.logspace(1e-12, 0.01, base=0.1), np.linspace(0.01, 1, 30, endpoint=False), 1. - np.logspace(1e-12, 0.01, base=0.1)] for c in [0., -1.]: isf0 = stats.genpareto.isf(q, c) for dc in [1e-14, -1e-14]: isfc = stats.genpareto.isf(q, c + dc) assert_allclose(isf0, isfc, atol=1e-12) def test_cdf_ppf_roundtrip(self): # this should pass with machine precision. hat tip @pbrod q = np.r_[np.logspace(1e-12, 0.01, base=0.1), np.linspace(0.01, 1, 30, endpoint=False), 1. - np.logspace(1e-12, 0.01, base=0.1)] for c in [1e-8, -1e-18, 1e-15, -1e-15]: assert_allclose(stats.genpareto.cdf(stats.genpareto.ppf(q, c), c), q, atol=1e-15) def test_logsf(self): logp = stats.genpareto.logsf(1e10, .01, 0, 1) assert_allclose(logp, -1842.0680753952365) class TestPearson3(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.pearson3.rvs(0.1, size=(2, 50)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllFloat']) val = stats.pearson3.rvs(0.5) assert_(isinstance(val, float)) val = stats.pearson3(0.5).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllFloat']) assert_(len(val) == 3) def test_pdf(self): vals = stats.pearson3.pdf(2, [0.0, 0.1, 0.2]) assert_allclose(vals, np.array([0.05399097, 0.05555481, 0.05670246]), atol=1e-6) vals = stats.pearson3.pdf(-3, 0.1) assert_allclose(vals, np.array([0.00313791]), atol=1e-6) vals = stats.pearson3.pdf([-3, -2, -1, 0, 1], 0.1) assert_allclose(vals, np.array([0.00313791, 0.05192304, 0.25028092, 0.39885918, 0.23413173]), atol=1e-6) def test_cdf(self): vals = stats.pearson3.cdf(2, [0.0, 0.1, 0.2]) assert_allclose(vals, np.array([0.97724987, 0.97462004, 0.97213626]), atol=1e-6) vals = stats.pearson3.cdf(-3, 0.1) assert_allclose(vals, [0.00082256], atol=1e-6) vals = stats.pearson3.cdf([-3, -2, -1, 0, 1], 0.1) assert_allclose(vals, [8.22563821e-04, 1.99860448e-02, 1.58550710e-01, 5.06649130e-01, 8.41442111e-01], atol=1e-6) class TestKappa4(object): def test_cdf_genpareto(self): # h = 1 and k != 0 is generalized Pareto x = [0.0, 0.1, 0.2, 0.5] h = 1.0 for k in [-1.9, -1.0, -0.5, -0.2, -0.1, 0.1, 0.2, 0.5, 1.0, 1.9]: vals = stats.kappa4.cdf(x, h, k) # shape parameter is opposite what is expected vals_comp = stats.genpareto.cdf(x, -k) assert_allclose(vals, vals_comp) def test_cdf_genextreme(self): # h = 0 and k != 0 is generalized extreme value x = np.linspace(-5, 5, 10) h = 0.0 k = np.linspace(-3, 3, 10) vals = stats.kappa4.cdf(x, h, k) vals_comp = stats.genextreme.cdf(x, k) assert_allclose(vals, vals_comp) def test_cdf_expon(self): # h = 1 and k = 0 is exponential x = np.linspace(0, 10, 10) h = 1.0 k = 0.0 vals = stats.kappa4.cdf(x, h, k) vals_comp = 
stats.expon.cdf(x) assert_allclose(vals, vals_comp) def test_cdf_gumbel_r(self): # h = 0 and k = 0 is gumbel_r x = np.linspace(-5, 5, 10) h = 0.0 k = 0.0 vals = stats.kappa4.cdf(x, h, k) vals_comp = stats.gumbel_r.cdf(x) assert_allclose(vals, vals_comp) def test_cdf_logistic(self): # h = -1 and k = 0 is logistic x = np.linspace(-5, 5, 10) h = -1.0 k = 0.0 vals = stats.kappa4.cdf(x, h, k) vals_comp = stats.logistic.cdf(x) assert_allclose(vals, vals_comp) def test_cdf_uniform(self): # h = 1 and k = 1 is uniform x = np.linspace(-5, 5, 10) h = 1.0 k = 1.0 vals = stats.kappa4.cdf(x, h, k) vals_comp = stats.uniform.cdf(x) assert_allclose(vals, vals_comp) def test_integers_ctor(self): # regression test for gh-7416: _argcheck fails for integer h and k # in numpy 1.12 stats.kappa4(1, 2) class TestPoisson(object): def setup_method(self): np.random.seed(1234) def test_pmf_basic(self): # Basic case ln2 = np.log(2) vals = stats.poisson.pmf([0, 1, 2], ln2) expected = [0.5, ln2/2, ln2**2/4] assert_allclose(vals, expected) def test_mu0(self): # Edge case: mu=0 vals = stats.poisson.pmf([0, 1, 2], 0) expected = [1, 0, 0] assert_array_equal(vals, expected) interval = stats.poisson.interval(0.95, 0) assert_equal(interval, (0, 0)) def test_rvs(self): vals = stats.poisson.rvs(0.5, size=(2, 50)) assert_(numpy.all(vals >= 0)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.poisson.rvs(0.5) assert_(isinstance(val, int)) val = stats.poisson(0.5).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_stats(self): mu = 16.0 result = stats.poisson.stats(mu, moments='mvsk') assert_allclose(result, [mu, mu, np.sqrt(1.0/mu), 1.0/mu]) mu = np.array([0.0, 1.0, 2.0]) result = stats.poisson.stats(mu, moments='mvsk') expected = (mu, mu, [np.inf, 1, 1/np.sqrt(2)], [np.inf, 1, 0.5]) assert_allclose(result, expected) class TestZipf(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.zipf.rvs(1.5, size=(2, 50)) assert_(numpy.all(vals >= 1)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.zipf.rvs(1.5) assert_(isinstance(val, int)) val = stats.zipf(1.5).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) def test_moments(self): # n-th moment is finite iff a > n + 1 m, v = stats.zipf.stats(a=2.8) assert_(np.isfinite(m)) assert_equal(v, np.inf) s, k = stats.zipf.stats(a=4.8, moments='sk') assert_(not np.isfinite([s, k]).all()) class TestDLaplace(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): vals = stats.dlaplace.rvs(1.5, size=(2, 50)) assert_(numpy.shape(vals) == (2, 50)) assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.dlaplace.rvs(1.5) assert_(isinstance(val, int)) val = stats.dlaplace(1.5).rvs(3) assert_(isinstance(val, numpy.ndarray)) assert_(val.dtype.char in typecodes['AllInteger']) assert_(stats.dlaplace.rvs(0.8) is not None) def test_stats(self): # compare the explicit formulas w/ direct summation using pmf a = 1. dl = stats.dlaplace(a) m, v, s, k = dl.stats('mvsk') N = 37 xx = np.arange(-N, N+1) pp = dl.pmf(xx) m2, m4 = np.sum(pp*xx**2), np.sum(pp*xx**4) assert_equal((m, s), (0, 0)) assert_allclose((v, k), (m2, m4/m2**2 - 3.), atol=1e-14, rtol=1e-8) def test_stats2(self): a = np.log(2.) 
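        # With a = log(2) the pmf is proportional to 2**(-|k|), i.e. the
        # geometric ratio q = exp(-a) is 1/2.  Closed forms obtained by
        # summing the geometric series (a cross-check, not a statement
        # about the implementation):
        #   var       = 2*q/(1 - q)**2                               = 4
        #   E[X**4]   = 2*q*(1 + 11*q + 11*q**2 + q**3)/((1 + q)*(1 - q)**4)
        #                                                            = 100
        #   exc. kurt = E[X**4]/var**2 - 3 = 100/16 - 3              = 3.25
        # which is what the assertion below verifies.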
dl = stats.dlaplace(a) m, v, s, k = dl.stats('mvsk') assert_equal((m, s), (0., 0.)) assert_allclose((v, k), (4., 3.25)) class TestInvGamma(object): def test_invgamma_inf_gh_1866(self): # invgamma's moments are only finite for a>n # specific numbers checked w/ boost 1.54 with warnings.catch_warnings(): warnings.simplefilter('error', RuntimeWarning) mvsk = stats.invgamma.stats(a=19.31, moments='mvsk') expected = [0.05461496450, 0.0001723162534, 1.020362676, 2.055616582] assert_allclose(mvsk, expected) a = [1.1, 3.1, 5.6] mvsk = stats.invgamma.stats(a=a, moments='mvsk') expected = ([10., 0.476190476, 0.2173913043], # mmm [np.inf, 0.2061430632, 0.01312749422], # vvv [np.nan, 41.95235392, 2.919025532], # sss [np.nan, np.nan, 24.51923076]) # kkk for x, y in zip(mvsk, expected): assert_almost_equal(x, y) def test_cdf_ppf(self): # gh-6245 x = np.logspace(-2.6, 0) y = stats.invgamma.cdf(x, 1) xx = stats.invgamma.ppf(y, 1) assert_allclose(x, xx) def test_sf_isf(self): # gh-6245 if sys.maxsize > 2**32: x = np.logspace(2, 100) else: # Invgamme roundtrip on 32-bit systems has relative accuracy # ~1e-15 until x=1e+15, and becomes inf above x=1e+18 x = np.logspace(2, 18) y = stats.invgamma.sf(x, 1) xx = stats.invgamma.isf(y, 1) assert_allclose(x, xx, rtol=1.0) class TestF(object): def test_f_moments(self): # n-th moment of F distributions is only finite for n < dfd / 2 m, v, s, k = stats.f.stats(11, 6.5, moments='mvsk') assert_(np.isfinite(m)) assert_(np.isfinite(v)) assert_(np.isfinite(s)) assert_(not np.isfinite(k)) def test_moments_warnings(self): # no warnings should be generated for dfd = 2, 4, 6, 8 (div by zero) with warnings.catch_warnings(): warnings.simplefilter('error', RuntimeWarning) stats.f.stats(dfn=[11]*4, dfd=[2, 4, 6, 8], moments='mvsk') @pytest.mark.xfail(reason='f stats does not properly broadcast') def test_stats_broadcast(self): # stats do not fully broadcast just yet mv = stats.f.stats(dfn=11, dfd=[11, 12]) def test_rvgeneric_std(): # Regression test for #1191 assert_array_almost_equal(stats.t.std([5, 6]), [1.29099445, 1.22474487]) class TestRvDiscrete(object): def setup_method(self): np.random.seed(1234) def test_rvs(self): states = [-1, 0, 1, 2, 3, 4] probability = [0.0, 0.3, 0.4, 0.0, 0.3, 0.0] samples = 1000 r = stats.rv_discrete(name='sample', values=(states, probability)) x = r.rvs(size=samples) assert_(isinstance(x, numpy.ndarray)) for s, p in zip(states, probability): assert_(abs(sum(x == s)/float(samples) - p) < 0.05) x = r.rvs() assert_(isinstance(x, int)) def test_entropy(self): # Basic tests of entropy. pvals = np.array([0.25, 0.45, 0.3]) p = stats.rv_discrete(values=([0, 1, 2], pvals)) expected_h = -sum(xlogy(pvals, pvals)) h = p.entropy() assert_allclose(h, expected_h) p = stats.rv_discrete(values=([0, 1, 2], [1.0, 0, 0])) h = p.entropy() assert_equal(h, 0.0) def test_pmf(self): xk = [1, 2, 4] pk = [0.5, 0.3, 0.2] rv = stats.rv_discrete(values=(xk, pk)) x = [[1., 4.], [3., 2]] assert_allclose(rv.pmf(x), [[0.5, 0.2], [0., 0.3]], atol=1e-14) def test_cdf(self): xk = [1, 2, 4] pk = [0.5, 0.3, 0.2] rv = stats.rv_discrete(values=(xk, pk)) x_values = [-2, 1., 1.1, 1.5, 2.0, 3.0, 4, 5] expected = [0, 0.5, 0.5, 0.5, 0.8, 0.8, 1, 1] assert_allclose(rv.cdf(x_values), expected, atol=1e-14) # also check scalar arguments assert_allclose([rv.cdf(xx) for xx in x_values], expected, atol=1e-14) def test_ppf(self): xk = [1, 2, 4] pk = [0.5, 0.3, 0.2] rv = stats.rv_discrete(values=(xk, pk)) q_values = [0.1, 0.5, 0.6, 0.8, 0.9, 1.] 
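        # Sanity note on how the 'expected' values were read off: the ppf
        # of a discrete distribution returns the smallest xk whose cdf
        # reaches the requested quantile.  The cdf steps here are 0.5 at
        # xk=1, 0.8 at xk=2 and 1.0 at xk=4, so q in (0, 0.5] maps to 1,
        # q in (0.5, 0.8] maps to 2 and q in (0.8, 1.0] maps to 4.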
expected = [1, 1, 2, 2, 4, 4] assert_allclose(rv.ppf(q_values), expected, atol=1e-14) # also check scalar arguments assert_allclose([rv.ppf(q) for q in q_values], expected, atol=1e-14) def test_cdf_ppf_next(self): # copied and special cased from test_discrete_basic vals = ([1, 2, 4, 7, 8], [0.1, 0.2, 0.3, 0.3, 0.1]) rv = stats.rv_discrete(values=vals) assert_array_equal(rv.ppf(rv.cdf(rv.xk[:-1]) + 1e-8), rv.xk[1:]) def test_expect(self): xk = [1, 2, 4, 6, 7, 11] pk = [0.1, 0.2, 0.2, 0.2, 0.2, 0.1] rv = stats.rv_discrete(values=(xk, pk)) assert_allclose(rv.expect(), np.sum(rv.xk * rv.pk), atol=1e-14) def test_bad_input(self): xk = [1, 2, 3] pk = [0.5, 0.5] assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) pk = [1, 2, 3] assert_raises(ValueError, stats.rv_discrete, **dict(values=(xk, pk))) class TestSkewNorm(object): def setup_method(self): np.random.seed(1234) def test_normal(self): # When the skewness is 0 the distribution is normal x = np.linspace(-5, 5, 100) assert_array_almost_equal(stats.skewnorm.pdf(x, a=0), stats.norm.pdf(x)) def test_rvs(self): shape = (3, 4, 5) x = stats.skewnorm.rvs(a=0.75, size=shape) assert_equal(shape, x.shape) x = stats.skewnorm.rvs(a=-3, size=shape) assert_equal(shape, x.shape) def test_moments(self): X = stats.skewnorm.rvs(a=4, size=int(1e6), loc=5, scale=2) expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)] computed = stats.skewnorm.stats(a=4, loc=5, scale=2, moments='mvsk') assert_array_almost_equal(computed, expected, decimal=2) X = stats.skewnorm.rvs(a=-4, size=int(1e6), loc=5, scale=2) expected = [np.mean(X), np.var(X), stats.skew(X), stats.kurtosis(X)] computed = stats.skewnorm.stats(a=-4, loc=5, scale=2, moments='mvsk') assert_array_almost_equal(computed, expected, decimal=2) def test_cdf_large_x(self): # Regression test for gh-7746. # The x values are large enough that the closest 64 bit floating # point representation of the exact CDF is 1.0. p = stats.skewnorm.cdf([10, 20, 30], -1) assert_allclose(p, np.ones(3), rtol=1e-14) p = stats.skewnorm.cdf(25, 2.5) assert_allclose(p, 1.0, rtol=1e-14) def test_cdf_sf_small_values(self): # Triples are [x, a, cdf(x, a)]. These values were computed # using CDF[SkewNormDistribution[0, 1, a], x] in Wolfram Alpha. cdfvals = [ [-8, 1, 3.870035046664392611e-31], [-4, 2, 8.1298399188811398e-21], [-2, 5, 1.55326826787106273e-26], [-9, -1, 2.257176811907681295e-19], [-10, -4, 1.523970604832105213e-23], ] for x, a, cdfval in cdfvals: p = stats.skewnorm.cdf(x, a) assert_allclose(p, cdfval, rtol=1e-8) # For the skew normal distribution, sf(-x, -a) = cdf(x, a). 
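        # This identity follows from the reflection symmetry of the
        # skew-normal density, pdf(x, a) = 2*phi(x)*Phi(a*x) = pdf(-x, -a),
        # so the mass below x for shape a equals the mass above -x for
        # shape -a.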
p = stats.skewnorm.sf(-x, -a) assert_allclose(p, cdfval, rtol=1e-8) class TestExpon(object): def test_zero(self): assert_equal(stats.expon.pdf(0), 1) def test_tail(self): # Regression test for ticket 807 assert_equal(stats.expon.cdf(1e-18), 1e-18) assert_equal(stats.expon.isf(stats.expon.sf(40)), 40) class TestExponNorm(object): def test_moments(self): # Some moment test cases based on non-loc/scaled formula def get_moms(lam, sig, mu): # See wikipedia for these formulae # where it is listed as an exponentially modified gaussian opK2 = 1.0 + 1 / (lam*sig)**2 exp_skew = 2 / (lam * sig)**3 * opK2**(-1.5) exp_kurt = 6.0 * (1 + (lam * sig)**2)**(-2) return [mu + 1/lam, sig*sig + 1.0/(lam*lam), exp_skew, exp_kurt] mu, sig, lam = 0, 1, 1 K = 1.0 / (lam * sig) sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') assert_almost_equal(sts, get_moms(lam, sig, mu)) mu, sig, lam = -3, 2, 0.1 K = 1.0 / (lam * sig) sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') assert_almost_equal(sts, get_moms(lam, sig, mu)) mu, sig, lam = 0, 3, 1 K = 1.0 / (lam * sig) sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') assert_almost_equal(sts, get_moms(lam, sig, mu)) mu, sig, lam = -5, 11, 3.5 K = 1.0 / (lam * sig) sts = stats.exponnorm.stats(K, loc=mu, scale=sig, moments='mvsk') assert_almost_equal(sts, get_moms(lam, sig, mu)) def test_extremes_x(self): # Test for extreme values against overflows assert_almost_equal(stats.exponnorm.pdf(-900, 1), 0.0) assert_almost_equal(stats.exponnorm.pdf(+900, 1), 0.0) class TestGenExpon(object): def test_pdf_unity_area(self): from scipy.integrate import simps # PDF should integrate to one p = stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0) assert_almost_equal(simps(p, dx=0.01), 1, 1) def test_cdf_bounds(self): # CDF should always be positive cdf = stats.genexpon.cdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0) assert_(numpy.all((0 <= cdf) & (cdf <= 1))) class TestExponpow(object): def test_tail(self): assert_almost_equal(stats.exponpow.cdf(1e-10, 2.), 1e-20) assert_almost_equal(stats.exponpow.isf(stats.exponpow.sf(5, .8), .8), 5) class TestSkellam(object): def test_pmf(self): # comparison to R k = numpy.arange(-10, 15) mu1, mu2 = 10, 5 skpmfR = numpy.array( [4.2254582961926893e-005, 1.1404838449648488e-004, 2.8979625801752660e-004, 6.9177078182101231e-004, 1.5480716105844708e-003, 3.2412274963433889e-003, 6.3373707175123292e-003, 1.1552351566696643e-002, 1.9606152375042644e-002, 3.0947164083410337e-002, 4.5401737566767360e-002, 6.1894328166820688e-002, 7.8424609500170578e-002, 9.2418812533573133e-002, 1.0139793148019728e-001, 1.0371927988298846e-001, 9.9076583077406091e-002, 8.8546660073089561e-002, 7.4187842052486810e-002, 5.8392772862200251e-002, 4.3268692953013159e-002, 3.0248159818374226e-002, 1.9991434305603021e-002, 1.2516877303301180e-002, 7.4389876226229707e-003]) assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15) def test_cdf(self): # comparison to R, only 5 decimals k = numpy.arange(-10, 15) mu1, mu2 = 10, 5 skcdfR = numpy.array( [6.4061475386192104e-005, 1.7810985988267694e-004, 4.6790611790020336e-004, 1.1596768997212152e-003, 2.7077485103056847e-003, 5.9489760066490718e-003, 1.2286346724161398e-002, 2.3838698290858034e-002, 4.3444850665900668e-002, 7.4392014749310995e-002, 1.1979375231607835e-001, 1.8168808048289900e-001, 2.6011268998306952e-001, 3.5253150251664261e-001, 4.5392943399683988e-001, 5.5764871387982828e-001, 6.5672529695723436e-001, 7.4527195703032389e-001, 8.1945979908281064e-001, 
8.7785257194501087e-001, 9.2112126489802404e-001, 9.5136942471639818e-001, 9.7136085902200120e-001, 9.8387773632530240e-001, 9.9131672394792536e-001]) assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5) class TestLognorm(object): def test_pdf(self): # Regression test for Ticket #1471: avoid nan with 0/0 situation # Also make sure there are no warnings at x=0, cf gh-5202 with warnings.catch_warnings(): warnings.simplefilter('error', RuntimeWarning) pdf = stats.lognorm.pdf([0, 0.5, 1], 1) assert_array_almost_equal(pdf, [0.0, 0.62749608, 0.39894228]) def test_logcdf(self): # Regression test for gh-5940: sf et al would underflow too early x2, mu, sigma = 201.68, 195, 0.149 assert_allclose(stats.lognorm.sf(x2-mu, s=sigma), stats.norm.sf(np.log(x2-mu)/sigma)) assert_allclose(stats.lognorm.logsf(x2-mu, s=sigma), stats.norm.logsf(np.log(x2-mu)/sigma)) class TestBeta(object): def test_logpdf(self): # Regression test for Ticket #1326: avoid nan with 0*log(0) situation logpdf = stats.beta.logpdf(0, 1, 0.5) assert_almost_equal(logpdf, -0.69314718056) logpdf = stats.beta.logpdf(0, 0.5, 1) assert_almost_equal(logpdf, np.inf) def test_logpdf_ticket_1866(self): alpha, beta = 267, 1472 x = np.array([0.2, 0.5, 0.6]) b = stats.beta(alpha, beta) assert_allclose(b.logpdf(x).sum(), -1201.699061824062) assert_allclose(b.pdf(x), np.exp(b.logpdf(x))) class TestBetaPrime(object): def test_logpdf(self): alpha, beta = 267, 1472 x = np.array([0.2, 0.5, 0.6]) b = stats.betaprime(alpha, beta) assert_(np.isfinite(b.logpdf(x)).all()) assert_allclose(b.pdf(x), np.exp(b.logpdf(x))) def test_cdf(self): # regression test for gh-4030: Implementation of # scipy.stats.betaprime.cdf() x = stats.betaprime.cdf(0, 0.2, 0.3) assert_equal(x, 0.0) alpha, beta = 267, 1472 x = np.array([0.2, 0.5, 0.6]) cdfs = stats.betaprime.cdf(x, alpha, beta) assert_(np.isfinite(cdfs).all()) # check the new cdf implementation vs generic one: gen_cdf = stats.rv_continuous._cdf_single cdfs_g = [gen_cdf(stats.betaprime, val, alpha, beta) for val in x] assert_allclose(cdfs, cdfs_g, atol=0, rtol=2e-12) class TestGamma(object): def test_pdf(self): # a few test cases to compare with R pdf = stats.gamma.pdf(90, 394, scale=1./5) assert_almost_equal(pdf, 0.002312341) pdf = stats.gamma.pdf(3, 10, scale=1./5) assert_almost_equal(pdf, 0.1620358) def test_logpdf(self): # Regression test for Ticket #1326: cornercase avoid nan with 0*log(0) # situation logpdf = stats.gamma.logpdf(0, 1) assert_almost_equal(logpdf, 0) class TestChi2(object): # regression tests after precision improvements, ticket:1041, not verified def test_precision(self): assert_almost_equal(stats.chi2.pdf(1000, 1000), 8.919133934753128e-003, decimal=14) assert_almost_equal(stats.chi2.pdf(100, 100), 0.028162503162596778, decimal=14) class TestGumbelL(object): # gh-6228 def test_cdf_ppf(self): x = np.linspace(-100, -4) y = stats.gumbel_l.cdf(x) xx = stats.gumbel_l.ppf(y) assert_allclose(x, xx) def test_logcdf_logsf(self): x = np.linspace(-100, -4) y = stats.gumbel_l.logcdf(x) z = stats.gumbel_l.logsf(x) u = np.exp(y) v = -special.expm1(z) assert_allclose(u, v) def test_sf_isf(self): x = np.linspace(-20, 5) y = stats.gumbel_l.sf(x) xx = stats.gumbel_l.isf(y) assert_allclose(x, xx) class TestArrayArgument(object): # test for ticket:992 def setup_method(self): np.random.seed(1234) def test_noexception(self): rvs = stats.norm.rvs(loc=(np.arange(5)), scale=np.ones(5), size=(10, 5)) assert_equal(rvs.shape, (10, 5)) class TestDocstring(object): def test_docstrings(self): # See ticket #761 
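        # The `is not None` guards below matter because docstrings are
        # stripped when Python runs under -OO; in that case there is
        # nothing to inspect and the checks are simply skipped.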
if stats.rayleigh.__doc__ is not None: assert_("rayleigh" in stats.rayleigh.__doc__.lower()) if stats.bernoulli.__doc__ is not None: assert_("bernoulli" in stats.bernoulli.__doc__.lower()) def test_no_name_arg(self): # If name is not given, construction shouldn't fail. See #1508. stats.rv_continuous() stats.rv_discrete() class TestEntropy(object): def test_entropy_positive(self): # See ticket #497 pk = [0.5, 0.2, 0.3] qk = [0.1, 0.25, 0.65] eself = stats.entropy(pk, pk) edouble = stats.entropy(pk, qk) assert_(0.0 == eself) assert_(edouble >= 0.0) def test_entropy_base(self): pk = np.ones(16, float) S = stats.entropy(pk, base=2.) assert_(abs(S - 4.) < 1.e-5) qk = np.ones(16, float) qk[:8] = 2. S = stats.entropy(pk, qk) S2 = stats.entropy(pk, qk, base=2.) assert_(abs(S/S2 - np.log(2.)) < 1.e-5) def test_entropy_zero(self): # Test for PR-479 assert_almost_equal(stats.entropy([0, 1, 2]), 0.63651416829481278, decimal=12) def test_entropy_2d(self): pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] qk = [[0.2, 0.1], [0.3, 0.6], [0.5, 0.3]] assert_array_almost_equal(stats.entropy(pk, qk), [0.1933259, 0.18609809]) def test_entropy_2d_zero(self): pk = [[0.1, 0.2], [0.6, 0.3], [0.3, 0.5]] qk = [[0.0, 0.1], [0.3, 0.6], [0.5, 0.3]] assert_array_almost_equal(stats.entropy(pk, qk), [np.inf, 0.18609809]) pk[0][0] = 0.0 assert_array_almost_equal(stats.entropy(pk, qk), [0.17403988, 0.18609809]) def TestArgsreduce(): a = array([1, 3, 2, 1, 2, 3, 3]) b, c = argsreduce(a > 1, a, 2) assert_array_equal(b, [3, 2, 2, 3, 3]) assert_array_equal(c, [2, 2, 2, 2, 2]) b, c = argsreduce(2 > 1, a, 2) assert_array_equal(b, a[0]) assert_array_equal(c, [2]) b, c = argsreduce(a > 0, a, 2) assert_array_equal(b, a) assert_array_equal(c, [2] * numpy.size(a)) class TestFitMethod(object): skip = ['ncf'] def setup_method(self): np.random.seed(1234) @pytest.mark.slow @pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions()) def test_fit(self, dist, args, alpha): if dist in self.skip: pytest.skip("%s fit known to fail" % dist) distfunc = getattr(stats, dist) with np.errstate(all='ignore'), suppress_warnings() as sup: sup.filter(category=DeprecationWarning, message=".*frechet_") res = distfunc.rvs(*args, **{'size': 200}) vals = distfunc.fit(res) vals2 = distfunc.fit(res, optimizer='powell') # Only check the length of the return # FIXME: should check the actual results to see if we are 'close' # to what was created --- but what is 'close' enough assert_(len(vals) == 2+len(args)) assert_(len(vals2) == 2+len(args)) @pytest.mark.slow @pytest.mark.parametrize('dist,args,alpha', cases_test_all_distributions()) def test_fix_fit(self, dist, args, alpha): # Not sure why 'ncf', and 'beta' are failing # frechet has different len(args) than distfunc.numargs if dist in self.skip + ['frechet']: pytest.skip("%s fit known to fail" % dist) distfunc = getattr(stats, dist) with np.errstate(all='ignore'), suppress_warnings() as sup: sup.filter(category=DeprecationWarning, message=".*frechet_") res = distfunc.rvs(*args, **{'size': 200}) vals = distfunc.fit(res, floc=0) vals2 = distfunc.fit(res, fscale=1) assert_(len(vals) == 2+len(args)) assert_(vals[-2] == 0) assert_(vals2[-1] == 1) assert_(len(vals2) == 2+len(args)) if len(args) > 0: vals3 = distfunc.fit(res, f0=args[0]) assert_(len(vals3) == 2+len(args)) assert_(vals3[0] == args[0]) if len(args) > 1: vals4 = distfunc.fit(res, f1=args[1]) assert_(len(vals4) == 2+len(args)) assert_(vals4[1] == args[1]) if len(args) > 2: vals5 = distfunc.fit(res, f2=args[2]) assert_(len(vals5) == 2+len(args)) 
assert_(vals5[2] == args[2]) def test_fix_fit_2args_lognorm(self): # Regression test for #1551. np.random.seed(12345) with np.errstate(all='ignore'): x = stats.lognorm.rvs(0.25, 0., 20.0, size=20) expected_shape = np.sqrt(((np.log(x) - np.log(20))**2).mean()) assert_allclose(np.array(stats.lognorm.fit(x, floc=0, fscale=20)), [expected_shape, 0, 20], atol=1e-8) def test_fix_fit_norm(self): x = np.arange(1, 6) loc, scale = stats.norm.fit(x) assert_almost_equal(loc, 3) assert_almost_equal(scale, np.sqrt(2)) loc, scale = stats.norm.fit(x, floc=2) assert_equal(loc, 2) assert_equal(scale, np.sqrt(3)) loc, scale = stats.norm.fit(x, fscale=2) assert_almost_equal(loc, 3) assert_equal(scale, 2) def test_fix_fit_gamma(self): x = np.arange(1, 6) meanlog = np.log(x).mean() # A basic test of gamma.fit with floc=0. floc = 0 a, loc, scale = stats.gamma.fit(x, floc=floc) s = np.log(x.mean()) - meanlog assert_almost_equal(np.log(a) - special.digamma(a), s, decimal=5) assert_equal(loc, floc) assert_almost_equal(scale, x.mean()/a, decimal=8) # Regression tests for gh-2514. # The problem was that if `floc=0` was given, any other fixed # parameters were ignored. f0 = 1 floc = 0 a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc) assert_equal(a, f0) assert_equal(loc, floc) assert_almost_equal(scale, x.mean()/a, decimal=8) f0 = 2 floc = 0 a, loc, scale = stats.gamma.fit(x, f0=f0, floc=floc) assert_equal(a, f0) assert_equal(loc, floc) assert_almost_equal(scale, x.mean()/a, decimal=8) # loc and scale fixed. floc = 0 fscale = 2 a, loc, scale = stats.gamma.fit(x, floc=floc, fscale=fscale) assert_equal(loc, floc) assert_equal(scale, fscale) c = meanlog - np.log(fscale) assert_almost_equal(special.digamma(a), c) def test_fix_fit_beta(self): # Test beta.fit when both floc and fscale are given. def mlefunc(a, b, x): # Zeros of this function are critical points of # the maximum likelihood function. n = len(x) s1 = np.log(x).sum() s2 = np.log(1-x).sum() psiab = special.psi(a + b) func = [s1 - n * (-psiab + special.psi(a)), s2 - n * (-psiab + special.psi(b))] return func # Basic test with floc and fscale given. x = np.array([0.125, 0.25, 0.5]) a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1) assert_equal(loc, 0) assert_equal(scale, 1) assert_allclose(mlefunc(a, b, x), [0, 0], atol=1e-6) # Basic test with f0, floc and fscale given. # This is also a regression test for gh-2514. x = np.array([0.125, 0.25, 0.5]) a, b, loc, scale = stats.beta.fit(x, f0=2, floc=0, fscale=1) assert_equal(a, 2) assert_equal(loc, 0) assert_equal(scale, 1) da, db = mlefunc(a, b, x) assert_allclose(db, 0, atol=1e-5) # Same floc and fscale values as above, but reverse the data # and fix b (f1). x2 = 1 - x a2, b2, loc2, scale2 = stats.beta.fit(x2, f1=2, floc=0, fscale=1) assert_equal(b2, 2) assert_equal(loc2, 0) assert_equal(scale2, 1) da, db = mlefunc(a2, b2, x2) assert_allclose(da, 0, atol=1e-5) # a2 of this test should equal b from above. assert_almost_equal(a2, b) # Check for detection of data out of bounds when floc and fscale # are given. assert_raises(ValueError, stats.beta.fit, x, floc=0.5, fscale=1) y = np.array([0, .5, 1]) assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1) assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f0=2) assert_raises(ValueError, stats.beta.fit, y, floc=0, fscale=1, f1=2) # Check that attempting to fix all the parameters raises a ValueError. 
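        # (With every shape parameter plus loc and scale pinned there is
        # nothing left for the optimizer to vary, hence the error below.)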
assert_raises(ValueError, stats.beta.fit, y, f0=0, f1=1, floc=2, fscale=3) def test_expon_fit(self): x = np.array([2, 2, 4, 4, 4, 4, 4, 8]) loc, scale = stats.expon.fit(x) assert_equal(loc, 2) # x.min() assert_equal(scale, 2) # x.mean() - x.min() loc, scale = stats.expon.fit(x, fscale=3) assert_equal(loc, 2) # x.min() assert_equal(scale, 3) # fscale loc, scale = stats.expon.fit(x, floc=0) assert_equal(loc, 0) # floc assert_equal(scale, 4) # x.mean() - loc def test_lognorm_fit(self): x = np.array([1.5, 3, 10, 15, 23, 59]) lnxm1 = np.log(x - 1) shape, loc, scale = stats.lognorm.fit(x, floc=1) assert_allclose(shape, lnxm1.std(), rtol=1e-12) assert_equal(loc, 1) assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12) shape, loc, scale = stats.lognorm.fit(x, floc=1, fscale=6) assert_allclose(shape, np.sqrt(((lnxm1 - np.log(6))**2).mean()), rtol=1e-12) assert_equal(loc, 1) assert_equal(scale, 6) shape, loc, scale = stats.lognorm.fit(x, floc=1, fix_s=0.75) assert_equal(shape, 0.75) assert_equal(loc, 1) assert_allclose(scale, np.exp(lnxm1.mean()), rtol=1e-12) def test_uniform_fit(self): x = np.array([1.0, 1.1, 1.2, 9.0]) loc, scale = stats.uniform.fit(x) assert_equal(loc, x.min()) assert_equal(scale, x.ptp()) loc, scale = stats.uniform.fit(x, floc=0) assert_equal(loc, 0) assert_equal(scale, x.max()) loc, scale = stats.uniform.fit(x, fscale=10) assert_equal(loc, 0) assert_equal(scale, 10) assert_raises(ValueError, stats.uniform.fit, x, floc=2.0) assert_raises(ValueError, stats.uniform.fit, x, fscale=5.0) def test_fshapes(self): # take a beta distribution, with shapes='a, b', and make sure that # fa is equivalent to f0, and fb is equivalent to f1 a, b = 3., 4. x = stats.beta.rvs(a, b, size=100, random_state=1234) res_1 = stats.beta.fit(x, f0=3.) res_2 = stats.beta.fit(x, fa=3.) assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12) res_2 = stats.beta.fit(x, fix_a=3.) assert_allclose(res_1, res_2, atol=1e-12, rtol=1e-12) res_3 = stats.beta.fit(x, f1=4.) res_4 = stats.beta.fit(x, fb=4.) assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12) res_4 = stats.beta.fit(x, fix_b=4.) assert_allclose(res_3, res_4, atol=1e-12, rtol=1e-12) # cannot specify both positional and named args at the same time assert_raises(ValueError, stats.beta.fit, x, fa=1, f0=2) # check that attempting to fix all parameters raises a ValueError assert_raises(ValueError, stats.beta.fit, x, fa=0, f1=1, floc=2, fscale=3) # check that specifying floc, fscale and fshapes works for # beta and gamma which override the generic fit method res_5 = stats.beta.fit(x, fa=3., floc=0, fscale=1) aa, bb, ll, ss = res_5 assert_equal([aa, ll, ss], [3., 0, 1]) # gamma distribution a = 3. data = stats.gamma.rvs(a, size=100) aa, ll, ss = stats.gamma.fit(data, fa=a) assert_equal(aa, a) def test_extra_params(self): # unknown parameters should raise rather than be silently ignored dist = stats.exponnorm data = dist.rvs(K=2, size=100) dct = dict(enikibeniki=-101) assert_raises(TypeError, dist.fit, data, **dct) class TestFrozen(object): def setup_method(self): np.random.seed(1234) # Test that a frozen distribution gives the same results as the original # object. # # Only tested for the normal distribution (with loc and scale specified) # and for the gamma distribution (with a shape parameter specified). 
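    # A minimal, self-contained illustration of the frozen-distribution
    # pattern exercised below.  It is an added sketch for clarity: the
    # distribution and the parameter values are arbitrary choices, not
    # anything mandated by the original tests.
    def test_frozen_matches_unfrozen_example(self):
        frozen = stats.norm(loc=10.0, scale=3.0)
        # The frozen object should delegate to the same computation as the
        # explicit loc/scale call, so the results agree exactly.
        assert_equal(frozen.pdf(20.0),
                     stats.norm.pdf(20.0, loc=10.0, scale=3.0))
        assert_equal(frozen.cdf(20.0),
                     stats.norm.cdf(20.0, loc=10.0, scale=3.0))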
def test_norm(self): dist = stats.norm frozen = stats.norm(loc=10.0, scale=3.0) result_f = frozen.pdf(20.0) result = dist.pdf(20.0, loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.cdf(20.0) result = dist.cdf(20.0, loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.ppf(0.25) result = dist.ppf(0.25, loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.isf(0.25) result = dist.isf(0.25, loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.sf(10.0) result = dist.sf(10.0, loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.median() result = dist.median(loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.mean() result = dist.mean(loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.var() result = dist.var(loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.std() result = dist.std(loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.entropy() result = dist.entropy(loc=10.0, scale=3.0) assert_equal(result_f, result) result_f = frozen.moment(2) result = dist.moment(2, loc=10.0, scale=3.0) assert_equal(result_f, result) assert_equal(frozen.a, dist.a) assert_equal(frozen.b, dist.b) def test_gamma(self): a = 2.0 dist = stats.gamma frozen = stats.gamma(a) result_f = frozen.pdf(20.0) result = dist.pdf(20.0, a) assert_equal(result_f, result) result_f = frozen.cdf(20.0) result = dist.cdf(20.0, a) assert_equal(result_f, result) result_f = frozen.ppf(0.25) result = dist.ppf(0.25, a) assert_equal(result_f, result) result_f = frozen.isf(0.25) result = dist.isf(0.25, a) assert_equal(result_f, result) result_f = frozen.sf(10.0) result = dist.sf(10.0, a) assert_equal(result_f, result) result_f = frozen.median() result = dist.median(a) assert_equal(result_f, result) result_f = frozen.mean() result = dist.mean(a) assert_equal(result_f, result) result_f = frozen.var() result = dist.var(a) assert_equal(result_f, result) result_f = frozen.std() result = dist.std(a) assert_equal(result_f, result) result_f = frozen.entropy() result = dist.entropy(a) assert_equal(result_f, result) result_f = frozen.moment(2) result = dist.moment(2, a) assert_equal(result_f, result) assert_equal(frozen.a, frozen.dist.a) assert_equal(frozen.b, frozen.dist.b) def test_regression_ticket_1293(self): # Create a frozen distribution. frozen = stats.lognorm(1) # Call one of its methods that does not take any keyword arguments. m1 = frozen.moment(2) # Now call a method that takes a keyword argument. frozen.stats(moments='mvsk') # Call moment(2) again. # After calling stats(), the following was raising an exception. # So this test passes if the following does not raise an exception. m2 = frozen.moment(2) # The following should also be true, of course. But it is not # the focus of this test. 
assert_equal(m1, m2) def test_ab(self): # test that the support of a frozen distribution # (i) remains frozen even if it changes for the original one # (ii) is actually correct if the shape parameters are such that # the values of [a, b] are not the default [0, inf] # take a genpareto as an example where the support # depends on the value of the shape parameter: # for c > 0: a, b = 0, inf # for c < 0: a, b = 0, -1/c rv = stats.genpareto(c=-0.1) a, b = rv.dist.a, rv.dist.b assert_equal([a, b], [0., 10.]) assert_equal([rv.a, rv.b], [0., 10.]) stats.genpareto.pdf(0, c=0.1) # this changes genpareto.b assert_equal([rv.dist.a, rv.dist.b], [a, b]) assert_equal([rv.a, rv.b], [a, b]) rv1 = stats.genpareto(c=0.1) assert_(rv1.dist is not rv.dist) def test_rv_frozen_in_namespace(self): # Regression test for gh-3522 assert_(hasattr(stats.distributions, 'rv_frozen')) def test_random_state(self): # only check that the random_state attribute exists, frozen = stats.norm() assert_(hasattr(frozen, 'random_state')) # ... that it can be set, frozen.random_state = 42 assert_equal(frozen.random_state.get_state(), np.random.RandomState(42).get_state()) # ... and that .rvs method accepts it as an argument rndm = np.random.RandomState(1234) frozen.rvs(size=8, random_state=rndm) def test_pickling(self): # test that a frozen instance pickles and unpickles # (this method is a clone of common_tests.check_pickling) beta = stats.beta(2.3098496451481823, 0.62687954300963677) poiss = stats.poisson(3.) sample = stats.rv_discrete(values=([0, 1, 2, 3], [0.1, 0.2, 0.3, 0.4])) for distfn in [beta, poiss, sample]: distfn.random_state = 1234 distfn.rvs(size=8) s = pickle.dumps(distfn) r0 = distfn.rvs(size=8) unpickled = pickle.loads(s) r1 = unpickled.rvs(size=8) assert_equal(r0, r1) # also smoke test some methods medians = [distfn.ppf(0.5), unpickled.ppf(0.5)] assert_equal(medians[0], medians[1]) assert_equal(distfn.cdf(medians[0]), unpickled.cdf(medians[1])) def test_expect(self): # smoke test the expect method of the frozen distribution # only take a gamma w/loc and scale and poisson with loc specified def func(x): return x gm = stats.gamma(a=2, loc=3, scale=4) gm_val = gm.expect(func, lb=1, ub=2, conditional=True) gamma_val = stats.gamma.expect(func, args=(2,), loc=3, scale=4, lb=1, ub=2, conditional=True) assert_allclose(gm_val, gamma_val) p = stats.poisson(3, loc=4) p_val = p.expect(func) poisson_val = stats.poisson.expect(func, args=(3,), loc=4) assert_allclose(p_val, poisson_val) class TestExpect(object): # Test for expect method. # # Uses normal distribution and beta distribution for finite bounds, and # hypergeom for discrete distribution with finite support def test_norm(self): v = stats.norm.expect(lambda x: (x-5)*(x-5), loc=5, scale=2) assert_almost_equal(v, 4, decimal=14) m = stats.norm.expect(lambda x: (x), loc=5, scale=2) assert_almost_equal(m, 5, decimal=14) lb = stats.norm.ppf(0.05, loc=5, scale=2) ub = stats.norm.ppf(0.95, loc=5, scale=2) prob90 = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub) assert_almost_equal(prob90, 0.9, decimal=14) prob90c = stats.norm.expect(lambda x: 1, loc=5, scale=2, lb=lb, ub=ub, conditional=True) assert_almost_equal(prob90c, 1., decimal=14) def test_beta(self): # case with finite support interval v = stats.beta.expect(lambda x: (x-19/3.)*(x-19/3.), args=(10, 5), loc=5, scale=2) assert_almost_equal(v, 1./18., decimal=13) m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.) 
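        # Sanity check of the targets: a beta(10, 5) variate has mean
        # 10/15 = 2/3 and variance 10*5/(15**2 * 16) = 1/72, so after the
        # affine map loc + scale*x with loc=5, scale=2 the mean becomes
        # 5 + 2*(2/3) = 19/3 and the variance becomes 4/72 = 1/18, matching
        # the two expectations computed in this test.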
assert_almost_equal(m, 19/3., decimal=13) ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2) lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2) prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5., scale=2., lb=lb, ub=ub, conditional=False) assert_almost_equal(prob90, 0.9, decimal=13) prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5, scale=2, lb=lb, ub=ub, conditional=True) assert_almost_equal(prob90c, 1., decimal=13) def test_hypergeom(self): # test case with finite bounds # without specifying bounds m_true, v_true = stats.hypergeom.stats(20, 10, 8, loc=5.) m = stats.hypergeom.expect(lambda x: x, args=(20, 10, 8), loc=5.) assert_almost_equal(m, m_true, decimal=13) v = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8), loc=5.) assert_almost_equal(v, v_true, decimal=14) # with bounds, bounds equal to shifted support v_bounds = stats.hypergeom.expect(lambda x: (x-9.)**2, args=(20, 10, 8), loc=5., lb=5, ub=13) assert_almost_equal(v_bounds, v_true, decimal=14) # drop boundary points prob_true = 1-stats.hypergeom.pmf([5, 13], 20, 10, 8, loc=5).sum() prob_bounds = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5., lb=6, ub=12) assert_almost_equal(prob_bounds, prob_true, decimal=13) # conditional prob_bc = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), loc=5., lb=6, ub=12, conditional=True) assert_almost_equal(prob_bc, 1, decimal=14) # check simple integral prob_b = stats.hypergeom.expect(lambda x: 1, args=(20, 10, 8), lb=0, ub=8) assert_almost_equal(prob_b, 1, decimal=13) def test_poisson(self): # poisson, use lower bound only prob_bounds = stats.poisson.expect(lambda x: 1, args=(2,), lb=3, conditional=False) prob_b_true = 1-stats.poisson.cdf(2, 2) assert_almost_equal(prob_bounds, prob_b_true, decimal=14) prob_lb = stats.poisson.expect(lambda x: 1, args=(2,), lb=2, conditional=True) assert_almost_equal(prob_lb, 1, decimal=14) def test_genhalflogistic(self): # genhalflogistic, changes upper bound of support in _argcheck # regression test for gh-2622 halflog = stats.genhalflogistic # check consistency when calling expect twice with the same input res1 = halflog.expect(args=(1.5,)) halflog.expect(args=(0.5,)) res2 = halflog.expect(args=(1.5,)) assert_almost_equal(res1, res2, decimal=14) def test_rice_overflow(self): # rice.pdf(999, 0.74) was inf since special.i0 silentyly overflows # check that using i0e fixes it assert_(np.isfinite(stats.rice.pdf(999, 0.74))) assert_(np.isfinite(stats.rice.expect(lambda x: 1, args=(0.74,)))) assert_(np.isfinite(stats.rice.expect(lambda x: 2, args=(0.74,)))) assert_(np.isfinite(stats.rice.expect(lambda x: 3, args=(0.74,)))) def test_logser(self): # test a discrete distribution with infinite support and loc p, loc = 0.3, 3 res_0 = stats.logser.expect(lambda k: k, args=(p,)) # check against the correct answer (sum of a geom series) assert_allclose(res_0, p / (p - 1.) / np.log(1. - p), atol=1e-15) # now check it with `loc` res_l = stats.logser.expect(lambda k: k, args=(p,), loc=loc) assert_allclose(res_l, res_0 + loc, atol=1e-15) def test_skellam(self): # Use a discrete distribution w/ bi-infinite support. 
Compute two first # moments and compare to known values (cf skellam.stats) p1, p2 = 18, 22 m1 = stats.skellam.expect(lambda x: x, args=(p1, p2)) m2 = stats.skellam.expect(lambda x: x**2, args=(p1, p2)) assert_allclose(m1, p1 - p2, atol=1e-12) assert_allclose(m2 - m1**2, p1 + p2, atol=1e-12) def test_randint(self): # Use a discrete distribution w/ parameter-dependent support, which # is larger than the default chunksize lo, hi = 0, 113 res = stats.randint.expect(lambda x: x, (lo, hi)) assert_allclose(res, sum(_ for _ in range(lo, hi)) / (hi - lo), atol=1e-15) def test_zipf(self): # Test that there is no infinite loop even if the sum diverges assert_warns(RuntimeWarning, stats.zipf.expect, lambda x: x**2, (2,)) def test_discrete_kwds(self): # check that discrete expect accepts keywords to control the summation n0 = stats.poisson.expect(lambda x: 1, args=(2,)) n1 = stats.poisson.expect(lambda x: 1, args=(2,), maxcount=1001, chunksize=32, tolerance=1e-8) assert_almost_equal(n0, n1, decimal=14) def test_moment(self): # test the .moment() method: compute a higher moment and compare to # a known value def poiss_moment5(mu): return mu**5 + 10*mu**4 + 25*mu**3 + 15*mu**2 + mu for mu in [5, 7]: m5 = stats.poisson.moment(5, mu) assert_allclose(m5, poiss_moment5(mu), rtol=1e-10) class TestNct(object): def test_nc_parameter(self): # Parameter values c<=0 were not enabled (gh-2402). # For negative values c and for c=0 results of rv.cdf(0) below were nan rv = stats.nct(5, 0) assert_equal(rv.cdf(0), 0.5) rv = stats.nct(5, -1) assert_almost_equal(rv.cdf(0), 0.841344746069, decimal=10) def test_broadcasting(self): res = stats.nct.pdf(5, np.arange(4, 7)[:, None], np.linspace(0.1, 1, 4)) expected = array([[0.00321886, 0.00557466, 0.00918418, 0.01442997], [0.00217142, 0.00395366, 0.00683888, 0.01126276], [0.00153078, 0.00291093, 0.00525206, 0.00900815]]) assert_allclose(res, expected, rtol=1e-5) def test_variance_gh_issue_2401(self): # Computation of the variance of a non-central t-distribution resulted # in a TypeError: ufunc 'isinf' not supported for the input types, # and the inputs could not be safely coerced to any supported types # according to the casting rule 'safe' rv = stats.nct(4, 0) assert_equal(rv.var(), 2.0) def test_nct_inf_moments(self): # n-th moment of nct only exists for df > n m, v, s, k = stats.nct.stats(df=1.9, nc=0.3, moments='mvsk') assert_(np.isfinite(m)) assert_equal([v, s, k], [np.inf, np.nan, np.nan]) m, v, s, k = stats.nct.stats(df=3.1, nc=0.3, moments='mvsk') assert_(np.isfinite([m, v, s]).all()) assert_equal(k, np.nan) class TestRice(object): def test_rice_zero_b(self): # rice distribution should work with b=0, cf gh-2164 x = [0.2, 1., 5.] assert_(np.isfinite(stats.rice.pdf(x, b=0.)).all()) assert_(np.isfinite(stats.rice.logpdf(x, b=0.)).all()) assert_(np.isfinite(stats.rice.cdf(x, b=0.)).all()) assert_(np.isfinite(stats.rice.logcdf(x, b=0.)).all()) q = [0.1, 0.1, 0.5, 0.9] assert_(np.isfinite(stats.rice.ppf(q, b=0.)).all()) mvsk = stats.rice.stats(0, moments='mvsk') assert_(np.isfinite(mvsk).all()) # furthermore, pdf is continuous as b\to 0 # rice.pdf(x, b\to 0) = x exp(-x^2/2) + O(b^2) # see e.g. 
Abramovich & Stegun 9.6.7 & 9.6.10 b = 1e-8 assert_allclose(stats.rice.pdf(x, 0), stats.rice.pdf(x, b), atol=b, rtol=0) def test_rice_rvs(self): rvs = stats.rice.rvs assert_equal(rvs(b=3.).size, 1) assert_equal(rvs(b=3., size=(3, 5)).shape, (3, 5)) class TestErlang(object): def setup_method(self): np.random.seed(1234) def test_erlang_runtimewarning(self): # erlang should generate a RuntimeWarning if a non-integer # shape parameter is used. with warnings.catch_warnings(): warnings.simplefilter("error", RuntimeWarning) # The non-integer shape parameter 1.3 should trigger a # RuntimeWarning assert_raises(RuntimeWarning, stats.erlang.rvs, 1.3, loc=0, scale=1, size=4) # Calling the fit method with `f0` set to an integer should # *not* trigger a RuntimeWarning. It should return the same # values as gamma.fit(...). data = [0.5, 1.0, 2.0, 4.0] result_erlang = stats.erlang.fit(data, f0=1) result_gamma = stats.gamma.fit(data, f0=1) assert_allclose(result_erlang, result_gamma, rtol=1e-3) class TestRayleigh(object): # gh-6227 def test_logpdf(self): y = stats.rayleigh.logpdf(50) assert_allclose(y, -1246.0879769945718) def test_logsf(self): y = stats.rayleigh.logsf(50) assert_allclose(y, -1250) class TestExponWeib(object): def test_pdf_logpdf(self): # Regression test for gh-3508. x = 0.1 a = 1.0 c = 100.0 p = stats.exponweib.pdf(x, a, c) logp = stats.exponweib.logpdf(x, a, c) # Expected values were computed with mpmath. assert_allclose([p, logp], [1.0000000000000054e-97, -223.35075402042244]) def test_a_is_1(self): # For issue gh-3508. # Check that when a=1, the pdf and logpdf methods of exponweib are the # same as those of weibull_min. x = np.logspace(-4, -1, 4) a = 1 c = 100 p = stats.exponweib.pdf(x, a, c) expected = stats.weibull_min.pdf(x, c) assert_allclose(p, expected) logp = stats.exponweib.logpdf(x, a, c) expected = stats.weibull_min.logpdf(x, c) assert_allclose(logp, expected) def test_a_is_1_c_is_1(self): # When a = 1 and c = 1, the distribution is exponential. x = np.logspace(-8, 1, 10) a = 1 c = 1 p = stats.exponweib.pdf(x, a, c) expected = stats.expon.pdf(x) assert_allclose(p, expected) logp = stats.exponweib.logpdf(x, a, c) expected = stats.expon.logpdf(x) assert_allclose(logp, expected) class TestWeibull(object): def test_logpdf(self): # gh-6217 y = stats.weibull_min.logpdf(0, 1) assert_equal(y, 0) def test_with_maxima_distrib(self): # Tests for weibull_min and weibull_max. # The expected values were computed using the symbolic algebra # program 'maxima' with the package 'distrib', which has # 'pdf_weibull' and 'cdf_weibull'. 
The mapping between the # scipy and maxima functions is as follows: # ----------------------------------------------------------------- # scipy maxima # --------------------------------- ------------------------------ # weibull_min.pdf(x, a, scale=b) pdf_weibull(x, a, b) # weibull_min.logpdf(x, a, scale=b) log(pdf_weibull(x, a, b)) # weibull_min.cdf(x, a, scale=b) cdf_weibull(x, a, b) # weibull_min.logcdf(x, a, scale=b) log(cdf_weibull(x, a, b)) # weibull_min.sf(x, a, scale=b) 1 - cdf_weibull(x, a, b) # weibull_min.logsf(x, a, scale=b) log(1 - cdf_weibull(x, a, b)) # # weibull_max.pdf(x, a, scale=b) pdf_weibull(-x, a, b) # weibull_max.logpdf(x, a, scale=b) log(pdf_weibull(-x, a, b)) # weibull_max.cdf(x, a, scale=b) 1 - cdf_weibull(-x, a, b) # weibull_max.logcdf(x, a, scale=b) log(1 - cdf_weibull(-x, a, b)) # weibull_max.sf(x, a, scale=b) cdf_weibull(-x, a, b) # weibull_max.logsf(x, a, scale=b) log(cdf_weibull(-x, a, b)) # ----------------------------------------------------------------- x = 1.5 a = 2.0 b = 3.0 # weibull_min p = stats.weibull_min.pdf(x, a, scale=b) assert_allclose(p, np.exp(-0.25)/3) lp = stats.weibull_min.logpdf(x, a, scale=b) assert_allclose(lp, -0.25 - np.log(3)) c = stats.weibull_min.cdf(x, a, scale=b) assert_allclose(c, -special.expm1(-0.25)) lc = stats.weibull_min.logcdf(x, a, scale=b) assert_allclose(lc, np.log(-special.expm1(-0.25))) s = stats.weibull_min.sf(x, a, scale=b) assert_allclose(s, np.exp(-0.25)) ls = stats.weibull_min.logsf(x, a, scale=b) assert_allclose(ls, -0.25) # Also test using a large value x, for which computing the survival # function using the CDF would result in 0. s = stats.weibull_min.sf(30, 2, scale=3) assert_allclose(s, np.exp(-100)) ls = stats.weibull_min.logsf(30, 2, scale=3) assert_allclose(ls, -100) # weibull_max x = -1.5 p = stats.weibull_max.pdf(x, a, scale=b) assert_allclose(p, np.exp(-0.25)/3) lp = stats.weibull_max.logpdf(x, a, scale=b) assert_allclose(lp, -0.25 - np.log(3)) c = stats.weibull_max.cdf(x, a, scale=b) assert_allclose(c, np.exp(-0.25)) lc = stats.weibull_max.logcdf(x, a, scale=b) assert_allclose(lc, -0.25) s = stats.weibull_max.sf(x, a, scale=b) assert_allclose(s, -special.expm1(-0.25)) ls = stats.weibull_max.logsf(x, a, scale=b) assert_allclose(ls, np.log(-special.expm1(-0.25))) # Also test using a value of x close to 0, for which computing the # survival function using the CDF would result in 0. s = stats.weibull_max.sf(-1e-9, 2, scale=3) assert_allclose(s, -special.expm1(-1/9000000000000000000)) ls = stats.weibull_max.logsf(-1e-9, 2, scale=3) assert_allclose(ls, np.log(-special.expm1(-1/9000000000000000000))) class TestRdist(object): @pytest.mark.slow def test_rdist_cdf_gh1285(self): # check workaround in rdist._cdf for issue gh-1285. 
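        # Background: the check below is a cdf/ppf round trip, i.e.
        # cdf(ppf(q)) should recover q for a continuous distribution.  The
        # rdist density is proportional to (1 - x**2)**(c/2 - 1) on
        # [-1, 1], so the large shape value used here concentrates nearly
        # all of the mass very close to zero.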
distfn = stats.rdist values = [0.001, 0.5, 0.999] assert_almost_equal(distfn.cdf(distfn.ppf(values, 541.0), 541.0), values, decimal=5) class TestTrapz(object): def test_reduces_to_triang(self): modes = [0, 0.3, 0.5, 1] for mode in modes: x = [0, mode, 1] assert_almost_equal(stats.trapz.pdf(x, mode, mode), stats.triang.pdf(x, mode)) assert_almost_equal(stats.trapz.cdf(x, mode, mode), stats.triang.cdf(x, mode)) def test_reduces_to_uniform(self): x = np.linspace(0, 1, 10) assert_almost_equal(stats.trapz.pdf(x, 0, 1), stats.uniform.pdf(x)) assert_almost_equal(stats.trapz.cdf(x, 0, 1), stats.uniform.cdf(x)) def test_cases(self): # edge cases assert_almost_equal(stats.trapz.pdf(0, 0, 0), 2) assert_almost_equal(stats.trapz.pdf(1, 1, 1), 2) assert_almost_equal(stats.trapz.pdf(0.5, 0, 0.8), 1.11111111111111111) assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 1.0), 1.11111111111111111) # straightforward case assert_almost_equal(stats.trapz.pdf(0.1, 0.2, 0.8), 0.625) assert_almost_equal(stats.trapz.pdf(0.5, 0.2, 0.8), 1.25) assert_almost_equal(stats.trapz.pdf(0.9, 0.2, 0.8), 0.625) assert_almost_equal(stats.trapz.cdf(0.1, 0.2, 0.8), 0.03125) assert_almost_equal(stats.trapz.cdf(0.2, 0.2, 0.8), 0.125) assert_almost_equal(stats.trapz.cdf(0.5, 0.2, 0.8), 0.5) assert_almost_equal(stats.trapz.cdf(0.9, 0.2, 0.8), 0.96875) assert_almost_equal(stats.trapz.cdf(1.0, 0.2, 0.8), 1.0) def test_trapz_vect(self): # test that array-valued shapes and arguments are handled c = np.array([0.1, 0.2, 0.3]) d = np.array([0.5, 0.6])[:, None] x = np.array([0.15, 0.25, 0.9]) v = stats.trapz.pdf(x, c, d) cc, dd, xx = np.broadcast_arrays(c, d, x) res = np.empty(xx.size, dtype=xx.dtype) ind = np.arange(xx.size) for i, x1, c1, d1 in zip(ind, xx.ravel(), cc.ravel(), dd.ravel()): res[i] = stats.trapz.pdf(x1, c1, d1) assert_allclose(v, res.reshape(v.shape), atol=1e-15) class TestTriang(object): def test_edge_cases(self): with np.errstate(all='raise'): assert_equal(stats.triang.pdf(0, 0), 2.) assert_equal(stats.triang.pdf(0.5, 0), 1.) assert_equal(stats.triang.pdf(1, 0), 0.) assert_equal(stats.triang.pdf(0, 1), 0) assert_equal(stats.triang.pdf(0.5, 1), 1.) assert_equal(stats.triang.pdf(1, 1), 2) assert_equal(stats.triang.cdf(0., 0.), 0.) assert_equal(stats.triang.cdf(0.5, 0.), 0.75) assert_equal(stats.triang.cdf(1.0, 0.), 1.0) assert_equal(stats.triang.cdf(0., 1.), 0.) assert_equal(stats.triang.cdf(0.5, 1.), 0.25) assert_equal(stats.triang.cdf(1., 1.), 1) def test_540_567(): # test for nan returned in tickets 540, 567 assert_almost_equal(stats.norm.cdf(-1.7624320982), 0.03899815971089126, decimal=10, err_msg='test_540_567') assert_almost_equal(stats.norm.cdf(-1.7624320983), 0.038998159702449846, decimal=10, err_msg='test_540_567') assert_almost_equal(stats.norm.cdf(1.38629436112, loc=0.950273420309, scale=0.204423758009), 0.98353464004309321, decimal=10, err_msg='test_540_567') def test_regression_ticket_1316(): # The following was raising an exception, because _construct_default_doc() # did not handle the default keyword extradoc=None. See ticket #1316. g = stats._continuous_distns.gamma_gen(name='gamma') def test_regression_ticket_1326(): # adjust to avoid nan with 0*log(0) assert_almost_equal(stats.chi2.pdf(0.0, 2), 0.5, 14) def test_regression_tukey_lambda(): # Make sure that Tukey-Lambda distribution correctly handles # non-positive lambdas. 
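    # Background for the cases below: the Tukey lambda family is defined
    # through its quantile function Q(p) = (p**lam - (1 - p)**lam)/lam,
    # with the limit log(p/(1 - p)) at lam = 0.  For lam <= 0 the support
    # is the whole real line, so the pdf should be strictly positive on
    # the grid; for lam > 0 the support is the finite interval
    # [-1/lam, 1/lam], which is why the lam = 2 row is allowed to contain
    # exact zeros.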
x = np.linspace(-5.0, 5.0, 101) olderr = np.seterr(divide='ignore') try: for lam in [0.0, -1.0, -2.0, np.array([[-1.0], [0.0], [-2.0]])]: p = stats.tukeylambda.pdf(x, lam) assert_((p != 0.0).all()) assert_(~np.isnan(p).all()) lam = np.array([[-1.0], [0.0], [2.0]]) p = stats.tukeylambda.pdf(x, lam) finally: np.seterr(**olderr) assert_(~np.isnan(p).all()) assert_((p[0] != 0.0).all()) assert_((p[1] != 0.0).all()) assert_((p[2] != 0.0).any()) assert_((p[2] == 0.0).any()) @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") def test_regression_ticket_1421(): assert_('pdf(x, mu, loc=0, scale=1)' not in stats.poisson.__doc__) assert_('pmf(x,' in stats.poisson.__doc__) def test_nan_arguments_gh_issue_1362(): with np.errstate(invalid='ignore'): assert_(np.isnan(stats.t.logcdf(1, np.nan))) assert_(np.isnan(stats.t.cdf(1, np.nan))) assert_(np.isnan(stats.t.logsf(1, np.nan))) assert_(np.isnan(stats.t.sf(1, np.nan))) assert_(np.isnan(stats.t.pdf(1, np.nan))) assert_(np.isnan(stats.t.logpdf(1, np.nan))) assert_(np.isnan(stats.t.ppf(1, np.nan))) assert_(np.isnan(stats.t.isf(1, np.nan))) assert_(np.isnan(stats.bernoulli.logcdf(np.nan, 0.5))) assert_(np.isnan(stats.bernoulli.cdf(np.nan, 0.5))) assert_(np.isnan(stats.bernoulli.logsf(np.nan, 0.5))) assert_(np.isnan(stats.bernoulli.sf(np.nan, 0.5))) assert_(np.isnan(stats.bernoulli.pmf(np.nan, 0.5))) assert_(np.isnan(stats.bernoulli.logpmf(np.nan, 0.5))) assert_(np.isnan(stats.bernoulli.ppf(np.nan, 0.5))) assert_(np.isnan(stats.bernoulli.isf(np.nan, 0.5))) def test_frozen_fit_ticket_1536(): np.random.seed(5678) true = np.array([0.25, 0., 0.5]) x = stats.lognorm.rvs(true[0], true[1], true[2], size=100) olderr = np.seterr(divide='ignore') try: params = np.array(stats.lognorm.fit(x, floc=0.)) finally: np.seterr(**olderr) assert_almost_equal(params, true, decimal=2) params = np.array(stats.lognorm.fit(x, fscale=0.5, loc=0)) assert_almost_equal(params, true, decimal=2) params = np.array(stats.lognorm.fit(x, f0=0.25, loc=0)) assert_almost_equal(params, true, decimal=2) params = np.array(stats.lognorm.fit(x, f0=0.25, floc=0)) assert_almost_equal(params, true, decimal=2) np.random.seed(5678) loc = 1 floc = 0.9 x = stats.norm.rvs(loc, 2., size=100) params = np.array(stats.norm.fit(x, floc=floc)) expected = np.array([floc, np.sqrt(((x-floc)**2).mean())]) assert_almost_equal(params, expected, decimal=4) def test_regression_ticket_1530(): # Check the starting value works for Cauchy distribution fit. np.random.seed(654321) rvs = stats.cauchy.rvs(size=100) params = stats.cauchy.fit(rvs) expected = (0.045, 1.142) assert_almost_equal(params, expected, decimal=1) def test_gh_pr_4806(): # Check starting values for Cauchy distribution fit. np.random.seed(1234) x = np.random.randn(42) for offset in 10000.0, 1222333444.0: loc, scale = stats.cauchy.fit(x + offset) assert_allclose(loc, offset, atol=1.0) assert_allclose(scale, 0.6, atol=1.0) def test_tukeylambda_stats_ticket_1545(): # Some test for the variance and kurtosis of the Tukey Lambda distr. # See test_tukeylamdba_stats.py for more tests. mv = stats.tukeylambda.stats(0, moments='mvsk') # Known exact values: expected = [0, np.pi**2/3, 0, 1.2] assert_almost_equal(mv, expected, decimal=10) mv = stats.tukeylambda.stats(3.13, moments='mvsk') # 'expected' computed with mpmath. expected = [0, 0.0269220858861465102, 0, -0.898062386219224104] assert_almost_equal(mv, expected, decimal=10) mv = stats.tukeylambda.stats(0.14, moments='mvsk') # 'expected' computed with mpmath. 
expected = [0, 2.11029702221450250, 0, -0.02708377353223019456] assert_almost_equal(mv, expected, decimal=10) def test_poisson_logpmf_ticket_1436(): assert_(np.isfinite(stats.poisson.logpmf(1500, 200))) def test_powerlaw_stats(): """Test the powerlaw stats function. This unit test is also a regression test for ticket 1548. The exact values are: mean: mu = a / (a + 1) variance: sigma**2 = a / ((a + 2) * (a + 1) ** 2) skewness: One formula (see http://en.wikipedia.org/wiki/Skewness) is gamma_1 = (E[X**3] - 3*mu*E[X**2] + 2*mu**3) / sigma**3 A short calculation shows that E[X**k] is a / (a + k), so gamma_1 can be implemented as n = a/(a+3) - 3*(a/(a+1))*a/(a+2) + 2*(a/(a+1))**3 d = sqrt(a/((a+2)*(a+1)**2)) ** 3 gamma_1 = n/d Either by simplifying, or by a direct calculation of mu_3 / sigma**3, one gets the more concise formula: gamma_1 = -2.0 * ((a - 1) / (a + 3)) * sqrt((a + 2) / a) kurtosis: (See http://en.wikipedia.org/wiki/Kurtosis) The excess kurtosis is gamma_2 = mu_4 / sigma**4 - 3 A bit of calculus and algebra (sympy helps) shows that mu_4 = 3*a*(3*a**2 - a + 2) / ((a+1)**4 * (a+2) * (a+3) * (a+4)) so gamma_2 = 3*(3*a**2 - a + 2) * (a+2) / (a*(a+3)*(a+4)) - 3 which can be rearranged to gamma_2 = 6 * (a**3 - a**2 - 6*a + 2) / (a*(a+3)*(a+4)) """ cases = [(1.0, (0.5, 1./12, 0.0, -1.2)), (2.0, (2./3, 2./36, -0.56568542494924734, -0.6))] for a, exact_mvsk in cases: mvsk = stats.powerlaw.stats(a, moments="mvsk") assert_array_almost_equal(mvsk, exact_mvsk) def test_powerlaw_edge(): # Regression test for gh-3986. p = stats.powerlaw.logpdf(0, 1) assert_equal(p, 0.0) def test_exponpow_edge(): # Regression test for gh-3982. p = stats.exponpow.logpdf(0, 1) assert_equal(p, 0.0) # Check pdf and logpdf at x = 0 for other values of b. p = stats.exponpow.pdf(0, [0.25, 1.0, 1.5]) assert_equal(p, [np.inf, 1.0, 0.0]) p = stats.exponpow.logpdf(0, [0.25, 1.0, 1.5]) assert_equal(p, [np.inf, 0.0, -np.inf]) def test_gengamma_edge(): # Regression test for gh-3985. p = stats.gengamma.pdf(0, 1, 1) assert_equal(p, 1.0) # Regression tests for gh-4724. p = stats.gengamma._munp(-2, 200, 1.) assert_almost_equal(p, 1./199/198) p = stats.gengamma._munp(-2, 10, 1.) assert_almost_equal(p, 1./9/8) def test_ksone_fit_freeze(): # Regression test for ticket #1638. d = np.array( [-0.18879233, 0.15734249, 0.18695107, 0.27908787, -0.248649, -0.2171497, 0.12233512, 0.15126419, 0.03119282, 0.4365294, 0.08930393, -0.23509903, 0.28231224, -0.09974875, -0.25196048, 0.11102028, 0.1427649, 0.10176452, 0.18754054, 0.25826724, 0.05988819, 0.0531668, 0.21906056, 0.32106729, 0.2117662, 0.10886442, 0.09375789, 0.24583286, -0.22968366, -0.07842391, -0.31195432, -0.21271196, 0.1114243, -0.13293002, 0.01331725, -0.04330977, -0.09485776, -0.28434547, 0.22245721, -0.18518199, -0.10943985, -0.35243174, 0.06897665, -0.03553363, -0.0701746, -0.06037974, 0.37670779, -0.21684405]) try: olderr = np.seterr(invalid='ignore') with suppress_warnings() as sup: sup.filter(IntegrationWarning, "The maximum number of subdivisions .50. has been achieved.") sup.filter(RuntimeWarning, "floating point number truncated to an integer") stats.ksone.fit(d) finally: np.seterr(**olderr) def test_norm_logcdf(): # Test precision of the logcdf of the normal distribution. # This precision was enhanced in ticket 1614. 
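    # Added note (illustrative): np.log(stats.norm.cdf(x)) returns -inf once
    # cdf(x) underflows to zero (around x < -38.5), so this test exercises the
    # dedicated log-CDF path (scipy.special.log_ndtr) far into the left tail:
    #   np.log(stats.norm.cdf(-40))   # -inf, since cdf(-40) (roughly 4e-350) underflows to 0
    #   stats.norm.logcdf(-40)        # about -804.61, finite and accurate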
x = -np.asarray(list(range(0, 120, 4))) # Values from R expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300, -131.69539607, -203.91715537, -292.09872100, -396.25241451, -516.38564863, -652.50322759, -804.60844201, -972.70364403, -1156.79057310, -1356.87055173, -1572.94460885, -1805.01356068, -2053.07806561, -2317.13866238, -2597.19579746, -2893.24984493, -3205.30112136, -3533.34989701, -3877.39640444, -4237.44084522, -4613.48339520, -5005.52420869, -5413.56342187, -5837.60115548, -6277.63751711, -6733.67260303] assert_allclose(stats.norm().logcdf(x), expected, atol=1e-8) # also test the complex-valued code path assert_allclose(stats.norm().logcdf(x + 1e-14j).real, expected, atol=1e-8) # test the accuracy: d(logcdf)/dx = pdf / cdf \equiv exp(logpdf - logcdf) deriv = (stats.norm.logcdf(x + 1e-10j)/1e-10).imag deriv_expected = np.exp(stats.norm.logpdf(x) - stats.norm.logcdf(x)) assert_allclose(deriv, deriv_expected, atol=1e-10) def test_levy_cdf_ppf(): # Test levy.cdf, including small arguments. x = np.array([1000, 1.0, 0.5, 0.1, 0.01, 0.001]) # Expected values were calculated separately with mpmath. # E.g. # >>> mpmath.mp.dps = 100 # >>> x = mpmath.mp.mpf('0.01') # >>> cdf = mpmath.erfc(mpmath.sqrt(1/(2*x))) expected = np.array([0.9747728793699604, 0.3173105078629141, 0.1572992070502851, 0.0015654022580025495, 1.523970604832105e-23, 1.795832784800726e-219]) y = stats.levy.cdf(x) assert_allclose(y, expected, rtol=1e-10) # ppf(expected) should get us back to x. xx = stats.levy.ppf(expected) assert_allclose(xx, x, rtol=1e-13) def test_hypergeom_interval_1802(): # these two had endless loops assert_equal(stats.hypergeom.interval(.95, 187601, 43192, 757), (152.0, 197.0)) assert_equal(stats.hypergeom.interval(.945, 187601, 43192, 757), (152.0, 197.0)) # this was working also before assert_equal(stats.hypergeom.interval(.94, 187601, 43192, 757), (153.0, 196.0)) # degenerate case .a == .b assert_equal(stats.hypergeom.ppf(0.02, 100, 100, 8), 8) assert_equal(stats.hypergeom.ppf(1, 100, 100, 8), 8) def test_distribution_too_many_args(): np.random.seed(1234) # Check that a TypeError is raised when too many args are given to a method # Regression test for ticket 1815. x = np.linspace(0.1, 0.7, num=5) assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0) assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, loc=1.0) assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, 4, 5) assert_raises(TypeError, stats.gamma.pdf, x, 2, 3, loc=1.0, scale=0.5) assert_raises(TypeError, stats.gamma.rvs, 2., 3, loc=1.0, scale=0.5) assert_raises(TypeError, stats.gamma.cdf, x, 2., 3, loc=1.0, scale=0.5) assert_raises(TypeError, stats.gamma.ppf, x, 2., 3, loc=1.0, scale=0.5) assert_raises(TypeError, stats.gamma.stats, 2., 3, loc=1.0, scale=0.5) assert_raises(TypeError, stats.gamma.entropy, 2., 3, loc=1.0, scale=0.5) assert_raises(TypeError, stats.gamma.fit, x, 2., 3, loc=1.0, scale=0.5) # These should not give errors stats.gamma.pdf(x, 2, 3) # loc=3 stats.gamma.pdf(x, 2, 3, 4) # loc=3, scale=4 stats.gamma.stats(2., 3) stats.gamma.stats(2., 3, 4) stats.gamma.stats(2., 3, 4, 'mv') stats.gamma.rvs(2., 3, 4, 5) stats.gamma.fit(stats.gamma.rvs(2., size=7), 2.) 
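    # Added note (illustrative): stats.gamma has a single shape parameter, so the
    # extra positional arguments above are consumed as loc and then scale; only a
    # further positional argument (or a duplicate keyword) triggers the TypeError
    # checked earlier in this test.
    assert stats.gamma.numargs == 1   # hedged sketch, not part of the original test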
# Also for a discrete distribution stats.geom.pmf(x, 2, loc=3) # no error, loc=3 assert_raises(TypeError, stats.geom.pmf, x, 2, 3, 4) assert_raises(TypeError, stats.geom.pmf, x, 2, 3, loc=4) # And for distributions with 0, 2 and 3 args respectively assert_raises(TypeError, stats.expon.pdf, x, 3, loc=1.0) assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, loc=1.0) assert_raises(TypeError, stats.exponweib.pdf, x, 3, 4, 5, 0.1, 0.1) assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, loc=1.0) assert_raises(TypeError, stats.ncf.pdf, x, 3, 4, 5, 6, 1.0, scale=0.5) stats.ncf.pdf(x, 3, 4, 5, 6, 1.0) # 3 args, plus loc/scale def test_ncx2_tails_ticket_955(): # Trac #955 -- check that the cdf computed by special functions # matches the integrated pdf a = stats.ncx2.cdf(np.arange(20, 25, 0.2), 2, 1.07458615e+02) b = stats.ncx2._cdfvec(np.arange(20, 25, 0.2), 2, 1.07458615e+02) assert_allclose(a, b, rtol=1e-3, atol=0) def test_ncx2_tails_pdf(): # ncx2.pdf does not return nans in extreme tails(example from gh-1577) # NB: this is to check that nan_to_num is not needed in ncx2.pdf with suppress_warnings() as sup: sup.filter(RuntimeWarning, "divide by zero encountered in log") assert_equal(stats.ncx2.pdf(1, np.arange(340, 350), 2), 0) logval = stats.ncx2.logpdf(1, np.arange(340, 350), 2) assert_(np.isneginf(logval).all()) def test_foldnorm_zero(): # Parameter value c=0 was not enabled, see gh-2399. rv = stats.foldnorm(0, scale=1) assert_equal(rv.cdf(0), 0) # rv.cdf(0) previously resulted in: nan def test_stats_shapes_argcheck(): # stats method was failing for vector shapes if some of the values # were outside of the allowed range, see gh-2678 mv3 = stats.invgamma.stats([0.0, 0.5, 1.0], 1, 0.5) # 0 is not a legal `a` mv2 = stats.invgamma.stats([0.5, 1.0], 1, 0.5) mv2_augmented = tuple(np.r_[np.nan, _] for _ in mv2) assert_equal(mv2_augmented, mv3) # -1 is not a legal shape parameter mv3 = stats.lognorm.stats([2, 2.4, -1]) mv2 = stats.lognorm.stats([2, 2.4]) mv2_augmented = tuple(np.r_[_, np.nan] for _ in mv2) assert_equal(mv2_augmented, mv3) # FIXME: this is only a quick-and-dirty test of a quick-and-dirty bugfix. # stats method with multiple shape parameters is not properly vectorized # anyway, so some distributions may or may not fail. # Test subclassing distributions w/ explicit shapes class _distr_gen(stats.rv_continuous): def _pdf(self, x, a): return 42 class _distr2_gen(stats.rv_continuous): def _cdf(self, x, a): return 42 * a + x class _distr3_gen(stats.rv_continuous): def _pdf(self, x, a, b): return a + b def _cdf(self, x, a): # Different # of shape params from _pdf, to be able to check that # inspection catches the inconsistency.""" return 42 * a + x class _distr6_gen(stats.rv_continuous): # Two shape parameters (both _pdf and _cdf defined, consistent shapes.) def _pdf(self, x, a, b): return a*x + b def _cdf(self, x, a, b): return 42 * a + x class TestSubclassingExplicitShapes(object): # Construct a distribution w/ explicit shapes parameter and test it. 
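    # Added illustrative sketch (not part of the original suite): a minimal
    # end-to-end use of an explicit ``shapes`` string with the dummy pdf defined
    # above.  The leading underscore keeps pytest from collecting it as a test.
    def _example_explicit_shapes(self):
        dist = _distr_gen(name='dummy_example', shapes='a')
        # _distr_gen._pdf ignores x and always returns 42, so the public pdf,
        # called with the keyword shape ``a``, should return 42 as well.
        assert_equal(dist.pdf(0.3, a=7), 42)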
def test_correct_shapes(self): dummy_distr = _distr_gen(name='dummy', shapes='a') assert_equal(dummy_distr.pdf(1, a=1), 42) def test_wrong_shapes_1(self): dummy_distr = _distr_gen(name='dummy', shapes='A') assert_raises(TypeError, dummy_distr.pdf, 1, **dict(a=1)) def test_wrong_shapes_2(self): dummy_distr = _distr_gen(name='dummy', shapes='a, b, c') dct = dict(a=1, b=2, c=3) assert_raises(TypeError, dummy_distr.pdf, 1, **dct) def test_shapes_string(self): # shapes must be a string dct = dict(name='dummy', shapes=42) assert_raises(TypeError, _distr_gen, **dct) def test_shapes_identifiers_1(self): # shapes must be a comma-separated list of valid python identifiers dct = dict(name='dummy', shapes='(!)') assert_raises(SyntaxError, _distr_gen, **dct) def test_shapes_identifiers_2(self): dct = dict(name='dummy', shapes='4chan') assert_raises(SyntaxError, _distr_gen, **dct) def test_shapes_identifiers_3(self): dct = dict(name='dummy', shapes='m(fti)') assert_raises(SyntaxError, _distr_gen, **dct) def test_shapes_identifiers_nodefaults(self): dct = dict(name='dummy', shapes='a=2') assert_raises(SyntaxError, _distr_gen, **dct) def test_shapes_args(self): dct = dict(name='dummy', shapes='*args') assert_raises(SyntaxError, _distr_gen, **dct) def test_shapes_kwargs(self): dct = dict(name='dummy', shapes='**kwargs') assert_raises(SyntaxError, _distr_gen, **dct) def test_shapes_keywords(self): # python keywords cannot be used for shape parameters dct = dict(name='dummy', shapes='a, b, c, lambda') assert_raises(SyntaxError, _distr_gen, **dct) def test_shapes_signature(self): # test explicit shapes which agree w/ the signature of _pdf class _dist_gen(stats.rv_continuous): def _pdf(self, x, a): return stats.norm._pdf(x) * a dist = _dist_gen(shapes='a') assert_equal(dist.pdf(0.5, a=2), stats.norm.pdf(0.5)*2) def test_shapes_signature_inconsistent(self): # test explicit shapes which do not agree w/ the signature of _pdf class _dist_gen(stats.rv_continuous): def _pdf(self, x, a): return stats.norm._pdf(x) * a dist = _dist_gen(shapes='a, b') assert_raises(TypeError, dist.pdf, 0.5, **dict(a=1, b=2)) def test_star_args(self): # test _pdf with only starargs # NB: **kwargs of pdf will never reach _pdf class _dist_gen(stats.rv_continuous): def _pdf(self, x, *args): extra_kwarg = args[0] return stats.norm._pdf(x) * extra_kwarg dist = _dist_gen(shapes='extra_kwarg') assert_equal(dist.pdf(0.5, extra_kwarg=33), stats.norm.pdf(0.5)*33) assert_equal(dist.pdf(0.5, 33), stats.norm.pdf(0.5)*33) assert_raises(TypeError, dist.pdf, 0.5, **dict(xxx=33)) def test_star_args_2(self): # test _pdf with named & starargs # NB: **kwargs of pdf will never reach _pdf class _dist_gen(stats.rv_continuous): def _pdf(self, x, offset, *args): extra_kwarg = args[0] return stats.norm._pdf(x) * extra_kwarg + offset dist = _dist_gen(shapes='offset, extra_kwarg') assert_equal(dist.pdf(0.5, offset=111, extra_kwarg=33), stats.norm.pdf(0.5)*33 + 111) assert_equal(dist.pdf(0.5, 111, 33), stats.norm.pdf(0.5)*33 + 111) def test_extra_kwarg(self): # **kwargs to _pdf are ignored. # this is a limitation of the framework (_pdf(x, *goodargs)) class _distr_gen(stats.rv_continuous): def _pdf(self, x, *args, **kwargs): # _pdf should handle *args, **kwargs itself. Here "handling" # is ignoring *args and looking for ``extra_kwarg`` and using # that. 
extra_kwarg = kwargs.pop('extra_kwarg', 1) return stats.norm._pdf(x) * extra_kwarg dist = _distr_gen(shapes='extra_kwarg') assert_equal(dist.pdf(1, extra_kwarg=3), stats.norm.pdf(1)) def shapes_empty_string(self): # shapes='' is equivalent to shapes=None class _dist_gen(stats.rv_continuous): def _pdf(self, x): return stats.norm.pdf(x) dist = _dist_gen(shapes='') assert_equal(dist.pdf(0.5), stats.norm.pdf(0.5)) class TestSubclassingNoShapes(object): # Construct a distribution w/o explicit shapes parameter and test it. def test_only__pdf(self): dummy_distr = _distr_gen(name='dummy') assert_equal(dummy_distr.pdf(1, a=1), 42) def test_only__cdf(self): # _pdf is determined from _cdf by taking numerical derivative dummy_distr = _distr2_gen(name='dummy') assert_almost_equal(dummy_distr.pdf(1, a=1), 1) @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") def test_signature_inspection(self): # check that _pdf signature inspection works correctly, and is used in # the class docstring dummy_distr = _distr_gen(name='dummy') assert_equal(dummy_distr.numargs, 1) assert_equal(dummy_distr.shapes, 'a') res = re.findall(r'logpdf\(x, a, loc=0, scale=1\)', dummy_distr.__doc__) assert_(len(res) == 1) @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") def test_signature_inspection_2args(self): # same for 2 shape params and both _pdf and _cdf defined dummy_distr = _distr6_gen(name='dummy') assert_equal(dummy_distr.numargs, 2) assert_equal(dummy_distr.shapes, 'a, b') res = re.findall(r'logpdf\(x, a, b, loc=0, scale=1\)', dummy_distr.__doc__) assert_(len(res) == 1) def test_signature_inspection_2args_incorrect_shapes(self): # both _pdf and _cdf defined, but shapes are inconsistent: raises try: _distr3_gen(name='dummy') except TypeError: pass else: raise AssertionError('TypeError not raised.') def test_defaults_raise(self): # default arguments should raise class _dist_gen(stats.rv_continuous): def _pdf(self, x, a=42): return 42 assert_raises(TypeError, _dist_gen, **dict(name='dummy')) def test_starargs_raise(self): # without explicit shapes, *args are not allowed class _dist_gen(stats.rv_continuous): def _pdf(self, x, a, *args): return 42 assert_raises(TypeError, _dist_gen, **dict(name='dummy')) def test_kwargs_raise(self): # without explicit shapes, **kwargs are not allowed class _dist_gen(stats.rv_continuous): def _pdf(self, x, a, **kwargs): return 42 assert_raises(TypeError, _dist_gen, **dict(name='dummy')) @pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstring stripped") def test_docstrings(): badones = [r',\s*,', r'\(\s*,', r'^\s*:'] for distname in stats.__all__: dist = getattr(stats, distname) if isinstance(dist, (stats.rv_discrete, stats.rv_continuous)): for regex in badones: assert_(re.search(regex, dist.__doc__) is None) def test_infinite_input(): assert_almost_equal(stats.skellam.sf(np.inf, 10, 11), 0) assert_almost_equal(stats.ncx2._cdf(np.inf, 8, 0.1), 1) def test_lomax_accuracy(): # regression test for gh-4033 p = stats.lomax.ppf(stats.lomax.cdf(1e-100, 1), 1) assert_allclose(p, 1e-100) def test_gompertz_accuracy(): # Regression test for gh-4031 p = stats.gompertz.ppf(stats.gompertz.cdf(1e-100, 1), 1) assert_allclose(p, 1e-100) def test_truncexpon_accuracy(): # regression test for gh-4035 p = stats.truncexpon.ppf(stats.truncexpon.cdf(1e-100, 1), 1) assert_allclose(p, 1e-100) def test_rayleigh_accuracy(): # regression test for gh-4034 p = stats.rayleigh.isf(stats.rayleigh.sf(9, 1), 1) assert_almost_equal(p, 9.0, decimal=15) def test_genextreme_give_no_warnings(): 
"""regression test for gh-6219""" with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") p = stats.genextreme.cdf(.5, 0) p = stats.genextreme.pdf(.5, 0) p = stats.genextreme.ppf(.5, 0) p = stats.genextreme.logpdf(-np.inf, 0.0) number_of_warnings_thrown = len(w) assert_equal(number_of_warnings_thrown, 0) def test_genextreme_entropy(): # regression test for gh-5181 euler_gamma = 0.5772156649015329 h = stats.genextreme.entropy(-1.0) assert_allclose(h, 2*euler_gamma + 1, rtol=1e-14) h = stats.genextreme.entropy(0) assert_allclose(h, euler_gamma + 1, rtol=1e-14) h = stats.genextreme.entropy(1.0) assert_equal(h, 1) h = stats.genextreme.entropy(-2.0, scale=10) assert_allclose(h, euler_gamma*3 + np.log(10) + 1, rtol=1e-14) h = stats.genextreme.entropy(10) assert_allclose(h, -9*euler_gamma + 1, rtol=1e-14) h = stats.genextreme.entropy(-10) assert_allclose(h, 11*euler_gamma + 1, rtol=1e-14) def test_genextreme_sf_isf(): # Expected values were computed using mpmath: # # import mpmath # # def mp_genextreme_sf(x, xi, mu=0, sigma=1): # # Formula from wikipedia, which has a sign convention for xi that # # is the opposite of scipy's shape parameter. # if xi != 0: # t = mpmath.power(1 + ((x - mu)/sigma)*xi, -1/xi) # else: # t = mpmath.exp(-(x - mu)/sigma) # return 1 - mpmath.exp(-t) # # >>> mpmath.mp.dps = 1000 # >>> s = mp_genextreme_sf(mpmath.mp.mpf("1e8"), mpmath.mp.mpf("0.125")) # >>> float(s) # 1.6777205262585625e-57 # >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("-0.125")) # >>> float(s) # 1.52587890625e-21 # >>> s = mp_genextreme_sf(mpmath.mp.mpf("7.98"), mpmath.mp.mpf("0")) # >>> float(s) # 0.00034218086528426593 x = 1e8 s = stats.genextreme.sf(x, -0.125) assert_allclose(s, 1.6777205262585625e-57) x2 = stats.genextreme.isf(s, -0.125) assert_allclose(x2, x) x = 7.98 s = stats.genextreme.sf(x, 0.125) assert_allclose(s, 1.52587890625e-21) x2 = stats.genextreme.isf(s, 0.125) assert_allclose(x2, x) x = 7.98 s = stats.genextreme.sf(x, 0) assert_allclose(s, 0.00034218086528426593) x2 = stats.genextreme.isf(s, 0) assert_allclose(x2, x) def test_burr12_ppf_small_arg(): prob = 1e-16 quantile = stats.burr12.ppf(prob, 2, 3) # The expected quantile was computed using mpmath: # >>> import mpmath # >>> prob = mpmath.mpf('1e-16') # >>> c = mpmath.mpf(2) # >>> d = mpmath.mpf(3) # >>> float(((1-q)**(-1/d) - 1)**(1/c)) # 5.7735026918962575e-09 assert_allclose(quantile, 5.7735026918962575e-09) def test_crystalball_function(): """ All values are calculated using the independent implementation of the ROOT framework (see https://root.cern.ch/). Corresponding ROOT code is given in the comments. 
""" X = np.linspace(-5.0, 5.0, 21)[:-1] # for(float x = -5.0; x < 5.0; x+=0.5) # std::cout << ROOT::Math::crystalball_pdf(x, 1.0, 2.0, 1.0) << ", "; calculated = stats.crystalball.pdf(X, beta=1.0, m=2.0) expected = np.array([0.0202867, 0.0241428, 0.0292128, 0.0360652, 0.045645, 0.059618, 0.0811467, 0.116851, 0.18258, 0.265652, 0.301023, 0.265652, 0.18258, 0.097728, 0.0407391, 0.013226, 0.00334407, 0.000658486, 0.000100982, 1.20606e-05]) assert_allclose(expected, calculated, rtol=0.001) # for(float x = -5.0; x < 5.0; x+=0.5) # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 1.0) << ", "; calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0) expected = np.array([0.0019648, 0.00279754, 0.00417592, 0.00663121, 0.0114587, 0.0223803, 0.0530497, 0.12726, 0.237752, 0.345928, 0.391987, 0.345928, 0.237752, 0.12726, 0.0530497, 0.0172227, 0.00435458, 0.000857469, 0.000131497, 1.57051e-05]) assert_allclose(expected, calculated, rtol=0.001) # for(float x = -5.0; x < 5.0; x+=0.5) # std::cout << ROOT::Math::crystalball_pdf(x, 2.0, 3.0, 2.0, 0.5) << ", "; calculated = stats.crystalball.pdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0) expected = np.array([0.00785921, 0.0111902, 0.0167037, 0.0265249, 0.0423866, 0.0636298, 0.0897324, 0.118876, 0.147944, 0.172964, 0.189964, 0.195994, 0.189964, 0.172964, 0.147944, 0.118876, 0.0897324, 0.0636298, 0.0423866, 0.0265249]) assert_allclose(expected, calculated, rtol=0.001) # for(float x = -5.0; x < 5.0; x+=0.5) # std::cout << ROOT::Math::crystalball_cdf(x, 1.0, 2.0, 1.0) << ", "; calculated = stats.crystalball.cdf(X, beta=1.0, m=2.0) expected = np.array([0.12172, 0.132785, 0.146064, 0.162293, 0.18258, 0.208663, 0.24344, 0.292128, 0.36516, 0.478254, 0.622723, 0.767192, 0.880286, 0.94959, 0.982834, 0.995314, 0.998981, 0.999824, 0.999976, 0.999997]) assert_allclose(expected, calculated, rtol=0.001) # for(float x = -5.0; x < 5.0; x+=0.5) # std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 1.0) << ", "; calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0) expected = np.array([0.00442081, 0.00559509, 0.00730787, 0.00994682, 0.0143234, 0.0223803, 0.0397873, 0.0830763, 0.173323, 0.320592, 0.508717, 0.696841, 0.844111, 0.934357, 0.977646, 0.993899, 0.998674, 0.999771, 0.999969, 0.999997]) assert_allclose(expected, calculated, rtol=0.001) # for(float x = -5.0; x < 5.0; x+=0.5) # std::cout << ROOT::Math::crystalball_cdf(x, 2.0, 3.0, 2.0, 0.5) << ", "; calculated = stats.crystalball.cdf(X, beta=2.0, m=3.0, loc=0.5, scale=2.0) expected = np.array([0.0176832, 0.0223803, 0.0292315, 0.0397873, 0.0567945, 0.0830763, 0.121242, 0.173323, 0.24011, 0.320592, 0.411731, 0.508717, 0.605702, 0.696841, 0.777324, 0.844111, 0.896192, 0.934357, 0.960639, 0.977646]) assert_allclose(expected, calculated, rtol=0.001) def test_crystalball_function_moments(): """ All values are calculated using the pdf formula and the integrate function of Mathematica """ # The Last two (alpha, n) pairs test the special case n == alpha**2 beta = np.array([2.0, 1.0, 3.0, 2.0, 3.0]) m = np.array([3.0, 3.0, 2.0, 4.0, 9.0]) # The distribution should be correctly normalised expected_0th_moment = np.array([1.0, 1.0, 1.0, 1.0, 1.0]) calculated_0th_moment = stats.crystalball._munp(0, beta, m) assert_allclose(expected_0th_moment, calculated_0th_moment, rtol=0.001) # calculated using wolframalpha.com # e.g. 
for beta = 2 and m = 3 we calculate the norm like this: # integrate exp(-x^2/2) from -2 to infinity + integrate (3/2)^3*exp(-2^2/2)*(3/2-2-x)^(-3) from -infinity to -2 norm = np.array([2.5511, 3.01873, 2.51065, 2.53983, 2.507410455]) expected_1th_moment = np.array([-0.21992, -3.03265, np.inf, -0.135335, -0.003174]) / norm calculated_1th_moment = stats.crystalball._munp(1, beta, m) assert_allclose(expected_1th_moment, calculated_1th_moment, rtol=0.001) expected_2th_moment = np.array([np.inf, np.inf, np.inf, 3.2616, 2.519908]) / norm calculated_2th_moment = stats.crystalball._munp(2, beta, m) assert_allclose(expected_2th_moment, calculated_2th_moment, rtol=0.001) expected_3th_moment = np.array([np.inf, np.inf, np.inf, np.inf, -0.0577668]) / norm calculated_3th_moment = stats.crystalball._munp(3, beta, m) assert_allclose(expected_3th_moment, calculated_3th_moment, rtol=0.001) expected_4th_moment = np.array([np.inf, np.inf, np.inf, np.inf, 7.78468]) / norm calculated_4th_moment = stats.crystalball._munp(4, beta, m) assert_allclose(expected_4th_moment, calculated_4th_moment, rtol=0.001) expected_5th_moment = np.array([np.inf, np.inf, np.inf, np.inf, -1.31086]) / norm calculated_5th_moment = stats.crystalball._munp(5, beta, m) assert_allclose(expected_5th_moment, calculated_5th_moment, rtol=0.001) def test_argus_function(): # There is no usable reference implementation. # (RooFit implementation returns unreasonable results which are not normalized correctly) # Instead we do some tests if the distribution behaves as expected for different shapes and scales for i in range(1, 10): for j in range(1, 10): assert_equal(stats.argus.pdf(i + 0.001, chi=j, scale=i), 0.0) assert_(stats.argus.pdf(i - 0.001, chi=j, scale=i) > 0.0) assert_equal(stats.argus.pdf(-0.001, chi=j, scale=i), 0.0) assert_(stats.argus.pdf(+0.001, chi=j, scale=i) > 0.0) for i in range(1, 10): assert_equal(stats.argus.cdf(1.0, chi=i), 1.0) assert_equal(stats.argus.cdf(1.0, chi=i), 1.0 - stats.argus.sf(1.0, chi=i)) class TestHistogram(object): def setup_method(self): np.random.seed(1234) # We have 8 bins # [1,2), [2,3), [3,4), [4,5), [5,6), [6,7), [7,8), [8,9) # But actually np.histogram will put the last 9 also in the [8,9) bin! # Therefore there is a slight difference below for the last bin, from # what you might have expected. histogram = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8) self.template = stats.rv_histogram(histogram) data = stats.norm.rvs(loc=1.0, scale=2.5,size=10000, random_state=123) norm_histogram = np.histogram(data, bins=50) self.norm_template = stats.rv_histogram(norm_histogram) def test_pdf(self): values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5]) pdf_values = np.asarray([0.0/25.0, 0.0/25.0, 1.0/25.0, 1.0/25.0, 2.0/25.0, 2.0/25.0, 3.0/25.0, 3.0/25.0, 4.0/25.0, 4.0/25.0, 5.0/25.0, 5.0/25.0, 4.0/25.0, 4.0/25.0, 3.0/25.0, 3.0/25.0, 3.0/25.0, 3.0/25.0, 0.0/25.0, 0.0/25.0]) assert_allclose(self.template.pdf(values), pdf_values) # Test explicitly the corner cases: # As stated above the pdf in the bin [8,9) is greater than # one would naively expect because np.histogram putted the 9 # into the [8,9) bin. assert_almost_equal(self.template.pdf(8.0), 3.0/25.0) assert_almost_equal(self.template.pdf(8.5), 3.0/25.0) # 9 is outside our defined bins [8,9) hence the pdf is already 0 # for a continuous distribution this is fine, because a single value # does not have a finite probability! 
assert_almost_equal(self.template.pdf(9.0), 0.0/25.0) assert_almost_equal(self.template.pdf(10.0), 0.0/25.0) x = np.linspace(-2, 2, 10) assert_allclose(self.norm_template.pdf(x), stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1) def test_cdf_ppf(self): values = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5]) cdf_values = np.asarray([0.0/25.0, 0.0/25.0, 0.0/25.0, 0.5/25.0, 1.0/25.0, 2.0/25.0, 3.0/25.0, 4.5/25.0, 6.0/25.0, 8.0/25.0, 10.0/25.0, 12.5/25.0, 15.0/25.0, 17.0/25.0, 19.0/25.0, 20.5/25.0, 22.0/25.0, 23.5/25.0, 25.0/25.0, 25.0/25.0]) assert_allclose(self.template.cdf(values), cdf_values) # First three and last two values in cdf_value are not unique assert_allclose(self.template.ppf(cdf_values[2:-1]), values[2:-1]) # Test of cdf and ppf are inverse functions x = np.linspace(1.0, 9.0, 100) assert_allclose(self.template.ppf(self.template.cdf(x)), x) x = np.linspace(0.0, 1.0, 100) assert_allclose(self.template.cdf(self.template.ppf(x)), x) x = np.linspace(-2, 2, 10) assert_allclose(self.norm_template.cdf(x), stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1) def test_rvs(self): N = 10000 sample = self.template.rvs(size=N, random_state=123) assert_equal(np.sum(sample < 1.0), 0.0) assert_allclose(np.sum(sample <= 2.0), 1.0/25.0 * N, rtol=0.2) assert_allclose(np.sum(sample <= 2.5), 2.0/25.0 * N, rtol=0.2) assert_allclose(np.sum(sample <= 3.0), 3.0/25.0 * N, rtol=0.1) assert_allclose(np.sum(sample <= 3.5), 4.5/25.0 * N, rtol=0.1) assert_allclose(np.sum(sample <= 4.0), 6.0/25.0 * N, rtol=0.1) assert_allclose(np.sum(sample <= 4.5), 8.0/25.0 * N, rtol=0.1) assert_allclose(np.sum(sample <= 5.0), 10.0/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 5.5), 12.5/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 6.0), 15.0/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 6.5), 17.0/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 7.0), 19.0/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 7.5), 20.5/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 8.0), 22.0/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 8.5), 23.5/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05) assert_allclose(np.sum(sample <= 9.0), 25.0/25.0 * N, rtol=0.05) assert_equal(np.sum(sample > 9.0), 0.0) def test_munp(self): for n in range(4): assert_allclose(self.norm_template._munp(n), stats.norm._munp(n, 1.0, 2.5), rtol=0.05) def test_entropy(self): assert_allclose(self.norm_template.entropy(), stats.norm.entropy(loc=1.0, scale=2.5), rtol=0.05)
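A minimal, self-contained sketch (added for illustration; the variable names are arbitrary and this is not part of the test file above) of the rv_histogram workflow that TestHistogram exercises: bin a normal sample with np.histogram, wrap the (counts, bin_edges) pair in stats.rv_histogram, and check that the resulting pdf and cdf track the source distribution.

import numpy as np
from scipy import stats

# Draw the same kind of sample the test uses and bin it.
data = stats.norm.rvs(loc=1.0, scale=2.5, size=10000, random_state=123)
hist = np.histogram(data, bins=50)          # (counts, bin_edges)

# rv_histogram turns the histogram into a continuous distribution with
# pdf/cdf/ppf/rvs methods (piecewise-constant pdf, piecewise-linear cdf).
template = stats.rv_histogram(hist)

x = np.linspace(-2, 2, 10)
# With 10000 samples the histogram estimate tracks the true curves to ~10%,
# which is the same tolerance the test suite uses.
assert np.allclose(template.pdf(x), stats.norm.pdf(x, loc=1.0, scale=2.5), rtol=0.1)
assert np.allclose(template.cdf(x), stats.norm.cdf(x, loc=1.0, scale=2.5), rtol=0.1)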
file_length: 129,718
avg_line_length: 36.75291
max_line_length: 117
extension_type: py
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_multivariate.py
""" Test functions for multivariate normal distributions. """ from __future__ import division, print_function, absolute_import import pickle from numpy.testing import (assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_equal, assert_array_less, assert_) from pytest import raises as assert_raises from .test_continuous_basic import check_distribution_rvs import numpy import numpy as np import scipy.linalg from scipy.stats._multivariate import _PSD, _lnB from scipy.stats import multivariate_normal from scipy.stats import matrix_normal from scipy.stats import special_ortho_group, ortho_group from scipy.stats import random_correlation from scipy.stats import unitary_group from scipy.stats import dirichlet, beta from scipy.stats import wishart, multinomial, invwishart, chi2, invgamma from scipy.stats import norm, uniform from scipy.stats import ks_2samp, kstest from scipy.stats import binom from scipy.integrate import romb from .common_tests import check_random_state_property class TestMultivariateNormal(object): def test_input_shape(self): mu = np.arange(3) cov = np.identity(2) assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov) assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov) assert_raises(ValueError, multivariate_normal.cdf, (0, 1), mu, cov) assert_raises(ValueError, multivariate_normal.cdf, (0, 1, 2), mu, cov) def test_scalar_values(self): np.random.seed(1234) # When evaluated on scalar data, the pdf should return a scalar x, mean, cov = 1.5, 1.7, 2.5 pdf = multivariate_normal.pdf(x, mean, cov) assert_equal(pdf.ndim, 0) # When evaluated on a single vector, the pdf should return a scalar x = np.random.randn(5) mean = np.random.randn(5) cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix pdf = multivariate_normal.pdf(x, mean, cov) assert_equal(pdf.ndim, 0) # When evaluated on scalar data, the cdf should return a scalar x, mean, cov = 1.5, 1.7, 2.5 cdf = multivariate_normal.cdf(x, mean, cov) assert_equal(cdf.ndim, 0) # When evaluated on a single vector, the cdf should return a scalar x = np.random.randn(5) mean = np.random.randn(5) cov = np.abs(np.random.randn(5)) # Diagonal values for cov. 
matrix cdf = multivariate_normal.cdf(x, mean, cov) assert_equal(cdf.ndim, 0) def test_logpdf(self): # Check that the log of the pdf is in fact the logpdf np.random.seed(1234) x = np.random.randn(5) mean = np.random.randn(5) cov = np.abs(np.random.randn(5)) d1 = multivariate_normal.logpdf(x, mean, cov) d2 = multivariate_normal.pdf(x, mean, cov) assert_allclose(d1, np.log(d2)) def test_logpdf_default_values(self): # Check that the log of the pdf is in fact the logpdf # with default parameters Mean=None and cov = 1 np.random.seed(1234) x = np.random.randn(5) d1 = multivariate_normal.logpdf(x) d2 = multivariate_normal.pdf(x) # check whether default values are being used d3 = multivariate_normal.logpdf(x, None, 1) d4 = multivariate_normal.pdf(x, None, 1) assert_allclose(d1, np.log(d2)) assert_allclose(d3, np.log(d4)) def test_logcdf(self): # Check that the log of the cdf is in fact the logcdf np.random.seed(1234) x = np.random.randn(5) mean = np.random.randn(5) cov = np.abs(np.random.randn(5)) d1 = multivariate_normal.logcdf(x, mean, cov) d2 = multivariate_normal.cdf(x, mean, cov) assert_allclose(d1, np.log(d2)) def test_logcdf_default_values(self): # Check that the log of the cdf is in fact the logcdf # with default parameters Mean=None and cov = 1 np.random.seed(1234) x = np.random.randn(5) d1 = multivariate_normal.logcdf(x) d2 = multivariate_normal.cdf(x) # check whether default values are being used d3 = multivariate_normal.logcdf(x, None, 1) d4 = multivariate_normal.cdf(x, None, 1) assert_allclose(d1, np.log(d2)) assert_allclose(d3, np.log(d4)) def test_rank(self): # Check that the rank is detected correctly. np.random.seed(1234) n = 4 mean = np.random.randn(n) for expected_rank in range(1, n + 1): s = np.random.randn(n, expected_rank) cov = np.dot(s, s.T) distn = multivariate_normal(mean, cov, allow_singular=True) assert_equal(distn.cov_info.rank, expected_rank) def test_degenerate_distributions(self): def _sample_orthonormal_matrix(n): M = np.random.randn(n, n) u, s, v = scipy.linalg.svd(M) return u for n in range(1, 5): x = np.random.randn(n) for k in range(1, n + 1): # Sample a small covariance matrix. s = np.random.randn(k, k) cov_kk = np.dot(s, s.T) # Embed the small covariance matrix into a larger low rank matrix. cov_nn = np.zeros((n, n)) cov_nn[:k, :k] = cov_kk # Define a rotation of the larger low rank matrix. u = _sample_orthonormal_matrix(n) cov_rr = np.dot(u, np.dot(cov_nn, u.T)) y = np.dot(u, x) # Check some identities. distn_kk = multivariate_normal(np.zeros(k), cov_kk, allow_singular=True) distn_nn = multivariate_normal(np.zeros(n), cov_nn, allow_singular=True) distn_rr = multivariate_normal(np.zeros(n), cov_rr, allow_singular=True) assert_equal(distn_kk.cov_info.rank, k) assert_equal(distn_nn.cov_info.rank, k) assert_equal(distn_rr.cov_info.rank, k) pdf_kk = distn_kk.pdf(x[:k]) pdf_nn = distn_nn.pdf(x) pdf_rr = distn_rr.pdf(y) assert_allclose(pdf_kk, pdf_nn) assert_allclose(pdf_kk, pdf_rr) logpdf_kk = distn_kk.logpdf(x[:k]) logpdf_nn = distn_nn.logpdf(x) logpdf_rr = distn_rr.logpdf(y) assert_allclose(logpdf_kk, logpdf_nn) assert_allclose(logpdf_kk, logpdf_rr) def test_large_pseudo_determinant(self): # Check that large pseudo-determinants are handled appropriately. # Construct a singular diagonal covariance matrix # whose pseudo determinant overflows double precision. 
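        # Added note (illustrative): the pseudo-determinant is the product of the
        # non-zero eigenvalues, so its log equals npos * log(large_entry)
        # = npos * (large_total_log / npos) = 1000.  exp(1000) is far beyond the
        # largest representable double (about 1.8e308), which is why _PSD stores
        # log_pdet rather than the pseudo-determinant itself.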
large_total_log = 1000.0 npos = 100 nzero = 2 large_entry = np.exp(large_total_log / npos) n = npos + nzero cov = np.zeros((n, n), dtype=float) np.fill_diagonal(cov, large_entry) cov[-nzero:, -nzero:] = 0 # Check some determinants. assert_equal(scipy.linalg.det(cov), 0) assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf) assert_allclose(np.linalg.slogdet(cov[:npos, :npos]), (1, large_total_log)) # Check the pseudo-determinant. psd = _PSD(cov) assert_allclose(psd.log_pdet, large_total_log) def test_broadcasting(self): np.random.seed(1234) n = 4 # Construct a random covariance matrix. data = np.random.randn(n, n) cov = np.dot(data, data.T) mean = np.random.randn(n) # Construct an ndarray which can be interpreted as # a 2x3 array whose elements are random data vectors. X = np.random.randn(2, 3, n) # Check that multiple data points can be evaluated at once. desired_pdf = multivariate_normal.pdf(X, mean, cov) desired_cdf = multivariate_normal.cdf(X, mean, cov) for i in range(2): for j in range(3): actual = multivariate_normal.pdf(X[i, j], mean, cov) assert_allclose(actual, desired_pdf[i,j]) # Repeat for cdf actual = multivariate_normal.cdf(X[i, j], mean, cov) assert_allclose(actual, desired_cdf[i,j], rtol=1e-3) def test_normal_1D(self): # The probability density function for a 1D normal variable should # agree with the standard normal distribution in scipy.stats.distributions x = np.linspace(0, 2, 10) mean, cov = 1.2, 0.9 scale = cov**0.5 d1 = norm.pdf(x, mean, scale) d2 = multivariate_normal.pdf(x, mean, cov) assert_allclose(d1, d2) # The same should hold for the cumulative distribution function d1 = norm.cdf(x, mean, scale) d2 = multivariate_normal.cdf(x, mean, cov) assert_allclose(d1, d2) def test_marginalization(self): # Integrating out one of the variables of a 2D Gaussian should # yield a 1D Gaussian mean = np.array([2.5, 3.5]) cov = np.array([[.5, 0.2], [0.2, .6]]) n = 2 ** 8 + 1 # Number of samples delta = 6 / (n - 1) # Grid spacing v = np.linspace(0, 6, n) xv, yv = np.meshgrid(v, v) pos = np.empty((n, n, 2)) pos[:, :, 0] = xv pos[:, :, 1] = yv pdf = multivariate_normal.pdf(pos, mean, cov) # Marginalize over x and y axis margin_x = romb(pdf, delta, axis=0) margin_y = romb(pdf, delta, axis=1) # Compare with standard normal distribution gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5) gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5) assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2) assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2) def test_frozen(self): # The frozen distribution should agree with the regular one np.random.seed(1234) x = np.random.randn(5) mean = np.random.randn(5) cov = np.abs(np.random.randn(5)) norm_frozen = multivariate_normal(mean, cov) assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov)) assert_allclose(norm_frozen.logpdf(x), multivariate_normal.logpdf(x, mean, cov)) assert_allclose(norm_frozen.cdf(x), multivariate_normal.cdf(x, mean, cov)) assert_allclose(norm_frozen.logcdf(x), multivariate_normal.logcdf(x, mean, cov)) def test_pseudodet_pinv(self): # Make sure that pseudo-inverse and pseudo-det agree on cutoff # Assemble random covariance matrix with large and small eigenvalues np.random.seed(1234) n = 7 x = np.random.randn(n, n) cov = np.dot(x, x.T) s, u = scipy.linalg.eigh(cov) s = 0.5 * np.ones(n) s[0] = 1.0 s[-1] = 1e-7 cov = np.dot(u, np.dot(np.diag(s), u.T)) # Set cond so that the lowest eigenvalue is below the cutoff cond = 1e-5 psd = _PSD(cov, cond=cond) psd_pinv = _PSD(psd.pinv, cond=cond) # 
Check that the log pseudo-determinant agrees with the sum # of the logs of all but the smallest eigenvalue assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1]))) # Check that the pseudo-determinant of the pseudo-inverse # agrees with 1 / pseudo-determinant assert_allclose(-psd.log_pdet, psd_pinv.log_pdet) def test_exception_nonsquare_cov(self): cov = [[1, 2, 3], [4, 5, 6]] assert_raises(ValueError, _PSD, cov) def test_exception_nonfinite_cov(self): cov_nan = [[1, 0], [0, np.nan]] assert_raises(ValueError, _PSD, cov_nan) cov_inf = [[1, 0], [0, np.inf]] assert_raises(ValueError, _PSD, cov_inf) def test_exception_non_psd_cov(self): cov = [[1, 0], [0, -1]] assert_raises(ValueError, _PSD, cov) def test_exception_singular_cov(self): np.random.seed(1234) x = np.random.randn(5) mean = np.random.randn(5) cov = np.ones((5, 5)) e = np.linalg.LinAlgError assert_raises(e, multivariate_normal, mean, cov) assert_raises(e, multivariate_normal.pdf, x, mean, cov) assert_raises(e, multivariate_normal.logpdf, x, mean, cov) assert_raises(e, multivariate_normal.cdf, x, mean, cov) assert_raises(e, multivariate_normal.logcdf, x, mean, cov) def test_R_values(self): # Compare the multivariate pdf with some values precomputed # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6. # The values below were generated by the following R-script: # > library(mnormt) # > x <- seq(0, 2, length=5) # > y <- 3*x - 2 # > z <- x + cos(y) # > mu <- c(1, 3, 2) # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3) # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma) r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692, 0.0103803050, 0.0140250800]) x = np.linspace(0, 2, 5) y = 3 * x - 2 z = x + np.cos(y) r = np.array([x, y, z]).T mean = np.array([1, 3, 2], 'd') cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd') pdf = multivariate_normal.pdf(r, mean, cov) assert_allclose(pdf, r_pdf, atol=1e-10) # Compare the multivariate cdf with some values precomputed # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux. # The values below were generated by the following R-script: # > library(mnormt) # > x <- seq(0, 2, length=5) # > y <- 3*x - 2 # > z <- x + cos(y) # > mu <- c(1, 3, 2) # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3) # > r_cdf <- pmnorm(cbind(x,y,z), mu, Sigma) r_cdf = np.array([0.0017866215, 0.0267142892, 0.0857098761, 0.1063242573, 0.2501068509]) cdf = multivariate_normal.cdf(r, mean, cov) assert_allclose(cdf, r_cdf, atol=1e-5) # Also test bivariate cdf with some values precomputed # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux. 
# The values below were generated by the following R-script: # > library(mnormt) # > x <- seq(0, 2, length=5) # > y <- 3*x - 2 # > mu <- c(1, 3) # > Sigma <- matrix(c(1,2,2,5), 2, 2) # > r_cdf2 <- pmnorm(cbind(x,y), mu, Sigma) r_cdf2 = np.array([0.01262147, 0.05838989, 0.18389571, 0.40696599, 0.66470577]) r2 = np.array([x, y]).T mean2 = np.array([1, 3], 'd') cov2 = np.array([[1, 2], [2, 5]], 'd') cdf2 = multivariate_normal.cdf(r2, mean2, cov2) assert_allclose(cdf2, r_cdf2, atol=1e-5) def test_multivariate_normal_rvs_zero_covariance(self): mean = np.zeros(2) covariance = np.zeros((2, 2)) model = multivariate_normal(mean, covariance, allow_singular=True) sample = model.rvs() assert_equal(sample, [0, 0]) def test_rvs_shape(self): # Check that rvs parses the mean and covariance correctly, and returns # an array of the right shape N = 300 d = 4 sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N) assert_equal(sample.shape, (N, d)) sample = multivariate_normal.rvs(mean=None, cov=np.array([[2, .1], [.1, 1]]), size=N) assert_equal(sample.shape, (N, 2)) u = multivariate_normal(mean=0, cov=1) sample = u.rvs(N) assert_equal(sample.shape, (N, )) def test_large_sample(self): # Generate large sample and compare sample mean and sample covariance # with mean and covariance matrix. np.random.seed(2846) n = 3 mean = np.random.randn(n) M = np.random.randn(n, n) cov = np.dot(M, M.T) size = 5000 sample = multivariate_normal.rvs(mean, cov, size) assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1) assert_allclose(sample.mean(0), mean, rtol=1e-1) def test_entropy(self): np.random.seed(2846) n = 3 mean = np.random.randn(n) M = np.random.randn(n, n) cov = np.dot(M, M.T) rv = multivariate_normal(mean, cov) # Check that frozen distribution agrees with entropy function assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov)) # Compare entropy with manually computed expression involving # the sum of the logs of the eigenvalues of the covariance matrix eigs = np.linalg.eig(cov)[0] desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs))) assert_almost_equal(desired, rv.entropy()) def test_lnB(self): alpha = np.array([1, 1, 1]) desired = .5 # e^lnB = 1/2 for [1, 1, 1] assert_almost_equal(np.exp(_lnB(alpha)), desired) class TestMatrixNormal(object): def test_bad_input(self): # Check that bad inputs raise errors num_rows = 4 num_cols = 3 M = 0.3 * np.ones((num_rows,num_cols)) U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows)) V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols)) # Incorrect dimensions assert_raises(ValueError, matrix_normal, np.zeros((5,4,3))) assert_raises(ValueError, matrix_normal, M, np.zeros(10), V) assert_raises(ValueError, matrix_normal, M, U, np.zeros(10)) assert_raises(ValueError, matrix_normal, M, U, U) assert_raises(ValueError, matrix_normal, M, V, V) assert_raises(ValueError, matrix_normal, M.T, U, V) # Singular covariance e = np.linalg.LinAlgError assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols))) assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V) def test_default_inputs(self): # Check that default argument handling works num_rows = 4 num_cols = 3 M = 0.3 * np.ones((num_rows,num_cols)) U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows)) V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols)) Z = np.zeros((num_rows, num_cols)) Zr = np.zeros((num_rows, 1)) Zc = np.zeros((1, num_cols)) Ir = np.identity(num_rows) Ic = np.identity(num_cols) I1 = 
np.identity(1) assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape, (num_rows, num_cols)) assert_equal(matrix_normal.rvs(mean=M).shape, (num_rows, num_cols)) assert_equal(matrix_normal.rvs(rowcov=U).shape, (num_rows, 1)) assert_equal(matrix_normal.rvs(colcov=V).shape, (1, num_cols)) assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape, (num_rows, num_cols)) assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape, (num_rows, num_cols)) assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape, (num_rows, num_cols)) assert_equal(matrix_normal(mean=M).rowcov, Ir) assert_equal(matrix_normal(mean=M).colcov, Ic) assert_equal(matrix_normal(rowcov=U).mean, Zr) assert_equal(matrix_normal(rowcov=U).colcov, I1) assert_equal(matrix_normal(colcov=V).mean, Zc) assert_equal(matrix_normal(colcov=V).rowcov, I1) assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic) assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir) assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z) def test_covariance_expansion(self): # Check that covariance can be specified with scalar or vector num_rows = 4 num_cols = 3 M = 0.3 * np.ones((num_rows,num_cols)) Uv = 0.2*np.ones(num_rows) Us = 0.2 Vv = 0.1*np.ones(num_cols) Vs = 0.1 Ir = np.identity(num_rows) Ic = np.identity(num_cols) assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov, 0.2*Ir) assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov, 0.1*Ic) assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov, 0.2*Ir) assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov, 0.1*Ic) def test_frozen_matrix_normal(self): for i in range(1,5): for j in range(1,5): M = 0.3 * np.ones((i,j)) U = 0.5 * np.identity(i) + 0.5 * np.ones((i,i)) V = 0.7 * np.identity(j) + 0.3 * np.ones((j,j)) frozen = matrix_normal(mean=M, rowcov=U, colcov=V) rvs1 = frozen.rvs(random_state=1234) rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V, random_state=1234) assert_equal(rvs1, rvs2) X = frozen.rvs(random_state=1234) pdf1 = frozen.pdf(X) pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V) assert_equal(pdf1, pdf2) logpdf1 = frozen.logpdf(X) logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V) assert_equal(logpdf1, logpdf2) def test_matches_multivariate(self): # Check that the pdfs match those obtained by vectorising and # treating as a multivariate normal. for i in range(1,5): for j in range(1,5): M = 0.3 * np.ones((i,j)) U = 0.5 * np.identity(i) + 0.5 * np.ones((i,i)) V = 0.7 * np.identity(j) + 0.3 * np.ones((j,j)) frozen = matrix_normal(mean=M, rowcov=U, colcov=V) X = frozen.rvs(random_state=1234) pdf1 = frozen.pdf(X) logpdf1 = frozen.logpdf(X) vecX = X.T.flatten() vecM = M.T.flatten() cov = np.kron(V,U) pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov) logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov) assert_allclose(pdf1, pdf2, rtol=1E-10) assert_allclose(logpdf1, logpdf2, rtol=1E-10) def test_array_input(self): # Check array of inputs has the same output as the separate entries. 
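        # Added note (illustrative): matrix_normal.logpdf accepts any array whose
        # trailing two axes match the (num_rows, num_cols) mean and broadcasts over
        # the leading axes, so the (2, N, num_rows, num_cols) stack built below
        # should come back as a (2, N) array of log-densities.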
num_rows = 4 num_cols = 3 M = 0.3 * np.ones((num_rows,num_cols)) U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows)) V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols)) N = 10 frozen = matrix_normal(mean=M, rowcov=U, colcov=V) X1 = frozen.rvs(size=N, random_state=1234) X2 = frozen.rvs(size=N, random_state=4321) X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0) assert_equal(X.shape, (2, N, num_rows, num_cols)) array_logpdf = frozen.logpdf(X) assert_equal(array_logpdf.shape, (2, N)) for i in range(2): for j in range(N): separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M, rowcov=U, colcov=V) assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10) def test_moments(self): # Check that the sample moments match the parameters num_rows = 4 num_cols = 3 M = 0.3 * np.ones((num_rows,num_cols)) U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows)) V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols)) N = 1000 frozen = matrix_normal(mean=M, rowcov=U, colcov=V) X = frozen.rvs(size=N, random_state=1234) sample_mean = np.mean(X,axis=0) assert_allclose(sample_mean, M, atol=0.1) sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T) assert_allclose(sample_colcov, V, atol=0.1) sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape( N*num_cols,num_rows).T) assert_allclose(sample_rowcov, U, atol=0.1) class TestDirichlet(object): def test_frozen_dirichlet(self): np.random.seed(2846) n = np.random.randint(1, 32) alpha = np.random.uniform(10e-10, 100, n) d = dirichlet(alpha) assert_equal(d.var(), dirichlet.var(alpha)) assert_equal(d.mean(), dirichlet.mean(alpha)) assert_equal(d.entropy(), dirichlet.entropy(alpha)) num_tests = 10 for i in range(num_tests): x = np.random.uniform(10e-10, 100, n) x /= np.sum(x) assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha)) assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha)) def test_numpy_rvs_shape_compatibility(self): np.random.seed(2846) alpha = np.array([1.0, 2.0, 3.0]) x = np.random.dirichlet(alpha, size=7) assert_equal(x.shape, (7, 3)) assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) dirichlet.pdf(x.T, alpha) dirichlet.pdf(x.T[:-1], alpha) dirichlet.logpdf(x.T, alpha) dirichlet.logpdf(x.T[:-1], alpha) def test_alpha_with_zeros(self): np.random.seed(2846) alpha = [1.0, 0.0, 3.0] # don't pass invalid alpha to np.random.dirichlet x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_alpha_with_negative_entries(self): np.random.seed(2846) alpha = [1.0, -2.0, 3.0] # don't pass invalid alpha to np.random.dirichlet x = np.random.dirichlet(np.maximum(1e-9, alpha), size=7).T assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_data_with_zeros(self): alpha = np.array([1.0, 2.0, 3.0, 4.0]) x = np.array([0.1, 0.0, 0.2, 0.7]) dirichlet.pdf(x, alpha) dirichlet.logpdf(x, alpha) alpha = np.array([1.0, 1.0, 1.0, 1.0]) assert_almost_equal(dirichlet.pdf(x, alpha), 6) assert_almost_equal(dirichlet.logpdf(x, alpha), np.log(6)) def test_data_with_zeros_and_small_alpha(self): alpha = np.array([1.0, 0.5, 3.0, 4.0]) x = np.array([0.1, 0.0, 0.2, 0.7]) assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_data_with_negative_entries(self): alpha = np.array([1.0, 2.0, 3.0, 4.0]) x = np.array([0.1, 
-0.1, 0.3, 0.7]) assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_data_with_too_large_entries(self): alpha = np.array([1.0, 2.0, 3.0, 4.0]) x = np.array([0.1, 1.1, 0.3, 0.7]) assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_data_too_deep_c(self): alpha = np.array([1.0, 2.0, 3.0]) x = np.ones((2, 7, 7)) / 14 assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_alpha_too_deep(self): alpha = np.array([[1.0, 2.0], [3.0, 4.0]]) x = np.ones((2, 2, 7)) / 4 assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_alpha_correct_depth(self): alpha = np.array([1.0, 2.0, 3.0]) x = np.ones((3, 7)) / 3 dirichlet.pdf(x, alpha) dirichlet.logpdf(x, alpha) def test_non_simplex_data(self): alpha = np.array([1.0, 2.0, 3.0]) x = np.ones((3, 7)) / 2 assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_data_vector_too_short(self): alpha = np.array([1.0, 2.0, 3.0, 4.0]) x = np.ones((2, 7)) / 2 assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_data_vector_too_long(self): alpha = np.array([1.0, 2.0, 3.0, 4.0]) x = np.ones((5, 7)) / 5 assert_raises(ValueError, dirichlet.pdf, x, alpha) assert_raises(ValueError, dirichlet.logpdf, x, alpha) def test_mean_and_var(self): alpha = np.array([1., 0.8, 0.2]) d = dirichlet(alpha) expected_var = [1. / 12., 0.08, 0.03] expected_mean = [0.5, 0.4, 0.1] assert_array_almost_equal(d.var(), expected_var) assert_array_almost_equal(d.mean(), expected_mean) def test_scalar_values(self): alpha = np.array([0.2]) d = dirichlet(alpha) # For alpha of length 1, mean and var should be scalar instead of array assert_equal(d.mean().ndim, 0) assert_equal(d.var().ndim, 0) assert_equal(d.pdf([1.]).ndim, 0) assert_equal(d.logpdf([1.]).ndim, 0) def test_K_and_K_minus_1_calls_equal(self): # Test that calls with K and K-1 entries yield the same results. np.random.seed(2846) n = np.random.randint(1, 32) alpha = np.random.uniform(10e-10, 100, n) d = dirichlet(alpha) num_tests = 10 for i in range(num_tests): x = np.random.uniform(10e-10, 100, n) x /= np.sum(x) assert_almost_equal(d.pdf(x[:-1]), d.pdf(x)) def test_multiple_entry_calls(self): # Test that calls with multiple x vectors as matrix work np.random.seed(2846) n = np.random.randint(1, 32) alpha = np.random.uniform(10e-10, 100, n) d = dirichlet(alpha) num_tests = 10 num_multiple = 5 xm = None for i in range(num_tests): for m in range(num_multiple): x = np.random.uniform(10e-10, 100, n) x /= np.sum(x) if xm is not None: xm = np.vstack((xm, x)) else: xm = x rm = d.pdf(xm.T) rs = None for xs in xm: r = d.pdf(xs) if rs is not None: rs = np.append(rs, r) else: rs = r assert_array_almost_equal(rm, rs) def test_2D_dirichlet_is_beta(self): np.random.seed(2846) alpha = np.random.uniform(10e-10, 100, 2) d = dirichlet(alpha) b = beta(alpha[0], alpha[1]) num_tests = 10 for i in range(num_tests): x = np.random.uniform(10e-10, 100, 2) x /= np.sum(x) assert_almost_equal(b.pdf(x), d.pdf([x])) assert_almost_equal(b.mean(), d.mean()[0]) assert_almost_equal(b.var(), d.var()[0]) def test_multivariate_normal_dimensions_mismatch(): # Regression test for GH #3493. 
Check that setting up a PDF with a mean of # length M and a covariance matrix of size (N, N), where M != N, raises a # ValueError with an informative error message. mu = np.array([0.0, 0.0]) sigma = np.array([[1.0]]) assert_raises(ValueError, multivariate_normal, mu, sigma) # A simple check that the right error message was passed along. Checking # that the entire message is there, word for word, would be somewhat # fragile, so we just check for the leading part. try: multivariate_normal(mu, sigma) except ValueError as e: msg = "Dimension mismatch" assert_equal(str(e)[:len(msg)], msg) class TestWishart(object): def test_scale_dimensions(self): # Test that we can call the Wishart with various scale dimensions # Test case: dim=1, scale=1 true_scale = np.array(1, ndmin=2) scales = [ 1, # scalar [1], # iterable np.array(1), # 0-dim np.r_[1], # 1-dim np.array(1, ndmin=2) # 2-dim ] for scale in scales: w = wishart(1, scale) assert_equal(w.scale, true_scale) assert_equal(w.scale.shape, true_scale.shape) # Test case: dim=2, scale=[[1,0] # [0,2] true_scale = np.array([[1,0], [0,2]]) scales = [ [1,2], # iterable np.r_[1,2], # 1-dim np.array([[1,0], # 2-dim [0,2]]) ] for scale in scales: w = wishart(2, scale) assert_equal(w.scale, true_scale) assert_equal(w.scale.shape, true_scale.shape) # We cannot call with a df < dim assert_raises(ValueError, wishart, 1, np.eye(2)) # We cannot call with a 3-dimension array scale = np.array(1, ndmin=3) assert_raises(ValueError, wishart, 1, scale) def test_quantile_dimensions(self): # Test that we can call the Wishart rvs with various quantile dimensions # If dim == 1, consider x.shape = [1,1,1] X = [ 1, # scalar [1], # iterable np.array(1), # 0-dim np.r_[1], # 1-dim np.array(1, ndmin=2), # 2-dim np.array([1], ndmin=3) # 3-dim ] w = wishart(1,1) density = w.pdf(np.array(1, ndmin=3)) for x in X: assert_equal(w.pdf(x), density) # If dim == 1, consider x.shape = [1,1,*] X = [ [1,2,3], # iterable np.r_[1,2,3], # 1-dim np.array([1,2,3], ndmin=3) # 3-dim ] w = wishart(1,1) density = w.pdf(np.array([1,2,3], ndmin=3)) for x in X: assert_equal(w.pdf(x), density) # If dim == 2, consider x.shape = [2,2,1] # where x[:,:,*] = np.eye(1)*2 X = [ 2, # scalar [2,2], # iterable np.array(2), # 0-dim np.r_[2,2], # 1-dim np.array([[2,0], [0,2]]), # 2-dim np.array([[2,0], [0,2]])[:,:,np.newaxis] # 3-dim ] w = wishart(2,np.eye(2)) density = w.pdf(np.array([[2,0], [0,2]])[:,:,np.newaxis]) for x in X: assert_equal(w.pdf(x), density) def test_frozen(self): # Test that the frozen and non-frozen Wishart gives the same answers # Construct an arbitrary positive definite scale matrix dim = 4 scale = np.diag(np.arange(dim)+1) scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2) scale = np.dot(scale.T, scale) # Construct a collection of positive definite matrices to test the PDF X = [] for i in range(5): x = np.diag(np.arange(dim)+(i+1)**2) x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2) x = np.dot(x.T, x) X.append(x) X = np.array(X).T # Construct a 1D and 2D set of parameters parameters = [ (10, 1, np.linspace(0.1, 10, 5)), # 1D case (10, scale, X) ] for (df, scale, x) in parameters: w = wishart(df, scale) assert_equal(w.var(), wishart.var(df, scale)) assert_equal(w.mean(), wishart.mean(df, scale)) assert_equal(w.mode(), wishart.mode(df, scale)) assert_equal(w.entropy(), wishart.entropy(df, scale)) assert_equal(w.pdf(x), wishart.pdf(x, df, scale)) def test_1D_is_chisquared(self): # The 1-dimensional Wishart with an identity scale matrix is just a # chi-squared 
distribution. # Test variance, mean, entropy, pdf # Kolgomorov-Smirnov test for rvs np.random.seed(482974) sn = 500 dim = 1 scale = np.eye(dim) df_range = np.arange(1, 10, 2, dtype=float) X = np.linspace(0.1,10,num=10) for df in df_range: w = wishart(df, scale) c = chi2(df) # Statistics assert_allclose(w.var(), c.var()) assert_allclose(w.mean(), c.mean()) assert_allclose(w.entropy(), c.entropy()) # PDF assert_allclose(w.pdf(X), c.pdf(X)) # rvs rvs = w.rvs(size=sn) args = (df,) alpha = 0.01 check_distribution_rvs('chi2', args, alpha, rvs) def test_is_scaled_chisquared(self): # The 2-dimensional Wishart with an arbitrary scale matrix can be # transformed to a scaled chi-squared distribution. # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)` np.random.seed(482974) sn = 500 df = 10 dim = 4 # Construct an arbitrary positive definite matrix scale = np.diag(np.arange(4)+1) scale[np.tril_indices(4, k=-1)] = np.arange(6) scale = np.dot(scale.T, scale) # Use :math:`\lambda = [1, \dots, 1]'` lamda = np.ones((dim,1)) sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze() w = wishart(df, sigma_lamda) c = chi2(df, scale=sigma_lamda) # Statistics assert_allclose(w.var(), c.var()) assert_allclose(w.mean(), c.mean()) assert_allclose(w.entropy(), c.entropy()) # PDF X = np.linspace(0.1,10,num=10) assert_allclose(w.pdf(X), c.pdf(X)) # rvs rvs = w.rvs(size=sn) args = (df,0,sigma_lamda) alpha = 0.01 check_distribution_rvs('chi2', args, alpha, rvs) class TestMultinomial(object): def test_logpmf(self): vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7)) assert_allclose(vals1, -1.483270127243324, rtol=1e-8) vals2 = multinomial.logpmf([3, 4], 0, [.3, .7]) assert_allclose(vals2, np.NAN, rtol=1e-8) vals3 = multinomial.logpmf([3, 4], 0, [-2, 3]) assert_allclose(vals3, np.NAN, rtol=1e-8) def test_reduces_binomial(self): # test that the multinomial pmf reduces to the binomial pmf in the 2d # case val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7)) val2 = binom.logpmf(3, 7, 0.3) assert_allclose(val1, val2, rtol=1e-8) val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9)) val2 = binom.pmf(6, 14, 0.1) assert_allclose(val1, val2, rtol=1e-8) def test_R(self): # test against the values produced by this R code # (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html) # X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3] # X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL) # X # apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5))) n, p = 3, [1./8, 2./8, 5./8] r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375, (2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125, (0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500, (2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500, (1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000} for x in r_vals: assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14) def test_rvs_np(self): # test that .rvs agrees w/numpy sc_rvs = multinomial.rvs(3, [1/4.]*3, size=7, random_state=123) rndm = np.random.RandomState(123) np_rvs = rndm.multinomial(3, [1/4.]*3, size=7) assert_equal(sc_rvs, np_rvs) def test_pmf(self): vals0 = multinomial.pmf((5,), 5, (1,)) assert_allclose(vals0, 1, rtol=1e-8) vals1 = multinomial.pmf((3,4), 7, (.3, .7)) assert_allclose(vals1, .22689449999999994, rtol=1e-8) vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8, (.1, .9)) assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8) x = np.empty((0,2), dtype=np.float64) vals3 = 
multinomial.pmf(x, 4, (.3, .7)) assert_equal(vals3, np.empty([], dtype=np.float64)) vals4 = multinomial.pmf([1,2], 4, (.3, .7)) assert_allclose(vals4, 0, rtol=1e-8) vals5 = multinomial.pmf([3, 3, 0], 6, [2/3.0, 1/3.0, 0]) assert_allclose(vals5, 0.219478737997, rtol=1e-8) def test_pmf_broadcasting(self): vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]]) assert_allclose(vals0, [.243, .384], rtol=1e-8) vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9]) assert_allclose(vals1, [.243, 0], rtol=1e-8) vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9]) assert_allclose(vals2, [[.243, 0]], rtol=1e-8) vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9]) assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8) vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9]) assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8) def test_cov(self): cov1 = multinomial.cov(5, (.2, .3, .5)) cov2 = [[5*.2*.8, -5*.2*.3, -5*.2*.5], [-5*.3*.2, 5*.3*.7, -5*.3*.5], [-5*.5*.2, -5*.5*.3, 5*.5*.5]] assert_allclose(cov1, cov2, rtol=1e-8) def test_cov_broadcasting(self): cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]]) cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]] assert_allclose(cov1, cov2, rtol=1e-8) cov3 = multinomial.cov([4, 5], [.1, .9]) cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]] assert_allclose(cov3, cov4, rtol=1e-8) cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]]) cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]], [[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]] assert_allclose(cov5, cov6, rtol=1e-8) def test_entropy(self): # this is equivalent to a binomial distribution with n=2, so the # entropy .77899774929 is easily computed "by hand" ent0 = multinomial.entropy(2, [.2, .8]) assert_allclose(ent0, binom.entropy(2, .2), rtol=1e-8) def test_entropy_broadcasting(self): ent0 = multinomial.entropy([2, 3], [.2, .3]) assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)], rtol=1e-8) ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]]) assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)], rtol=1e-8) ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]]) assert_allclose(ent2, [[binom.entropy(7, .3), binom.entropy(7, .4)], [binom.entropy(8, .3), binom.entropy(8, .4)]], rtol=1e-8) def test_mean(self): mean1 = multinomial.mean(5, [.2, .8]) assert_allclose(mean1, [5*.2, 5*.8], rtol=1e-8) def test_mean_broadcasting(self): mean1 = multinomial.mean([5, 6], [.2, .8]) assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8) def test_frozen(self): # The frozen distribution should agree with the regular one np.random.seed(1234) n = 12 pvals = (.1, .2, .3, .4) x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]] x = np.asarray(x, dtype=np.float64) mn_frozen = multinomial(n, pvals) assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals)) assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals)) assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals)) class TestInvwishart(object): def test_frozen(self): # Test that the frozen and non-frozen inverse Wishart gives the same # answers # Construct an arbitrary positive definite scale matrix dim = 4 scale = np.diag(np.arange(dim)+1) scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2) scale = np.dot(scale.T, scale) # Construct a collection of positive definite matrices to test the PDF X = [] for i in range(5): x = np.diag(np.arange(dim)+(i+1)**2) x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2) x = np.dot(x.T, x) X.append(x) X = np.array(X).T # Construct a 1D 
and 2D set of parameters parameters = [ (10, 1, np.linspace(0.1, 10, 5)), # 1D case (10, scale, X) ] for (df, scale, x) in parameters: iw = invwishart(df, scale) assert_equal(iw.var(), invwishart.var(df, scale)) assert_equal(iw.mean(), invwishart.mean(df, scale)) assert_equal(iw.mode(), invwishart.mode(df, scale)) assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale)) def test_1D_is_invgamma(self): # The 1-dimensional inverse Wishart with an identity scale matrix is # just an inverse gamma distribution. # Test variance, mean, pdf # Kolgomorov-Smirnov test for rvs np.random.seed(482974) sn = 500 dim = 1 scale = np.eye(dim) df_range = np.arange(5, 20, 2, dtype=float) X = np.linspace(0.1,10,num=10) for df in df_range: iw = invwishart(df, scale) ig = invgamma(df/2, scale=1./2) # Statistics assert_allclose(iw.var(), ig.var()) assert_allclose(iw.mean(), ig.mean()) # PDF assert_allclose(iw.pdf(X), ig.pdf(X)) # rvs rvs = iw.rvs(size=sn) args = (df/2, 0, 1./2) alpha = 0.01 check_distribution_rvs('invgamma', args, alpha, rvs) def test_wishart_invwishart_2D_rvs(self): dim = 3 df = 10 # Construct a simple non-diagonal positive definite matrix scale = np.eye(dim) scale[0,1] = 0.5 scale[1,0] = 0.5 # Construct frozen Wishart and inverse Wishart random variables w = wishart(df, scale) iw = invwishart(df, scale) # Get the generated random variables from a known seed np.random.seed(248042) w_rvs = wishart.rvs(df, scale) np.random.seed(248042) frozen_w_rvs = w.rvs() np.random.seed(248042) iw_rvs = invwishart.rvs(df, scale) np.random.seed(248042) frozen_iw_rvs = iw.rvs() # Manually calculate what it should be, based on the Bartlett (1933) # decomposition of a Wishart into D A A' D', where D is the Cholesky # factorization of the scale matrix and A is the lower triangular matrix # with the square root of chi^2 variates on the diagonal and N(0,1) # variates in the lower triangle. 
np.random.seed(248042) covariances = np.random.normal(size=3) variances = np.r_[ np.random.chisquare(df), np.random.chisquare(df-1), np.random.chisquare(df-2), ]**0.5 # Construct the lower-triangular A matrix A = np.diag(variances) A[np.tril_indices(dim, k=-1)] = covariances # Wishart random variate D = np.linalg.cholesky(scale) DA = D.dot(A) manual_w_rvs = np.dot(DA, DA.T) # inverse Wishart random variate # Supposing that the inverse wishart has scale matrix `scale`, then the # random variate is the inverse of a random variate drawn from a Wishart # distribution with scale matrix `inv_scale = np.linalg.inv(scale)` iD = np.linalg.cholesky(np.linalg.inv(scale)) iDA = iD.dot(A) manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T)) # Test for equality assert_allclose(w_rvs, manual_w_rvs) assert_allclose(frozen_w_rvs, manual_w_rvs) assert_allclose(iw_rvs, manual_iw_rvs) assert_allclose(frozen_iw_rvs, manual_iw_rvs) class TestSpecialOrthoGroup(object): def test_reproducibility(self): np.random.seed(514) x = special_ortho_group.rvs(3) expected = np.array([[-0.99394515, -0.04527879, 0.10011432], [0.04821555, -0.99846897, 0.02711042], [0.09873351, 0.03177334, 0.99460653]]) assert_array_almost_equal(x, expected) random_state = np.random.RandomState(seed=514) x = special_ortho_group.rvs(3, random_state=random_state) assert_array_almost_equal(x, expected) def test_invalid_dim(self): assert_raises(ValueError, special_ortho_group.rvs, None) assert_raises(ValueError, special_ortho_group.rvs, (2, 2)) assert_raises(ValueError, special_ortho_group.rvs, 1) assert_raises(ValueError, special_ortho_group.rvs, 2.5) def test_frozen_matrix(self): dim = 7 frozen = special_ortho_group(dim) rvs1 = frozen.rvs(random_state=1234) rvs2 = special_ortho_group.rvs(dim, random_state=1234) assert_equal(rvs1, rvs2) def test_det_and_ortho(self): xs = [special_ortho_group.rvs(dim) for dim in range(2,12) for i in range(3)] # Test that determinants are always +1 dets = [np.linalg.det(x) for x in xs] assert_allclose(dets, [1.]*30, rtol=1e-13) # Test that these are orthogonal matrices for x in xs: assert_array_almost_equal(np.dot(x, x.T), np.eye(x.shape[0])) def test_haar(self): # Test that the distribution is constant under rotation # Every column should have the same distribution # Additionally, the distribution should be invariant under another rotation # Generate samples dim = 5 samples = 1000 # Not too many, or the test takes too long ks_prob = .05 np.random.seed(514) xs = special_ortho_group.rvs(dim, size=samples) # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3), # effectively picking off entries in the matrices of xs. # These projections should all have the same disribution, # establishing rotational invariance. We use the two-sided # KS test to confirm this. # We could instead test that angles between random vectors # are uniformly distributed, but the below is sufficient. # It is not feasible to consider all pairs, so pick a few. 
els = ((0,0), (0,2), (1,4), (2,3)) #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els} proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els) pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1] ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs] assert_array_less([ks_prob]*len(pairs), ks_tests) class TestOrthoGroup(object): def test_reproducibility(self): np.random.seed(515) x = ortho_group.rvs(3) x2 = ortho_group.rvs(3, random_state=515) # Note this matrix has det -1, distinguishing O(N) from SO(N) assert_almost_equal(np.linalg.det(x), -1) expected = np.array([[0.94449759, -0.21678569, -0.24683651], [-0.13147569, -0.93800245, 0.3207266], [0.30106219, 0.27047251, 0.9144431]]) assert_array_almost_equal(x, expected) assert_array_almost_equal(x2, expected) def test_invalid_dim(self): assert_raises(ValueError, ortho_group.rvs, None) assert_raises(ValueError, ortho_group.rvs, (2, 2)) assert_raises(ValueError, ortho_group.rvs, 1) assert_raises(ValueError, ortho_group.rvs, 2.5) def test_det_and_ortho(self): xs = [[ortho_group.rvs(dim) for i in range(10)] for dim in range(2,12)] # Test that abs determinants are always +1 dets = np.array([[np.linalg.det(x) for x in xx] for xx in xs]) assert_allclose(np.fabs(dets), np.ones(dets.shape), rtol=1e-13) # Test that we get both positive and negative determinants # Check that we have at least one and less than 10 negative dets in a sample of 10. The rest are positive by the previous test. # Test each dimension separately assert_array_less([0]*10, [np.where(d < 0)[0].shape[0] for d in dets]) assert_array_less([np.where(d < 0)[0].shape[0] for d in dets], [10]*10) # Test that these are orthogonal matrices for xx in xs: for x in xx: assert_array_almost_equal(np.dot(x, x.T), np.eye(x.shape[0])) def test_haar(self): # Test that the distribution is constant under rotation # Every column should have the same distribution # Additionally, the distribution should be invariant under another rotation # Generate samples dim = 5 samples = 1000 # Not too many, or the test takes too long ks_prob = .05 np.random.seed(518) # Note that the test is sensitive to seed too xs = ortho_group.rvs(dim, size=samples) # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3), # effectively picking off entries in the matrices of xs. # These projections should all have the same disribution, # establishing rotational invariance. We use the two-sided # KS test to confirm this. # We could instead test that angles between random vectors # are uniformly distributed, but the below is sufficient. # It is not feasible to consider all pairs, so pick a few. els = ((0,0), (0,2), (1,4), (2,3)) #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els} proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els) pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1] ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs] assert_array_less([ks_prob]*len(pairs), ks_tests) def test_pairwise_distances(self): # Test that the distribution of pairwise distances is close to correct. np.random.seed(514) def random_ortho(dim): u, _s, v = np.linalg.svd(np.random.normal(size=(dim, dim))) return np.dot(u, v) for dim in range(2, 6): def generate_test_statistics(rvs, N=1000, eps=1e-10): stats = np.array([ np.sum((rvs(dim=dim) - rvs(dim=dim))**2) for _ in range(N) ]) # Add a bit of noise to account for numeric accuracy. 
stats += np.random.uniform(-eps, eps, size=stats.shape) return stats expected = generate_test_statistics(random_ortho) actual = generate_test_statistics(scipy.stats.ortho_group.rvs) _D, p = scipy.stats.ks_2samp(expected, actual) assert_array_less(.05, p) class TestRandomCorrelation(object): def test_reproducibility(self): np.random.seed(514) eigs = (.5, .8, 1.2, 1.5) x = random_correlation.rvs((.5, .8, 1.2, 1.5)) x2 = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=514) expected = np.array([[1., -0.20387311, 0.18366501, -0.04953711], [-0.20387311, 1., -0.24351129, 0.06703474], [0.18366501, -0.24351129, 1., 0.38530195], [-0.04953711, 0.06703474, 0.38530195, 1.]]) assert_array_almost_equal(x, expected) assert_array_almost_equal(x2, expected) def test_invalid_eigs(self): assert_raises(ValueError, random_correlation.rvs, None) assert_raises(ValueError, random_correlation.rvs, 'test') assert_raises(ValueError, random_correlation.rvs, 2.5) assert_raises(ValueError, random_correlation.rvs, [2.5]) assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]]) assert_raises(ValueError, random_correlation.rvs, [2.5, -.5]) assert_raises(ValueError, random_correlation.rvs, [1, 2, .1]) def test_definition(self): # Test the definition of a correlation matrix in several dimensions: # # 1. Det is product of eigenvalues (and positive by construction # in examples) # 2. 1's on diagonal # 3. Matrix is symmetric def norm(i, e): return i*e/sum(e) np.random.seed(123) eigs = [norm(i, np.random.uniform(size=i)) for i in range(2, 6)] eigs.append([4,0,0,0]) ones = [[1.]*len(e) for e in eigs] xs = [random_correlation.rvs(e) for e in eigs] # Test that determinants are products of eigenvalues # These are positive by construction # Could also test that the eigenvalues themselves are correct, # but this seems sufficient. 
dets = [np.fabs(np.linalg.det(x)) for x in xs] dets_known = [np.prod(e) for e in eigs] assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13) # Test for 1's on the diagonal diags = [np.diag(x) for x in xs] for a, b in zip(diags, ones): assert_allclose(a, b, rtol=1e-13) # Correlation matrices are symmetric for x in xs: assert_allclose(x, x.T, rtol=1e-13) def test_to_corr(self): # Check some corner cases in to_corr # ajj == 1 m = np.array([[0.1, 0], [0, 1]], dtype=float) m = random_correlation._to_corr(m) assert_allclose(m, np.array([[1, 0], [0, 0.1]])) # Floating point overflow; fails to compute the correct # rotation, but should still produce some valid rotation # rather than infs/nans with np.errstate(over='ignore'): g = np.array([[0, 1], [-1, 0]]) m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float) m = random_correlation._to_corr(m0.copy()) assert_allclose(m, g.T.dot(m0).dot(g)) m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float) m = random_correlation._to_corr(m0.copy()) assert_allclose(m, g.T.dot(m0).dot(g)) # Zero discriminant; should set the first diag entry to 1 m0 = np.array([[2, 1], [1, 2]], dtype=float) m = random_correlation._to_corr(m0.copy()) assert_allclose(m[0,0], 1) # Slightly negative discriminant; should be approx correct still m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float) m = random_correlation._to_corr(m0.copy()) assert_allclose(m[0,0], 1) class TestUnitaryGroup(object): def test_reproducibility(self): np.random.seed(514) x = unitary_group.rvs(3) x2 = unitary_group.rvs(3, random_state=514) expected = np.array([[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j], [0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j], [-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]]) assert_array_almost_equal(x, expected) assert_array_almost_equal(x2, expected) def test_invalid_dim(self): assert_raises(ValueError, unitary_group.rvs, None) assert_raises(ValueError, unitary_group.rvs, (2, 2)) assert_raises(ValueError, unitary_group.rvs, 1) assert_raises(ValueError, unitary_group.rvs, 2.5) def test_unitarity(self): xs = [unitary_group.rvs(dim) for dim in range(2,12) for i in range(3)] # Test that these are unitary matrices for x in xs: assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15) def test_haar(self): # Test that the eigenvalues, which lie on the unit circle in # the complex plane, are uncorrelated. # Generate samples dim = 5 samples = 1000 # Not too many, or the test takes too long np.random.seed(514) # Note that the test is sensitive to seed too xs = unitary_group.rvs(dim, size=samples) # The angles "x" of the eigenvalues should be uniformly distributed # Overall this seems to be a necessary but weak test of the distribution. 
eigs = np.vstack(scipy.linalg.eigvals(x) for x in xs) x = np.arctan2(eigs.imag, eigs.real) res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf) assert_(res.pvalue > 0.05) def check_pickling(distfn, args): # check that a distribution instance pickles and unpickles # pay special attention to the random_state property # save the random_state (restore later) rndm = distfn.random_state distfn.random_state = 1234 distfn.rvs(*args, size=8) s = pickle.dumps(distfn) r0 = distfn.rvs(*args, size=8) unpickled = pickle.loads(s) r1 = unpickled.rvs(*args, size=8) assert_equal(r0, r1) # restore the random_state distfn.random_state = rndm def test_random_state_property(): scale = np.eye(3) scale[0, 1] = 0.5 scale[1, 0] = 0.5 dists = [ [multivariate_normal, ()], [dirichlet, (np.array([1.]), )], [wishart, (10, scale)], [invwishart, (10, scale)], [multinomial, (5, [0.5, 0.4, 0.1])], [ortho_group, (2,)], [special_ortho_group, (2,)] ] for distfn, args in dists: check_random_state_property(distfn, args) check_pickling(distfn, args)
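# ----------------------------------------------------------------------
# Illustrative sketch, not part of the original test file: the R
# cross-check table in TestMultinomial.test_R above can also be
# reproduced directly from the multinomial pmf,
#     n! / (x_1! * ... * x_k!) * prod_i p_i**x_i.
# `test_multinomial_pmf_by_hand_sketch` and its helper are hypothetical
# additions; they reuse the module's `multinomial` and `assert_allclose`
# imports and only restate that formula for one tabulated point.
def test_multinomial_pmf_by_hand_sketch():
    from math import factorial

    def manual_pmf(x, n, p):
        coef = factorial(n)
        for xi in x:
            coef //= factorial(xi)   # exact: multinomial coefficients are integers
        out = float(coef)
        for xi, pi in zip(x, p):
            out *= pi ** xi
        return out

    # (1, 1, 1) with n = 3 and p = (1/8, 2/8, 5/8) gives 6 * 10/512 = 0.1171875,
    # the same value as r_vals[(1, 1, 1)] in test_R.
    x, n, p = (1, 1, 1), 3, (1/8., 2/8., 5/8.)
    assert_allclose(manual_pmf(x, n, p), 0.117187500, rtol=1e-10)
    assert_allclose(multinomial.pmf(x, n, p), manual_pmf(x, n, p), rtol=1e-10)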
62,607
37.362745
135
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_discrete_distns.py
from __future__ import division, print_function, absolute_import

from scipy.stats import hypergeom, bernoulli
import numpy as np
from numpy.testing import assert_almost_equal


def test_hypergeom_logpmf():
    # symmetries test
    # f(k,N,K,n) = f(n-k,N,N-K,n) = f(K-k,N,K,N-n) = f(k,N,n,K)
    k = 5
    N = 50
    K = 10
    n = 5
    logpmf1 = hypergeom.logpmf(k,N,K,n)
    logpmf2 = hypergeom.logpmf(n-k,N,N-K,n)
    logpmf3 = hypergeom.logpmf(K-k,N,K,N-n)
    logpmf4 = hypergeom.logpmf(k,N,n,K)
    assert_almost_equal(logpmf1, logpmf2, decimal=12)
    assert_almost_equal(logpmf1, logpmf3, decimal=12)
    assert_almost_equal(logpmf1, logpmf4, decimal=12)

    # test related distribution
    # Bernoulli distribution if n = 1
    k = 1
    N = 10
    K = 7
    n = 1
    hypergeom_logpmf = hypergeom.logpmf(k,N,K,n)
    bernoulli_logpmf = bernoulli.logpmf(k,K/N)
    assert_almost_equal(hypergeom_logpmf, bernoulli_logpmf, decimal=12)
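# ----------------------------------------------------------------------
# Illustrative sketch, not part of the original test file: the
# symmetries asserted above follow from the hypergeometric pmf
#     f(k; N, K, n) = C(K, k) * C(N - K, n - k) / C(N, n),
# which treats the "marked" and "drawn" groups interchangeably.  This
# hypothetical extra check restates the f(k,N,K,n) = f(k,N,n,K)
# symmetry on the pmf scale with the same numbers used above.
def test_hypergeom_pmf_symmetry_sketch():
    k, N, K, n = 5, 50, 10, 5
    assert_almost_equal(hypergeom.pmf(k, N, K, n),
                        hypergeom.pmf(k, N, n, K), decimal=12)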
946
28.59375
71
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_mstats_extras.py
from __future__ import division, print_function, absolute_import import numpy as np import numpy.ma as ma import scipy.stats.mstats as ms from numpy.testing import (assert_equal, assert_almost_equal, assert_, assert_allclose) def test_compare_medians_ms(): x = np.arange(7) y = x + 10 assert_almost_equal(ms.compare_medians_ms(x, y), 0) y2 = np.linspace(0, 1, num=10) assert_almost_equal(ms.compare_medians_ms(x, y2), 0.017116406778) def test_hdmedian(): # 1-D array x = ma.arange(11) assert_allclose(ms.hdmedian(x), 5, rtol=1e-14) x.mask = ma.make_mask(x) x.mask[:7] = False assert_allclose(ms.hdmedian(x), 3, rtol=1e-14) # Check that `var` keyword returns a value. TODO: check whether returned # value is actually correct. assert_(ms.hdmedian(x, var=True).size == 2) # 2-D array x2 = ma.arange(22).reshape((11, 2)) assert_allclose(ms.hdmedian(x2, axis=0), [10, 11]) x2.mask = ma.make_mask(x2) x2.mask[:7, :] = False assert_allclose(ms.hdmedian(x2, axis=0), [6, 7]) def test_rsh(): np.random.seed(132345) x = np.random.randn(100) res = ms.rsh(x) # Just a sanity check that the code runs and output shape is correct. # TODO: check that implementation is correct. assert_(res.shape == x.shape) # Check points keyword res = ms.rsh(x, points=[0, 1.]) assert_(res.size == 2) def test_mjci(): # Tests the Marits-Jarrett estimator data = ma.array([77, 87, 88,114,151,210,219,246,253,262, 296,299,306,376,428,515,666,1310,2611]) assert_almost_equal(ms.mjci(data),[55.76819,45.84028,198.87875],5) def test_trimmed_mean_ci(): # Tests the confidence intervals of the trimmed mean. data = ma.array([545,555,558,572,575,576,578,580, 594,605,635,651,653,661,666]) assert_almost_equal(ms.trimmed_mean(data,0.2), 596.2, 1) assert_equal(np.round(ms.trimmed_mean_ci(data,(0.2,0.2)),1), [561.8, 630.6]) def test_idealfourths(): # Tests ideal-fourths test = np.arange(100) assert_almost_equal(np.asarray(ms.idealfourths(test)), [24.416667,74.583333],6) test_2D = test.repeat(3).reshape(-1,3) assert_almost_equal(ms.idealfourths(test_2D, axis=0), [[24.416667,24.416667,24.416667], [74.583333,74.583333,74.583333]],6) assert_almost_equal(ms.idealfourths(test_2D, axis=1), test.repeat(2).reshape(-1,2)) test = [0, 0] _result = ms.idealfourths(test) assert_(np.isnan(_result).all()) class TestQuantiles(object): data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014, 0.887764025,0.239407086,0.349638551,0.972791145,0.149789972, 0.936947700,0.132359948,0.046041972,0.641675031,0.945530547, 0.224218684,0.771450991,0.820257774,0.336458052,0.589113496, 0.509736129,0.696838829,0.491323573,0.622767425,0.775189248, 0.641461450,0.118455200,0.773029450,0.319280007,0.752229111, 0.047841438,0.466295911,0.583850781,0.840581845,0.550086491, 0.466470062,0.504765074,0.226855960,0.362641207,0.891620942, 0.127898691,0.490094097,0.044882048,0.041441695,0.317976349, 0.504135618,0.567353033,0.434617473,0.636243375,0.231803616, 0.230154113,0.160011327,0.819464108,0.854706985,0.438809221, 0.487427267,0.786907310,0.408367937,0.405534192,0.250444460, 0.995309248,0.144389588,0.739947527,0.953543606,0.680051621, 0.388382017,0.863530727,0.006514031,0.118007779,0.924024803, 0.384236354,0.893687694,0.626534881,0.473051932,0.750134705, 0.241843555,0.432947602,0.689538104,0.136934797,0.150206859, 0.474335206,0.907775349,0.525869295,0.189184225,0.854284286, 0.831089744,0.251637345,0.587038213,0.254475554,0.237781276, 0.827928620,0.480283781,0.594514455,0.213641488,0.024194386, 0.536668589,0.699497811,0.892804071,0.093835427,0.731107772] def test_hdquantiles(self): data = 
self.data assert_almost_equal(ms.hdquantiles(data,[0., 1.]), [0.006514031, 0.995309248]) hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75]) assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,]) hdq = ms.hdquantiles_sd(data,[0.25, 0.5, 0.75]) assert_almost_equal(hdq, [0.03786954, 0.03805389, 0.03800152,], 4) data = np.array(data).reshape(10,10) hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0) assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75])) assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75])) hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True) assert_almost_equal(hdq[...,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True)) assert_almost_equal(hdq[...,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True)) def test_hdquantiles_sd(self): # Only test that code runs, implementation not checked for correctness res = ms.hdquantiles_sd(self.data) assert_(res.size == 3) def test_mquantiles_cimj(self): # Only test that code runs, implementation not checked for correctness ci_lower, ci_upper = ms.mquantiles_cimj(self.data) assert_(ci_lower.size == ci_upper.size == 3)
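# ----------------------------------------------------------------------
# Illustrative sketch, not part of the original test file: the 596.2
# expected in test_trimmed_mean_ci can be reproduced by hand.  A 20%
# trimmed mean of 15 sorted observations drops 0.2 * 15 = 3 values from
# each end and averages the remaining 9; plain numpy is used here
# instead of the masked-array routine exercised above, so this is only
# a cross-check of the arithmetic, not of the mstats implementation.
def test_trimmed_mean_by_hand_sketch():
    data = np.array([545, 555, 558, 572, 575, 576, 578, 580,
                     594, 605, 635, 651, 653, 661, 666], dtype=float)
    trimmed = np.sort(data)[3:-3]    # drop the 3 smallest and 3 largest
    assert_almost_equal(trimmed.mean(), 596.2, 1)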
5,464
38.890511
82
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/__init__.py
0
0
0
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_fit.py
from __future__ import division, print_function, absolute_import import os import numpy as np from numpy.testing import assert_allclose from scipy._lib._numpy_compat import suppress_warnings import pytest from scipy import stats from .test_continuous_basic import distcont # this is not a proper statistical test for convergence, but only # verifies that the estimate and true values don't differ by too much fit_sizes = [1000, 5000] # sample sizes to try thresh_percent = 0.25 # percent of true parameters for fail cut-off thresh_min = 0.75 # minimum difference estimate - true to fail test failing_fits = [ 'burr', 'chi2', 'gausshyper', 'genexpon', 'gengamma', 'kappa4', 'ksone', 'mielke', 'ncf', 'ncx2', 'pearson3', 'powerlognorm', 'truncexpon', 'tukeylambda', 'vonmises', 'wrapcauchy', 'levy_stable', 'trapz' ] # Don't run the fit test on these: skip_fit = [ 'erlang', # Subclass of gamma, generates a warning. ] def cases_test_cont_fit(): # this tests the closeness of the estimated parameters to the true # parameters with fit method of continuous distributions # Note: is slow, some distributions don't converge with sample size <= 10000 for distname, arg in distcont: if distname not in skip_fit: yield distname, arg @pytest.mark.slow @pytest.mark.parametrize('distname,arg', cases_test_cont_fit()) def test_cont_fit(distname, arg): if distname in failing_fits: # Skip failing fits unless overridden xfail = True try: xfail = not int(os.environ['SCIPY_XFAIL']) except: pass if xfail: msg = "Fitting %s doesn't work reliably yet" % distname msg += " [Set environment variable SCIPY_XFAIL=1 to run this test nevertheless.]" pytest.xfail(msg) distfn = getattr(stats, distname) truearg = np.hstack([arg, [0.0, 1.0]]) diffthreshold = np.max(np.vstack([truearg*thresh_percent, np.ones(distfn.numargs+2)*thresh_min]), 0) for fit_size in fit_sizes: # Note that if a fit succeeds, the other fit_sizes are skipped np.random.seed(1234) with np.errstate(all='ignore'), suppress_warnings() as sup: sup.filter(category=DeprecationWarning, message=".*frechet_") rvs = distfn.rvs(size=fit_size, *arg) est = distfn.fit(rvs) # start with default values diff = est - truearg # threshold for location diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min]) if np.any(np.isnan(est)): raise AssertionError('nan returned in fit') else: if np.all(np.abs(diff) <= diffthreshold): break else: txt = 'parameter: %s\n' % str(truearg) txt += 'estimated: %s\n' % str(est) txt += 'diff : %s\n' % str(diff) raise AssertionError('fit not very good in %s\n' % distfn.name + txt) def _check_loc_scale_mle_fit(name, data, desired, atol=None): d = getattr(stats, name) actual = d.fit(data)[-2:] assert_allclose(actual, desired, atol=atol, err_msg='poor mle fit of (loc, scale) in %s' % name) def test_non_default_loc_scale_mle_fit(): data = np.array([1.01, 1.78, 1.78, 1.78, 1.88, 1.88, 1.88, 2.00]) _check_loc_scale_mle_fit('uniform', data, [1.01, 0.99], 1e-3) _check_loc_scale_mle_fit('expon', data, [1.01, 0.73875], 1e-3) def test_expon_fit(): """gh-6167""" data = [0, 0, 0, 0, 2, 2, 2, 2] phat = stats.expon.fit(data, floc=0) assert_allclose(phat, [0, 1.0], atol=1e-3)
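# ----------------------------------------------------------------------
# Illustrative sketch, not part of the original test file: the same
# "estimate should land near the truth" idea as test_cont_fit, stripped
# of the parametrized machinery.  The sample size and the 0.1 tolerance
# are arbitrary choices for this sketch, not the module's
# thresh_percent / thresh_min rule.
def test_norm_fit_recovers_parameters_sketch():
    np.random.seed(1234)
    sample = stats.norm.rvs(loc=3.0, scale=2.0, size=10000)
    loc_hat, scale_hat = stats.norm.fit(sample)
    assert_allclose([loc_hat, scale_hat], [3.0, 2.0], atol=0.1)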
3,803
29.677419
93
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_rank.py
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_equal, assert_array_equal from scipy.stats import rankdata, tiecorrect class TestTieCorrect(object): def test_empty(self): """An empty array requires no correction, should return 1.0.""" ranks = np.array([], dtype=np.float64) c = tiecorrect(ranks) assert_equal(c, 1.0) def test_one(self): """A single element requires no correction, should return 1.0.""" ranks = np.array([1.0], dtype=np.float64) c = tiecorrect(ranks) assert_equal(c, 1.0) def test_no_correction(self): """Arrays with no ties require no correction.""" ranks = np.arange(2.0) c = tiecorrect(ranks) assert_equal(c, 1.0) ranks = np.arange(3.0) c = tiecorrect(ranks) assert_equal(c, 1.0) def test_basic(self): """Check a few basic examples of the tie correction factor.""" # One tie of two elements ranks = np.array([1.0, 2.5, 2.5]) c = tiecorrect(ranks) T = 2.0 N = ranks.size expected = 1.0 - (T**3 - T) / (N**3 - N) assert_equal(c, expected) # One tie of two elements (same as above, but tie is not at the end) ranks = np.array([1.5, 1.5, 3.0]) c = tiecorrect(ranks) T = 2.0 N = ranks.size expected = 1.0 - (T**3 - T) / (N**3 - N) assert_equal(c, expected) # One tie of three elements ranks = np.array([1.0, 3.0, 3.0, 3.0]) c = tiecorrect(ranks) T = 3.0 N = ranks.size expected = 1.0 - (T**3 - T) / (N**3 - N) assert_equal(c, expected) # Two ties, lengths 2 and 3. ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0]) c = tiecorrect(ranks) T1 = 2.0 T2 = 3.0 N = ranks.size expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N) assert_equal(c, expected) def test_overflow(self): ntie, k = 2000, 5 a = np.repeat(np.arange(k), ntie) n = a.size # ntie * k out = tiecorrect(rankdata(a)) assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n)) class TestRankData(object): def test_empty(self): """stats.rankdata([]) should return an empty array.""" a = np.array([], dtype=int) r = rankdata(a) assert_array_equal(r, np.array([], dtype=np.float64)) r = rankdata([]) assert_array_equal(r, np.array([], dtype=np.float64)) def test_one(self): """Check stats.rankdata with an array of length 1.""" data = [100] a = np.array(data, dtype=int) r = rankdata(a) assert_array_equal(r, np.array([1.0], dtype=np.float64)) r = rankdata(data) assert_array_equal(r, np.array([1.0], dtype=np.float64)) def test_basic(self): """Basic tests of stats.rankdata.""" data = [100, 10, 50] expected = np.array([3.0, 1.0, 2.0], dtype=np.float64) a = np.array(data, dtype=int) r = rankdata(a) assert_array_equal(r, expected) r = rankdata(data) assert_array_equal(r, expected) data = [40, 10, 30, 10, 50] expected = np.array([4.0, 1.5, 3.0, 1.5, 5.0], dtype=np.float64) a = np.array(data, dtype=int) r = rankdata(a) assert_array_equal(r, expected) r = rankdata(data) assert_array_equal(r, expected) data = [20, 20, 20, 10, 10, 10] expected = np.array([5.0, 5.0, 5.0, 2.0, 2.0, 2.0], dtype=np.float64) a = np.array(data, dtype=int) r = rankdata(a) assert_array_equal(r, expected) r = rankdata(data) assert_array_equal(r, expected) # The docstring states explicitly that the argument is flattened. 
a2d = a.reshape(2, 3) r = rankdata(a2d) assert_array_equal(r, expected) def test_rankdata_object_string(self): min_rank = lambda a: [1 + sum(i < j for i in a) for j in a] max_rank = lambda a: [sum(i <= j for i in a) for j in a] ordinal_rank = lambda a: min_rank([(x, i) for i, x in enumerate(a)]) def average_rank(a): return [(i + j) / 2.0 for i, j in zip(min_rank(a), max_rank(a))] def dense_rank(a): b = np.unique(a) return [1 + sum(i < j for i in b) for j in a] rankf = dict(min=min_rank, max=max_rank, ordinal=ordinal_rank, average=average_rank, dense=dense_rank) def check_ranks(a): for method in 'min', 'max', 'dense', 'ordinal', 'average': out = rankdata(a, method=method) assert_array_equal(out, rankf[method](a)) val = ['foo', 'bar', 'qux', 'xyz', 'abc', 'efg', 'ace', 'qwe', 'qaz'] check_ranks(np.random.choice(val, 200)) check_ranks(np.random.choice(val, 200).astype('object')) val = np.array([0, 1, 2, 2.718, 3, 3.141], dtype='object') check_ranks(np.random.choice(val, 200).astype('object')) def test_large_int(self): data = np.array([2**60, 2**60+1], dtype=np.uint64) r = rankdata(data) assert_array_equal(r, [1.0, 2.0]) data = np.array([2**60, 2**60+1], dtype=np.int64) r = rankdata(data) assert_array_equal(r, [1.0, 2.0]) data = np.array([2**60, -2**60+1], dtype=np.int64) r = rankdata(data) assert_array_equal(r, [2.0, 1.0]) def test_big_tie(self): for n in [10000, 100000, 1000000]: data = np.ones(n, dtype=int) r = rankdata(data) expected_rank = 0.5 * (n + 1) assert_array_equal(r, expected_rank * data, "test failed with n=%d" % n) _cases = ( # values, method, expected ([], 'average', []), ([], 'min', []), ([], 'max', []), ([], 'dense', []), ([], 'ordinal', []), # ([100], 'average', [1.0]), ([100], 'min', [1.0]), ([100], 'max', [1.0]), ([100], 'dense', [1.0]), ([100], 'ordinal', [1.0]), # ([100, 100, 100], 'average', [2.0, 2.0, 2.0]), ([100, 100, 100], 'min', [1.0, 1.0, 1.0]), ([100, 100, 100], 'max', [3.0, 3.0, 3.0]), ([100, 100, 100], 'dense', [1.0, 1.0, 1.0]), ([100, 100, 100], 'ordinal', [1.0, 2.0, 3.0]), # ([100, 300, 200], 'average', [1.0, 3.0, 2.0]), ([100, 300, 200], 'min', [1.0, 3.0, 2.0]), ([100, 300, 200], 'max', [1.0, 3.0, 2.0]), ([100, 300, 200], 'dense', [1.0, 3.0, 2.0]), ([100, 300, 200], 'ordinal', [1.0, 3.0, 2.0]), # ([100, 200, 300, 200], 'average', [1.0, 2.5, 4.0, 2.5]), ([100, 200, 300, 200], 'min', [1.0, 2.0, 4.0, 2.0]), ([100, 200, 300, 200], 'max', [1.0, 3.0, 4.0, 3.0]), ([100, 200, 300, 200], 'dense', [1.0, 2.0, 3.0, 2.0]), ([100, 200, 300, 200], 'ordinal', [1.0, 2.0, 4.0, 3.0]), # ([100, 200, 300, 200, 100], 'average', [1.5, 3.5, 5.0, 3.5, 1.5]), ([100, 200, 300, 200, 100], 'min', [1.0, 3.0, 5.0, 3.0, 1.0]), ([100, 200, 300, 200, 100], 'max', [2.0, 4.0, 5.0, 4.0, 2.0]), ([100, 200, 300, 200, 100], 'dense', [1.0, 2.0, 3.0, 2.0, 1.0]), ([100, 200, 300, 200, 100], 'ordinal', [1.0, 3.0, 5.0, 4.0, 2.0]), # ([10] * 30, 'ordinal', np.arange(1.0, 31.0)), ) def test_cases(): for values, method, expected in _cases: r = rankdata(values, method=method) assert_array_equal(r, expected)
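# ----------------------------------------------------------------------
# Illustrative sketch, not part of the original test file: every
# expected value in TestTieCorrect comes from the same rule.  With tie
# groups of sizes t_1, ..., t_m among n ranks the correction factor is
#     1 - sum_i (t_i**3 - t_i) / (n**3 - n).
# This hypothetical extra check recomputes that straight from the raw
# data (via np.unique) for one small example with a pair and a triple
# of tied values, mirroring the arithmetic used in test_overflow.
def test_tiecorrect_matches_formula_sketch():
    data = [40, 10, 30, 10, 50, 30, 30]   # one tie of 2 (the 10s), one of 3 (the 30s)
    n = len(data)
    _, counts = np.unique(data, return_counts=True)
    expected = 1.0 - np.sum(counts**3 - counts) / float(n**3 - n)
    assert_equal(tiecorrect(rankdata(data)), expected)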
7,514
33.315068
77
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_stats.py
""" Test functions for stats module WRITTEN BY LOUIS LUANGKESORN <lluang@yahoo.com> FOR THE STATS MODULE BASED ON WILKINSON'S STATISTICS QUIZ http://www.stanford.edu/~clint/bench/wilk.txt Additional tests by a host of SciPy developers. """ from __future__ import division, print_function, absolute_import import os import sys import warnings from collections import namedtuple from numpy.testing import (assert_, assert_equal, assert_almost_equal, assert_array_almost_equal, assert_array_equal, assert_approx_equal, assert_allclose) import pytest from pytest import raises as assert_raises from scipy._lib._numpy_compat import suppress_warnings import numpy.ma.testutils as mat from numpy import array, arange, float32, float64, power import numpy as np import scipy.stats as stats import scipy.stats.mstats as mstats import scipy.stats.mstats_basic as mstats_basic from scipy._lib._version import NumpyVersion from scipy._lib.six import xrange from .common_tests import check_named_results """ Numbers in docstrings beginning with 'W' refer to the section numbers and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are considered to be essential functionality. True testing and evaluation of a statistics package requires use of the NIST Statistical test data. See McCoullough(1999) Assessing The Reliability of Statistical Software for a test methodology and its implementation in testing SAS, SPSS, and S-Plus """ # Datasets # These data sets are from the nasty.dat sets used by Wilkinson # For completeness, I should write the relevant tests and count them as failures # Somewhat acceptable, since this is still beta software. It would count as a # good target for 1.0 status X = array([1,2,3,4,5,6,7,8,9], float) ZERO = array([0,0,0,0,0,0,0,0,0], float) BIG = array([99999991,99999992,99999993,99999994,99999995,99999996,99999997, 99999998,99999999], float) LITTLE = array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996, 0.99999997,0.99999998,0.99999999], float) HUGE = array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12], float) TINY = array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12], float) ROUND = array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5], float) class TestTrimmedStats(object): # TODO: write these tests to handle missing values properly dprec = np.finfo(np.float64).precision def test_tmean(self): y = stats.tmean(X, (2, 8), (True, True)) assert_approx_equal(y, 5.0, significant=self.dprec) y1 = stats.tmean(X, limits=(2, 8), inclusive=(False, False)) y2 = stats.tmean(X, limits=None) assert_approx_equal(y1, y2, significant=self.dprec) def test_tvar(self): y = stats.tvar(X, limits=(2, 8), inclusive=(True, True)) assert_approx_equal(y, 4.6666666666666661, significant=self.dprec) y = stats.tvar(X, limits=None) assert_approx_equal(y, X.var(ddof=1), significant=self.dprec) def test_tstd(self): y = stats.tstd(X, (2, 8), (True, True)) assert_approx_equal(y, 2.1602468994692865, significant=self.dprec) y = stats.tstd(X, limits=None) assert_approx_equal(y, X.std(ddof=1), significant=self.dprec) def test_tmin(self): assert_equal(stats.tmin(4), 4) x = np.arange(10) assert_equal(stats.tmin(x), 0) assert_equal(stats.tmin(x, lowerlimit=0), 0) assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), 1) x = x.reshape((5, 2)) assert_equal(stats.tmin(x, lowerlimit=0, inclusive=False), [2, 1]) assert_equal(stats.tmin(x, axis=1), [0, 2, 4, 6, 8]) assert_equal(stats.tmin(x, axis=None), 0) x = np.arange(10.) 
x[9] = np.nan with suppress_warnings() as sup: r = sup.record(RuntimeWarning, "invalid value*") assert_equal(stats.tmin(x), np.nan) assert_equal(stats.tmin(x, nan_policy='omit'), 0.) assert_raises(ValueError, stats.tmin, x, nan_policy='raise') assert_raises(ValueError, stats.tmin, x, nan_policy='foobar') msg = "'propagate', 'raise', 'omit'" with assert_raises(ValueError, message=msg): stats.tmin(x, nan_policy='foo') def test_tmax(self): assert_equal(stats.tmax(4), 4) x = np.arange(10) assert_equal(stats.tmax(x), 9) assert_equal(stats.tmax(x, upperlimit=9), 9) assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), 8) x = x.reshape((5, 2)) assert_equal(stats.tmax(x, upperlimit=9, inclusive=False), [8, 7]) assert_equal(stats.tmax(x, axis=1), [1, 3, 5, 7, 9]) assert_equal(stats.tmax(x, axis=None), 9) x = np.arange(10.) x[6] = np.nan with suppress_warnings() as sup: r = sup.record(RuntimeWarning, "invalid value*") assert_equal(stats.tmax(x), np.nan) assert_equal(stats.tmax(x, nan_policy='omit'), 9.) assert_raises(ValueError, stats.tmax, x, nan_policy='raise') assert_raises(ValueError, stats.tmax, x, nan_policy='foobar') def test_tsem(self): y = stats.tsem(X, limits=(3, 8), inclusive=(False, True)) y_ref = np.array([4, 5, 6, 7, 8]) assert_approx_equal(y, y_ref.std(ddof=1) / np.sqrt(y_ref.size), significant=self.dprec) assert_approx_equal(stats.tsem(X, limits=[-1, 10]), stats.tsem(X, limits=None), significant=self.dprec) class TestCorrPearsonr(object): """ W.II.D. Compute a correlation matrix on all the variables. All the correlations, except for ZERO and MISS, should be exactly 1. ZERO and MISS should have undefined or missing correlations with the other variables. The same should go for SPEARMAN correlations, if your program has them. """ def test_pXX(self): y = stats.pearsonr(X,X) r = y[0] assert_approx_equal(r,1.0) def test_pXBIG(self): y = stats.pearsonr(X,BIG) r = y[0] assert_approx_equal(r,1.0) def test_pXLITTLE(self): y = stats.pearsonr(X,LITTLE) r = y[0] assert_approx_equal(r,1.0) def test_pXHUGE(self): y = stats.pearsonr(X,HUGE) r = y[0] assert_approx_equal(r,1.0) def test_pXTINY(self): y = stats.pearsonr(X,TINY) r = y[0] assert_approx_equal(r,1.0) def test_pXROUND(self): y = stats.pearsonr(X,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_pBIGBIG(self): y = stats.pearsonr(BIG,BIG) r = y[0] assert_approx_equal(r,1.0) def test_pBIGLITTLE(self): y = stats.pearsonr(BIG,LITTLE) r = y[0] assert_approx_equal(r,1.0) def test_pBIGHUGE(self): y = stats.pearsonr(BIG,HUGE) r = y[0] assert_approx_equal(r,1.0) def test_pBIGTINY(self): y = stats.pearsonr(BIG,TINY) r = y[0] assert_approx_equal(r,1.0) def test_pBIGROUND(self): y = stats.pearsonr(BIG,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_pLITTLELITTLE(self): y = stats.pearsonr(LITTLE,LITTLE) r = y[0] assert_approx_equal(r,1.0) def test_pLITTLEHUGE(self): y = stats.pearsonr(LITTLE,HUGE) r = y[0] assert_approx_equal(r,1.0) def test_pLITTLETINY(self): y = stats.pearsonr(LITTLE,TINY) r = y[0] assert_approx_equal(r,1.0) def test_pLITTLEROUND(self): y = stats.pearsonr(LITTLE,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_pHUGEHUGE(self): y = stats.pearsonr(HUGE,HUGE) r = y[0] assert_approx_equal(r,1.0) def test_pHUGETINY(self): y = stats.pearsonr(HUGE,TINY) r = y[0] assert_approx_equal(r,1.0) def test_pHUGEROUND(self): y = stats.pearsonr(HUGE,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_pTINYTINY(self): y = stats.pearsonr(TINY,TINY) r = y[0] assert_approx_equal(r,1.0) def test_pTINYROUND(self): y = stats.pearsonr(TINY,ROUND) 
r = y[0] assert_approx_equal(r,1.0) def test_pROUNDROUND(self): y = stats.pearsonr(ROUND,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_r_exactly_pos1(self): a = arange(3.0) b = a r, prob = stats.pearsonr(a,b) assert_equal(r, 1.0) assert_equal(prob, 0.0) def test_r_exactly_neg1(self): a = arange(3.0) b = -a r, prob = stats.pearsonr(a,b) assert_equal(r, -1.0) assert_equal(prob, 0.0) def test_basic(self): # A basic test, with a correlation coefficient # that is not 1 or -1. a = array([-1, 0, 1]) b = array([0, 0, 3]) r, prob = stats.pearsonr(a, b) assert_approx_equal(r, np.sqrt(3)/2) assert_approx_equal(prob, 1.0/3) class TestFisherExact(object): """Some tests to show that fisher_exact() works correctly. Note that in SciPy 0.9.0 this was not working well for large numbers due to inaccuracy of the hypergeom distribution (see #1218). Fixed now. Also note that R and Scipy have different argument formats for their hypergeometric distribution functions. R: > phyper(18999, 99000, 110000, 39000, lower.tail = FALSE) [1] 1.701815e-09 """ def test_basic(self): fisher_exact = stats.fisher_exact res = fisher_exact([[14500, 20000], [30000, 40000]])[1] assert_approx_equal(res, 0.01106, significant=4) res = fisher_exact([[100, 2], [1000, 5]])[1] assert_approx_equal(res, 0.1301, significant=4) res = fisher_exact([[2, 7], [8, 2]])[1] assert_approx_equal(res, 0.0230141, significant=6) res = fisher_exact([[5, 1], [10, 10]])[1] assert_approx_equal(res, 0.1973244, significant=6) res = fisher_exact([[5, 15], [20, 20]])[1] assert_approx_equal(res, 0.0958044, significant=6) res = fisher_exact([[5, 16], [20, 25]])[1] assert_approx_equal(res, 0.1725862, significant=6) res = fisher_exact([[10, 5], [10, 1]])[1] assert_approx_equal(res, 0.1973244, significant=6) res = fisher_exact([[5, 0], [1, 4]])[1] assert_approx_equal(res, 0.04761904, significant=6) res = fisher_exact([[0, 1], [3, 2]])[1] assert_approx_equal(res, 1.0) res = fisher_exact([[0, 2], [6, 4]])[1] assert_approx_equal(res, 0.4545454545) res = fisher_exact([[2, 7], [8, 2]]) assert_approx_equal(res[1], 0.0230141, significant=6) assert_approx_equal(res[0], 4.0 / 56) def test_precise(self): # results from R # # R defines oddsratio differently (see Notes section of fisher_exact # docstring), so those will not match. We leave them in anyway, in # case they will be useful later on. We test only the p-value. tablist = [ ([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)), ([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)), ([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)), ([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)), ([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)), ([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)), ([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)), ([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)), ([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)), ([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)), ([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000)) ] for table, res_r in tablist: res = stats.fisher_exact(np.asarray(table)) np.testing.assert_almost_equal(res[1], res_r[1], decimal=11, verbose=True) @pytest.mark.slow def test_large_numbers(self): # Test with some large numbers. 
Regression test for #1401 pvals = [5.56e-11, 2.666e-11, 1.363e-11] # from R for pval, num in zip(pvals, [75, 76, 77]): res = stats.fisher_exact([[17704, 496], [1065, num]])[1] assert_approx_equal(res, pval, significant=4) res = stats.fisher_exact([[18000, 80000], [20000, 90000]])[1] assert_approx_equal(res, 0.2751, significant=4) def test_raises(self): # test we raise an error for wrong shape of input. assert_raises(ValueError, stats.fisher_exact, np.arange(6).reshape(2, 3)) def test_row_or_col_zero(self): tables = ([[0, 0], [5, 10]], [[5, 10], [0, 0]], [[0, 5], [0, 10]], [[5, 0], [10, 0]]) for table in tables: oddsratio, pval = stats.fisher_exact(table) assert_equal(pval, 1.0) assert_equal(oddsratio, np.nan) def test_less_greater(self): tables = ( # Some tables to compare with R: [[2, 7], [8, 2]], [[200, 7], [8, 300]], [[28, 21], [6, 1957]], [[190, 800], [200, 900]], # Some tables with simple exact values # (includes regression test for ticket #1568): [[0, 2], [3, 0]], [[1, 1], [2, 1]], [[2, 0], [1, 2]], [[0, 1], [2, 3]], [[1, 0], [1, 4]], ) pvals = ( # from R: [0.018521725952066501, 0.9990149169715733], [1.0, 2.0056578803889148e-122], [1.0, 5.7284374608319831e-44], [0.7416227, 0.2959826], # Exact: [0.1, 1.0], [0.7, 0.9], [1.0, 0.3], [2./3, 1.0], [1.0, 1./3], ) for table, pval in zip(tables, pvals): res = [] res.append(stats.fisher_exact(table, alternative="less")[1]) res.append(stats.fisher_exact(table, alternative="greater")[1]) assert_allclose(res, pval, atol=0, rtol=1e-7) def test_gh3014(self): # check if issue #3014 has been fixed. # before, this would have risen a ValueError odds, pvalue = stats.fisher_exact([[1, 2], [9, 84419233]]) class TestCorrSpearmanr(object): """ W.II.D. Compute a correlation matrix on all the variables. All the correlations, except for ZERO and MISS, should be exactly 1. ZERO and MISS should have undefined or missing correlations with the other variables. The same should go for SPEARMAN corelations, if your program has them. """ def test_scalar(self): y = stats.spearmanr(4., 2.) assert_(np.isnan(y).all()) def test_uneven_lengths(self): assert_raises(ValueError, stats.spearmanr, [1, 2, 1], [8, 9]) assert_raises(ValueError, stats.spearmanr, [1, 2, 1], 8) def test_nan_policy(self): x = np.arange(10.) 
x[9] = np.nan assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan)) assert_array_equal(stats.spearmanr(x, x, nan_policy='omit'), (1.0, 0.0)) assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise') assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar') def test_sXX(self): y = stats.spearmanr(X,X) r = y[0] assert_approx_equal(r,1.0) def test_sXBIG(self): y = stats.spearmanr(X,BIG) r = y[0] assert_approx_equal(r,1.0) def test_sXLITTLE(self): y = stats.spearmanr(X,LITTLE) r = y[0] assert_approx_equal(r,1.0) def test_sXHUGE(self): y = stats.spearmanr(X,HUGE) r = y[0] assert_approx_equal(r,1.0) def test_sXTINY(self): y = stats.spearmanr(X,TINY) r = y[0] assert_approx_equal(r,1.0) def test_sXROUND(self): y = stats.spearmanr(X,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_sBIGBIG(self): y = stats.spearmanr(BIG,BIG) r = y[0] assert_approx_equal(r,1.0) def test_sBIGLITTLE(self): y = stats.spearmanr(BIG,LITTLE) r = y[0] assert_approx_equal(r,1.0) def test_sBIGHUGE(self): y = stats.spearmanr(BIG,HUGE) r = y[0] assert_approx_equal(r,1.0) def test_sBIGTINY(self): y = stats.spearmanr(BIG,TINY) r = y[0] assert_approx_equal(r,1.0) def test_sBIGROUND(self): y = stats.spearmanr(BIG,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_sLITTLELITTLE(self): y = stats.spearmanr(LITTLE,LITTLE) r = y[0] assert_approx_equal(r,1.0) def test_sLITTLEHUGE(self): y = stats.spearmanr(LITTLE,HUGE) r = y[0] assert_approx_equal(r,1.0) def test_sLITTLETINY(self): y = stats.spearmanr(LITTLE,TINY) r = y[0] assert_approx_equal(r,1.0) def test_sLITTLEROUND(self): y = stats.spearmanr(LITTLE,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_sHUGEHUGE(self): y = stats.spearmanr(HUGE,HUGE) r = y[0] assert_approx_equal(r,1.0) def test_sHUGETINY(self): y = stats.spearmanr(HUGE,TINY) r = y[0] assert_approx_equal(r,1.0) def test_sHUGEROUND(self): y = stats.spearmanr(HUGE,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_sTINYTINY(self): y = stats.spearmanr(TINY,TINY) r = y[0] assert_approx_equal(r,1.0) def test_sTINYROUND(self): y = stats.spearmanr(TINY,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_sROUNDROUND(self): y = stats.spearmanr(ROUND,ROUND) r = y[0] assert_approx_equal(r,1.0) def test_spearmanr_result_attributes(self): res = stats.spearmanr(X, X) attributes = ('correlation', 'pvalue') check_named_results(res, attributes) def test_spearmanr(): # Cross-check with R: # cor.test(c(1,2,3,4,5),c(5,6,7,8,7),method="spearmanr") x1 = [1, 2, 3, 4, 5] x2 = [5, 6, 7, 8, 7] expected = (0.82078268166812329, 0.088587005313543798) res = stats.spearmanr(x1, x2) assert_approx_equal(res[0], expected[0]) assert_approx_equal(res[1], expected[1]) attributes = ('correlation', 'pvalue') res = stats.spearmanr(x1, x2) check_named_results(res, attributes) # with only ties in one or both inputs with np.errstate(invalid="ignore"): assert_equal(stats.spearmanr([2,2,2], [2,2,2]), (np.nan, np.nan)) assert_equal(stats.spearmanr([2,0,2], [2,2,2]), (np.nan, np.nan)) assert_equal(stats.spearmanr([2,2,2], [2,0,2]), (np.nan, np.nan)) # empty arrays provided as input assert_equal(stats.spearmanr([], []), (np.nan, np.nan)) np.random.seed(7546) x = np.array([np.random.normal(loc=1, scale=1, size=500), np.random.normal(loc=1, scale=1, size=500)]) corr = [[1.0, 0.3], [0.3, 1.0]] x = np.dot(np.linalg.cholesky(corr), x) expected = (0.28659685838743354, 6.579862219051161e-11) res = stats.spearmanr(x[0], x[1]) assert_approx_equal(res[0], expected[0]) assert_approx_equal(res[1], expected[1]) 
assert_approx_equal(stats.spearmanr([1,1,2], [1,1,2])[0], 1.0) # test nan_policy x = np.arange(10.) x[9] = np.nan assert_array_equal(stats.spearmanr(x, x), (np.nan, np.nan)) assert_allclose(stats.spearmanr(x, x, nan_policy='omit'), (1.0, 0)) assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='raise') assert_raises(ValueError, stats.spearmanr, x, x, nan_policy='foobar') # test unequal length inputs x = np.arange(10.) y = np.arange(20.) assert_raises(ValueError, stats.spearmanr, x, y) #test paired value x1 = [1, 2, 3, 4] x2 = [8, 7, 6, np.nan] res1 = stats.spearmanr(x1, x2, nan_policy='omit') res2 = stats.spearmanr(x1[:3], x2[:3], nan_policy='omit') assert_equal(res1, res2) # Regression test for GitHub issue #6061 - Overflow on Windows x = list(range(2000)) y = list(range(2000)) y[0], y[9] = y[9], y[0] y[10], y[434] = y[434], y[10] y[435], y[1509] = y[1509], y[435] # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1)) # = 1 - (1 / 500) # = 0.998 x.append(np.nan) y.append(3.0) assert_almost_equal(stats.spearmanr(x, y, nan_policy='omit')[0], 0.998) class TestCorrSpearmanrTies(object): """Some tests of tie-handling by the spearmanr function.""" def test_tie1(self): # Data x = [1.0, 2.0, 3.0, 4.0] y = [1.0, 2.0, 2.0, 3.0] # Ranks of the data, with tie-handling. xr = [1.0, 2.0, 3.0, 4.0] yr = [1.0, 2.5, 2.5, 4.0] # Result of spearmanr should be the same as applying # pearsonr to the ranks. sr = stats.spearmanr(x, y) pr = stats.pearsonr(xr, yr) assert_almost_equal(sr, pr) def test_tie2(self): # Test tie-handling if inputs contain nan's # Data without nan's x1 = [1, 2, 2.5, 2] y1 = [1, 3, 2.5, 4] # Same data with nan's x2 = [1, 2, 2.5, 2, np.nan] y2 = [1, 3, 2.5, 4, np.nan] # Results for two data sets should be the same if nan's are ignored sr1 = stats.spearmanr(x1, y1) sr2 = stats.spearmanr(x2, y2, nan_policy='omit') assert_almost_equal(sr1, sr2) # W.II.E. Tabulate X against X, using BIG as a case weight. The values # should appear on the diagonal and the total should be 899999955. # If the table cannot hold these values, forget about working with # census data. You can also tabulate HUGE against TINY. There is no # reason a tabulation program should not be able to distinguish # different values regardless of their magnitude. # I need to figure out how to do this one. 
def test_kendalltau(): # with some ties # Cross-check with R: # cor.test(c(12,2,1,12,2),c(1,4,7,1,0),method="kendall",exact=FALSE) x1 = [12, 2, 1, 12, 2] x2 = [1, 4, 7, 1, 0] expected = (-0.47140452079103173, 0.28274545993277478) res = stats.kendalltau(x1, x2) assert_approx_equal(res[0], expected[0]) assert_approx_equal(res[1], expected[1]) # test for namedtuple attribute results attributes = ('correlation', 'pvalue') res = stats.kendalltau(x1, x2) check_named_results(res, attributes) # with only ties in one or both inputs assert_equal(stats.kendalltau([2,2,2], [2,2,2]), (np.nan, np.nan)) assert_equal(stats.kendalltau([2,0,2], [2,2,2]), (np.nan, np.nan)) assert_equal(stats.kendalltau([2,2,2], [2,0,2]), (np.nan, np.nan)) # empty arrays provided as input assert_equal(stats.kendalltau([], []), (np.nan, np.nan)) # check with larger arrays np.random.seed(7546) x = np.array([np.random.normal(loc=1, scale=1, size=500), np.random.normal(loc=1, scale=1, size=500)]) corr = [[1.0, 0.3], [0.3, 1.0]] x = np.dot(np.linalg.cholesky(corr), x) expected = (0.19291382765531062, 1.1337095377742629e-10) res = stats.kendalltau(x[0], x[1]) assert_approx_equal(res[0], expected[0]) assert_approx_equal(res[1], expected[1]) # and do we get a tau of 1 for identical inputs? assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0) # test nan_policy x = np.arange(10.) x[9] = np.nan assert_array_equal(stats.kendalltau(x, x), (np.nan, np.nan)) assert_allclose(stats.kendalltau(x, x, nan_policy='omit'), (1.0, 0.00017455009626808976), rtol=1e-06) assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='raise') assert_raises(ValueError, stats.kendalltau, x, x, nan_policy='foobar') # test unequal length inputs x = np.arange(10.) y = np.arange(20.) assert_raises(ValueError, stats.kendalltau, x, y) # test all ties tau, p_value = stats.kendalltau([], []) assert_equal(np.nan, tau) assert_equal(np.nan, p_value) tau, p_value = stats.kendalltau([0], [0]) assert_equal(np.nan, tau) assert_equal(np.nan, p_value) # Regression test for GitHub issue #6061 - Overflow on Windows x = np.arange(2000, dtype=float) x = np.ma.masked_greater(x, 1995) y = np.arange(2000, dtype=float) y = np.concatenate((y[1000:], y[:1000])) assert_(np.isfinite(stats.kendalltau(x,y)[1])) def test_kendalltau_vs_mstats_basic(): np.random.seed(42) for s in range(2,10): a = [] # Generate rankings with ties for i in range(s): a += [i]*i b = list(a) np.random.shuffle(a) np.random.shuffle(b) expected = mstats_basic.kendalltau(a, b) actual = stats.kendalltau(a, b) assert_approx_equal(actual[0], expected[0]) assert_approx_equal(actual[1], expected[1]) def test_kendalltau_nan_2nd_arg(): # regression test for gh-6134: nans in the second arg were not handled x = [1., 2., 3., 4.] 
y = [np.nan, 2.4, 3.4, 3.4] r1 = stats.kendalltau(x, y, nan_policy='omit') r2 = stats.kendalltau(x[1:], y[1:]) assert_allclose(r1.correlation, r2.correlation, atol=1e-15) def test_weightedtau(): x = [12, 2, 1, 12, 2] y = [1, 4, 7, 1, 0] tau, p_value = stats.weightedtau(x, y) assert_approx_equal(tau, -0.56694968153682723) assert_equal(np.nan, p_value) tau, p_value = stats.weightedtau(x, y, additive=False) assert_approx_equal(tau, -0.62205716951801038) assert_equal(np.nan, p_value) # This must be exactly Kendall's tau tau, p_value = stats.weightedtau(x, y, weigher=lambda x: 1) assert_approx_equal(tau, -0.47140452079103173) assert_equal(np.nan, p_value) # Asymmetric, ranked version tau, p_value = stats.weightedtau(x, y, rank=None) assert_approx_equal(tau, -0.4157652301037516) assert_equal(np.nan, p_value) tau, p_value = stats.weightedtau(y, x, rank=None) assert_approx_equal(tau, -0.7181341329699029) assert_equal(np.nan, p_value) tau, p_value = stats.weightedtau(x, y, rank=None, additive=False) assert_approx_equal(tau, -0.40644850966246893) assert_equal(np.nan, p_value) tau, p_value = stats.weightedtau(y, x, rank=None, additive=False) assert_approx_equal(tau, -0.83766582937355172) assert_equal(np.nan, p_value) tau, p_value = stats.weightedtau(x, y, rank=False) assert_approx_equal(tau, -0.51604397940261848) assert_equal(np.nan, p_value) # This must be exactly Kendall's tau tau, p_value = stats.weightedtau(x, y, rank=True, weigher=lambda x: 1) assert_approx_equal(tau, -0.47140452079103173) assert_equal(np.nan, p_value) tau, p_value = stats.weightedtau(y, x, rank=True, weigher=lambda x: 1) assert_approx_equal(tau, -0.47140452079103173) assert_equal(np.nan, p_value) # Test argument conversion tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), y) assert_approx_equal(tau, -0.56694968153682723) tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.int16), y) assert_approx_equal(tau, -0.56694968153682723) tau, p_value = stats.weightedtau(np.asarray(x, dtype=np.float64), np.asarray(y, dtype=np.float64)) assert_approx_equal(tau, -0.56694968153682723) # All ties tau, p_value = stats.weightedtau([], []) assert_equal(np.nan, tau) assert_equal(np.nan, p_value) tau, p_value = stats.weightedtau([0], [0]) assert_equal(np.nan, tau) assert_equal(np.nan, p_value) # Size mismatches assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1, 2]) assert_raises(ValueError, stats.weightedtau, [0, 1], [0, 1], [0]) # NaNs x = [12, 2, 1, 12, 2] y = [1, 4, 7, 1, np.nan] tau, p_value = stats.weightedtau(x, y) assert_approx_equal(tau, -0.56694968153682723) x = [12, 2, np.nan, 12, 2] tau, p_value = stats.weightedtau(x, y) assert_approx_equal(tau, -0.56694968153682723) def test_weightedtau_vs_quadratic(): # Trivial quadratic implementation, all parameters mandatory def wkq(x, y, rank, weigher, add): tot = conc = disc = u = v = 0 for i in range(len(x)): for j in range(len(x)): w = weigher(rank[i]) + weigher(rank[j]) if add else weigher(rank[i]) * weigher(rank[j]) tot += w if x[i] == x[j]: u += w if y[i] == y[j]: v += w if x[i] < x[j] and y[i] < y[j] or x[i] > x[j] and y[i] > y[j]: conc += w elif x[i] < x[j] and y[i] > y[j] or x[i] > x[j] and y[i] < y[j]: disc += w return (conc - disc) / np.sqrt(tot - u) / np.sqrt(tot - v) np.random.seed(42) for s in range(3,10): a = [] # Generate rankings with ties for i in range(s): a += [i]*i b = list(a) np.random.shuffle(a) np.random.shuffle(b) # First pass: use element indices as ranks rank = np.arange(len(a), dtype=np.intp) for _ in range(2): for add in [True, False]: 
expected = wkq(a, b, rank, lambda x: 1./(x+1), add) actual = stats.weightedtau(a, b, rank, lambda x: 1./(x+1), add).correlation assert_approx_equal(expected, actual) # Second pass: use a random rank np.random.shuffle(rank) class TestFindRepeats(object): def test_basic(self): a = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 5] res, nums = stats.find_repeats(a) assert_array_equal(res, [1, 2, 3, 4]) assert_array_equal(nums, [3, 3, 2, 2]) def test_empty_result(self): # Check that empty arrays are returned when there are no repeats. for a in [[10, 20, 50, 30, 40], []]: repeated, counts = stats.find_repeats(a) assert_array_equal(repeated, []) assert_array_equal(counts, []) class TestRegression(object): def test_linregressBIGX(self): # W.II.F. Regress BIG on X. # The constant should be 99999990 and the regression coefficient should be 1. y = stats.linregress(X,BIG) intercept = y[1] r = y[2] assert_almost_equal(intercept,99999990) assert_almost_equal(r,1.0) def test_regressXX(self): # W.IV.B. Regress X on X. # The constant should be exactly 0 and the regression coefficient should be 1. # This is a perfectly valid regression. The program should not complain. y = stats.linregress(X,X) intercept = y[1] r = y[2] assert_almost_equal(intercept,0.0) assert_almost_equal(r,1.0) # W.IV.C. Regress X on BIG and LITTLE (two predictors). The program # should tell you that this model is "singular" because BIG and # LITTLE are linear combinations of each other. Cryptic error # messages are unacceptable here. Singularity is the most # fundamental regression error. # Need to figure out how to handle multiple linear regression. Not obvious def test_regressZEROX(self): # W.IV.D. Regress ZERO on X. # The program should inform you that ZERO has no variance or it should # go ahead and compute the regression and report a correlation and # total sum of squares of exactly 0. y = stats.linregress(X,ZERO) intercept = y[1] r = y[2] assert_almost_equal(intercept,0.0) assert_almost_equal(r,0.0) def test_regress_simple(self): # Regress a line with sinusoidal noise. x = np.linspace(0, 100, 100) y = 0.2 * np.linspace(0, 100, 100) + 10 y += np.sin(np.linspace(0, 20, 100)) res = stats.linregress(x, y) assert_almost_equal(res[4], 2.3957814497838803e-3) def test_regress_simple_onearg_rows(self): # Regress a line w sinusoidal noise, with a single input of shape (2, N). x = np.linspace(0, 100, 100) y = 0.2 * np.linspace(0, 100, 100) + 10 y += np.sin(np.linspace(0, 20, 100)) rows = np.vstack((x, y)) res = stats.linregress(rows) assert_almost_equal(res[4], 2.3957814497838803e-3) def test_regress_simple_onearg_cols(self): x = np.linspace(0, 100, 100) y = 0.2 * np.linspace(0, 100, 100) + 10 y += np.sin(np.linspace(0, 20, 100)) cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1))) res = stats.linregress(cols) assert_almost_equal(res[4], 2.3957814497838803e-3) def test_regress_shape_error(self): # Check that a single input argument to linregress with wrong shape # results in a ValueError. assert_raises(ValueError, stats.linregress, np.ones((3, 3))) def test_linregress(self): # compared with multivariate ols with pinv x = np.arange(11) y = np.arange(5,16) y[[(1),(-2)]] -= 1 y[[(0),(-1)]] += 1 res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733) assert_array_almost_equal(stats.linregress(x,y),res,decimal=14) def test_regress_simple_negative_cor(self): # If the slope of the regression is negative the factor R tend to -1 not 1. 
# Sometimes rounding errors makes it < -1 leading to stderr being NaN a, n = 1e-71, 100000 x = np.linspace(a, 2 * a, n) y = np.linspace(2 * a, a, n) stats.linregress(x, y) res = stats.linregress(x, y) assert_(res[2] >= -1) # propagated numerical errors were not corrected assert_almost_equal(res[2], -1) # perfect negative correlation case assert_(not np.isnan(res[4])) # stderr should stay finite def test_linregress_result_attributes(self): # Regress a line with sinusoidal noise. x = np.linspace(0, 100, 100) y = 0.2 * np.linspace(0, 100, 100) + 10 y += np.sin(np.linspace(0, 20, 100)) res = stats.linregress(x, y) attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr') check_named_results(res, attributes) def test_regress_two_inputs(self): # Regress a simple line formed by two points. x = np.arange(2) y = np.arange(3, 5) res = stats.linregress(x, y) assert_almost_equal(res[3], 0.0) # non-horizontal line assert_almost_equal(res[4], 0.0) # zero stderr def test_regress_two_inputs_horizontal_line(self): # Regress a horizontal line formed by two points. x = np.arange(2) y = np.ones(2) res = stats.linregress(x, y) assert_almost_equal(res[3], 1.0) # horizontal line assert_almost_equal(res[4], 0.0) # zero stderr def test_nist_norris(self): x = [0.2, 337.4, 118.2, 884.6, 10.1, 226.5, 666.3, 996.3, 448.6, 777.0, 558.2, 0.4, 0.6, 775.5, 666.9, 338.0, 447.5, 11.6, 556.0, 228.1, 995.8, 887.6, 120.2, 0.3, 0.3, 556.8, 339.1, 887.2, 999.0, 779.0, 11.1, 118.3, 229.2, 669.1, 448.9, 0.5] y = [0.1, 338.8, 118.1, 888.0, 9.2, 228.1, 668.5, 998.5, 449.1, 778.9, 559.2, 0.3, 0.1, 778.1, 668.8, 339.3, 448.9, 10.8, 557.7, 228.3, 998.0, 888.8, 119.6, 0.3, 0.6, 557.6, 339.3, 888.0, 998.5, 778.9, 10.2, 117.6, 228.9, 668.4, 449.2, 0.2] # Expected values exp_slope = 1.00211681802045 exp_intercept = -0.262323073774029 exp_rvalue = 0.999993745883712 actual = stats.linregress(x, y) assert_almost_equal(actual.slope, exp_slope) assert_almost_equal(actual.intercept, exp_intercept) assert_almost_equal(actual.rvalue, exp_rvalue, decimal=5) def test_empty_input(self): assert_raises(ValueError, stats.linregress, [], []) def test_nan_input(self): x = np.arange(10.) x[9] = np.nan with np.errstate(invalid="ignore"): assert_array_equal(stats.linregress(x, x), (np.nan, np.nan, np.nan, np.nan, np.nan)) def test_theilslopes(): # Basic slope test. slope, intercept, lower, upper = stats.theilslopes([0,1,1]) assert_almost_equal(slope, 0.5) assert_almost_equal(intercept, 0.5) # Test of confidence intervals. 
x = [1, 2, 3, 4, 10, 12, 18] y = [9, 15, 19, 20, 45, 55, 78] slope, intercept, lower, upper = stats.theilslopes(y, x, 0.07) assert_almost_equal(slope, 4) assert_almost_equal(upper, 4.38, decimal=2) assert_almost_equal(lower, 3.71, decimal=2) def test_cumfreq(): x = [1, 4, 2, 1, 3, 1] cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4) assert_array_almost_equal(cumfreqs, np.array([3., 4., 5., 6.])) cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) assert_(extrapoints == 3) # test for namedtuple attribute results attributes = ('cumcount', 'lowerlimit', 'binsize', 'extrapoints') res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5)) check_named_results(res, attributes) def test_relfreq(): a = np.array([1, 4, 2, 1, 3, 1]) relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4) assert_array_almost_equal(relfreqs, array([0.5, 0.16666667, 0.16666667, 0.16666667])) # test for namedtuple attribute results attributes = ('frequency', 'lowerlimit', 'binsize', 'extrapoints') res = stats.relfreq(a, numbins=4) check_named_results(res, attributes) # check array_like input is accepted relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1], numbins=4) assert_array_almost_equal(relfreqs, relfreqs2) class TestGMean(object): def test_1D_list(self): a = (1,2,3,4) actual = stats.gmean(a) desired = power(1*2*3*4,1./4.) assert_almost_equal(actual, desired,decimal=14) desired1 = stats.gmean(a,axis=-1) assert_almost_equal(actual, desired1, decimal=14) def test_1D_array(self): a = array((1,2,3,4), float32) actual = stats.gmean(a) desired = power(1*2*3*4,1./4.) assert_almost_equal(actual, desired, decimal=7) desired1 = stats.gmean(a,axis=-1) assert_almost_equal(actual, desired1, decimal=7) def test_2D_array_default(self): a = array(((1,2,3,4), (1,2,3,4), (1,2,3,4))) actual = stats.gmean(a) desired = array((1,2,3,4)) assert_array_almost_equal(actual, desired, decimal=14) desired1 = stats.gmean(a,axis=0) assert_array_almost_equal(actual, desired1, decimal=14) def test_2D_array_dim1(self): a = array(((1,2,3,4), (1,2,3,4), (1,2,3,4))) actual = stats.gmean(a, axis=1) v = power(1*2*3*4,1./4.) desired = array((v,v,v)) assert_array_almost_equal(actual, desired, decimal=14) def test_large_values(self): a = array([1e100, 1e200, 1e300]) actual = stats.gmean(a) assert_approx_equal(actual, 1e200, significant=13) class TestHMean(object): def test_1D_list(self): a = (1,2,3,4) actual = stats.hmean(a) desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) assert_almost_equal(actual, desired, decimal=14) desired1 = stats.hmean(array(a),axis=-1) assert_almost_equal(actual, desired1, decimal=14) def test_1D_array(self): a = array((1,2,3,4), float64) actual = stats.hmean(a) desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) assert_almost_equal(actual, desired, decimal=14) desired1 = stats.hmean(a,axis=-1) assert_almost_equal(actual, desired1, decimal=14) def test_2D_array_default(self): a = array(((1,2,3,4), (1,2,3,4), (1,2,3,4))) actual = stats.hmean(a) desired = array((1.,2.,3.,4.)) assert_array_almost_equal(actual, desired, decimal=14) actual1 = stats.hmean(a,axis=0) assert_array_almost_equal(actual1, desired, decimal=14) def test_2D_array_dim1(self): a = array(((1,2,3,4), (1,2,3,4), (1,2,3,4))) v = 4. 
/ (1./1 + 1./2 + 1./3 + 1./4) desired1 = array((v,v,v)) actual1 = stats.hmean(a, axis=1) assert_array_almost_equal(actual1, desired1, decimal=14) class TestScoreatpercentile(object): def setup_method(self): self.a1 = [3, 4, 5, 10, -3, -5, 6] self.a2 = [3, -6, -2, 8, 7, 4, 2, 1] self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0] def test_basic(self): x = arange(8) * 0.5 assert_equal(stats.scoreatpercentile(x, 0), 0.) assert_equal(stats.scoreatpercentile(x, 100), 3.5) assert_equal(stats.scoreatpercentile(x, 50), 1.75) def test_fraction(self): scoreatperc = stats.scoreatpercentile # Test defaults assert_equal(scoreatperc(list(range(10)), 50), 4.5) assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5) assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5) assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10,100)), 55) assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10)), 5.5) # explicitly specify interpolation_method 'fraction' (the default) assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'), 4.5) assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7), interpolation_method='fraction'), 4.5) assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8), interpolation_method='fraction'), 4.5) assert_equal(scoreatperc(np.array([1, 10,100]), 50, (10, 100), interpolation_method='fraction'), 55) assert_equal(scoreatperc(np.array([1, 10,100]), 50, (1,10), interpolation_method='fraction'), 5.5) def test_lower_higher(self): scoreatperc = stats.scoreatpercentile # interpolation_method 'lower'/'higher' assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='lower'), 4) assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='higher'), 5) assert_equal(scoreatperc(list(range(10)), 50, (2,7), interpolation_method='lower'), 4) assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7), interpolation_method='higher'), 5) assert_equal(scoreatperc(list(range(100)), 50, (1,8), interpolation_method='lower'), 4) assert_equal(scoreatperc(list(range(100)), 50, (1,8), interpolation_method='higher'), 5) assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (10, 100), interpolation_method='lower'), 10) assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(10, 100), interpolation_method='higher'), 100) assert_equal(scoreatperc(np.array([1, 10, 100]), 50, (1, 10), interpolation_method='lower'), 1) assert_equal(scoreatperc(np.array([1, 10, 100]), 50, limit=(1, 10), interpolation_method='higher'), 10) def test_sequence_per(self): x = arange(8) * 0.5 expected = np.array([0, 3.5, 1.75]) res = stats.scoreatpercentile(x, [0, 100, 50]) assert_allclose(res, expected) assert_(isinstance(res, np.ndarray)) # Test with ndarray. 
Regression test for gh-2861 assert_allclose(stats.scoreatpercentile(x, np.array([0, 100, 50])), expected) # Also test combination of 2-D array, axis not None and array-like per res2 = stats.scoreatpercentile(np.arange(12).reshape((3,4)), np.array([0, 1, 100, 100]), axis=1) expected2 = array([[0, 4, 8], [0.03, 4.03, 8.03], [3, 7, 11], [3, 7, 11]]) assert_allclose(res2, expected2) def test_axis(self): scoreatperc = stats.scoreatpercentile x = arange(12).reshape(3, 4) assert_equal(scoreatperc(x, (25, 50, 100)), [2.75, 5.5, 11.0]) r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] assert_equal(scoreatperc(x, (25, 50, 100), axis=0), r0) r1 = [[0.75, 4.75, 8.75], [1.5, 5.5, 9.5], [3, 7, 11]] assert_equal(scoreatperc(x, (25, 50, 100), axis=1), r1) x = array([[1, 1, 1], [1, 1, 1], [4, 4, 3], [1, 1, 1], [1, 1, 1]]) score = stats.scoreatpercentile(x, 50) assert_equal(score.shape, ()) assert_equal(score, 1.0) score = stats.scoreatpercentile(x, 50, axis=0) assert_equal(score.shape, (3,)) assert_equal(score, [1, 1, 1]) def test_exception(self): assert_raises(ValueError, stats.scoreatpercentile, [1, 2], 56, interpolation_method='foobar') assert_raises(ValueError, stats.scoreatpercentile, [1], 101) assert_raises(ValueError, stats.scoreatpercentile, [1], -1) def test_empty(self): assert_equal(stats.scoreatpercentile([], 50), np.nan) assert_equal(stats.scoreatpercentile(np.array([[], []]), 50), np.nan) assert_equal(stats.scoreatpercentile([], [50, 99]), [np.nan, np.nan]) class TestItemfreq(object): a = [5, 7, 1, 2, 1, 5, 7] * 10 b = [1, 2, 5, 7] def test_numeric_types(self): # Check itemfreq works for all dtypes (adapted from np.unique tests) def _check_itemfreq(dt): a = np.array(self.a, dt) with suppress_warnings() as sup: sup.filter(DeprecationWarning) v = stats.itemfreq(a) assert_array_equal(v[:, 0], [1, 2, 5, 7]) assert_array_equal(v[:, 1], np.array([20, 10, 20, 20], dtype=dt)) dtypes = [np.int32, np.int64, np.float32, np.float64, np.complex64, np.complex128] for dt in dtypes: _check_itemfreq(dt) def test_object_arrays(self): a, b = self.a, self.b dt = 'O' aa = np.empty(len(a), dt) aa[:] = a bb = np.empty(len(b), dt) bb[:] = b with suppress_warnings() as sup: sup.filter(DeprecationWarning) v = stats.itemfreq(aa) assert_array_equal(v[:, 0], bb) def test_structured_arrays(self): a, b = self.a, self.b dt = [('', 'i'), ('', 'i')] aa = np.array(list(zip(a, a)), dt) bb = np.array(list(zip(b, b)), dt) with suppress_warnings() as sup: sup.filter(DeprecationWarning) v = stats.itemfreq(aa) # Arrays don't compare equal because v[:,0] is object array assert_equal(tuple(v[2, 0]), tuple(bb[2])) class TestMode(object): def test_empty(self): vals, counts = stats.mode([]) assert_equal(vals, np.array([])) assert_equal(counts, np.array([])) def test_scalar(self): vals, counts = stats.mode(4.) 
assert_equal(vals, np.array([4.])) assert_equal(counts, np.array([1])) def test_basic(self): data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] vals = stats.mode(data1) assert_equal(vals[0][0], 6) assert_equal(vals[1][0], 3) def test_axes(self): data1 = [10, 10, 30, 40] data2 = [10, 10, 10, 10] data3 = [20, 10, 20, 20] data4 = [30, 30, 30, 30] data5 = [40, 30, 30, 30] arr = np.array([data1, data2, data3, data4, data5]) vals = stats.mode(arr, axis=None) assert_equal(vals[0], np.array([30])) assert_equal(vals[1], np.array([8])) vals = stats.mode(arr, axis=0) assert_equal(vals[0], np.array([[10, 10, 30, 30]])) assert_equal(vals[1], np.array([[2, 3, 3, 2]])) vals = stats.mode(arr, axis=1) assert_equal(vals[0], np.array([[10], [10], [20], [30], [30]])) assert_equal(vals[1], np.array([[2], [4], [3], [4], [3]])) def test_strings(self): data1 = ['rain', 'showers', 'showers'] with suppress_warnings() as sup: r = sup.record(RuntimeWarning, ".*checked for nan values") vals = stats.mode(data1) assert_equal(len(r), 1) assert_equal(vals[0][0], 'showers') assert_equal(vals[1][0], 2) @pytest.mark.xfail(sys.version_info > (3,), reason='numpy github issue 641') def test_mixed_objects(self): objects = [10, True, np.nan, 'hello', 10] arr = np.empty((5,), dtype=object) arr[:] = objects with suppress_warnings() as sup: r = sup.record(RuntimeWarning, ".*checked for nan values") vals = stats.mode(arr) assert_equal(len(r), 1) assert_equal(vals[0][0], 10) assert_equal(vals[1][0], 2) def test_objects(self): # Python objects must be sortable (le + eq) and have ne defined # for np.unique to work. hash is for set. class Point(object): def __init__(self, x): self.x = x def __eq__(self, other): return self.x == other.x def __ne__(self, other): return self.x != other.x def __lt__(self, other): return self.x < other.x def __hash__(self): return hash(self.x) points = [Point(x) for x in [1, 2, 3, 4, 3, 2, 2, 2]] arr = np.empty((8,), dtype=object) arr[:] = points assert_(len(set(points)) == 4) assert_equal(np.unique(arr).shape, (4,)) with suppress_warnings() as sup: r = sup.record(RuntimeWarning, ".*checked for nan values") vals = stats.mode(arr) assert_equal(len(r), 1) assert_equal(vals[0][0], Point(2)) assert_equal(vals[1][0], 4) def test_mode_result_attributes(self): data1 = [3, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] data2 = [] actual = stats.mode(data1) attributes = ('mode', 'count') check_named_results(actual, attributes) actual2 = stats.mode(data2) check_named_results(actual2, attributes) def test_mode_nan(self): data1 = [3, np.nan, 5, 1, 10, 23, 3, 2, 6, 8, 6, 10, 6] actual = stats.mode(data1) assert_equal(actual, (6, 3)) actual = stats.mode(data1, nan_policy='omit') assert_equal(actual, (6, 3)) assert_raises(ValueError, stats.mode, data1, nan_policy='raise') assert_raises(ValueError, stats.mode, data1, nan_policy='foobar') class TestVariability(object): testcase = [1,2,3,4] scalar_testcase = 4. def test_sem(self): # This is not in R, so used: # sqrt(var(testcase)*3/4)/sqrt(3) # y = stats.sem(self.shoes[0]) # assert_approx_equal(y,0.775177399) with suppress_warnings() as sup, np.errstate(invalid="ignore"): sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") y = stats.sem(self.scalar_testcase) assert_(np.isnan(y)) y = stats.sem(self.testcase) assert_approx_equal(y, 0.6454972244) n = len(self.testcase) assert_allclose(stats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)), stats.sem(self.testcase, ddof=2)) x = np.arange(10.) 
x[9] = np.nan assert_equal(stats.sem(x), np.nan) assert_equal(stats.sem(x, nan_policy='omit'), 0.9128709291752769) assert_raises(ValueError, stats.sem, x, nan_policy='raise') assert_raises(ValueError, stats.sem, x, nan_policy='foobar') def test_zmap(self): # not in R, so tested by using: # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4) y = stats.zmap(self.testcase,self.testcase) desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999]) assert_array_almost_equal(desired,y,decimal=12) def test_zmap_axis(self): # Test use of 'axis' keyword in zmap. x = np.array([[0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 2.0], [2.0, 0.0, 2.0, 0.0]]) t1 = 1.0/np.sqrt(2.0/3) t2 = np.sqrt(3.)/3 t3 = np.sqrt(2.) z0 = stats.zmap(x, x, axis=0) z1 = stats.zmap(x, x, axis=1) z0_expected = [[-t1, -t3/2, -t3/2, 0.0], [0.0, t3, -t3/2, t1], [t1, -t3/2, t3, -t1]] z1_expected = [[-1.0, -1.0, 1.0, 1.0], [-t2, -t2, -t2, np.sqrt(3.)], [1.0, -1.0, 1.0, -1.0]] assert_array_almost_equal(z0, z0_expected) assert_array_almost_equal(z1, z1_expected) def test_zmap_ddof(self): # Test use of 'ddof' keyword in zmap. x = np.array([[0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 2.0, 3.0]]) z = stats.zmap(x, x, axis=1, ddof=1) z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3)) z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3)) assert_array_almost_equal(z[0], z0_expected) assert_array_almost_equal(z[1], z1_expected) def test_zscore(self): # not in R, so tested by using: # (testcase[i] - mean(testcase, axis=0)) / sqrt(var(testcase) * 3/4) y = stats.zscore(self.testcase) desired = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999]) assert_array_almost_equal(desired,y,decimal=12) def test_zscore_axis(self): # Test use of 'axis' keyword in zscore. x = np.array([[0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 1.0, 2.0], [2.0, 0.0, 2.0, 0.0]]) t1 = 1.0/np.sqrt(2.0/3) t2 = np.sqrt(3.)/3 t3 = np.sqrt(2.) z0 = stats.zscore(x, axis=0) z1 = stats.zscore(x, axis=1) z0_expected = [[-t1, -t3/2, -t3/2, 0.0], [0.0, t3, -t3/2, t1], [t1, -t3/2, t3, -t1]] z1_expected = [[-1.0, -1.0, 1.0, 1.0], [-t2, -t2, -t2, np.sqrt(3.)], [1.0, -1.0, 1.0, -1.0]] assert_array_almost_equal(z0, z0_expected) assert_array_almost_equal(z1, z1_expected) def test_zscore_ddof(self): # Test use of 'ddof' keyword in zscore. x = np.array([[0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 2.0, 3.0]]) z = stats.zscore(x, axis=1, ddof=1) z0_expected = np.array([-0.5, -0.5, 0.5, 0.5])/(1.0/np.sqrt(3)) z1_expected = np.array([-1.5, -0.5, 0.5, 1.5])/(np.sqrt(5./3)) assert_array_almost_equal(z[0], z0_expected) assert_array_almost_equal(z[1], z1_expected) class _numpy_version_warn_context_mgr(object): """ A simple context maneger class to avoid retyping the same code for different versions of numpy when the only difference is that older versions raise warnings. This manager does not apply for cases where the old code returns different values. 
""" def __init__(self, min_numpy_version, warning_type, num_warnings): if NumpyVersion(np.__version__) < min_numpy_version: self.numpy_is_old = True self.warning_type = warning_type self.num_warnings = num_warnings self.delegate = warnings.catch_warnings(record = True) else: self.numpy_is_old = False def __enter__(self): if self.numpy_is_old: self.warn_list = self.delegate.__enter__() warnings.simplefilter("always") return None def __exit__(self, exc_type, exc_value, traceback): if self.numpy_is_old: self.delegate.__exit__(exc_type, exc_value, traceback) _check_warnings(self.warn_list, self.warning_type, self.num_warnings) def _check_warnings(warn_list, expected_type, expected_len): """ Checks that all of the warnings from a list returned by `warnings.catch_all(record=True)` are of the required type and that the list contains expected number of warnings. """ assert_equal(len(warn_list), expected_len, "number of warnings") for warn_ in warn_list: assert_(warn_.category is expected_type) class TestIQR(object): def test_basic(self): x = np.arange(8) * 0.5 np.random.shuffle(x) assert_equal(stats.iqr(x), 1.75) def test_api(self): d = np.ones((5, 5)) stats.iqr(d) stats.iqr(d, None) stats.iqr(d, 1) stats.iqr(d, (0, 1)) stats.iqr(d, None, (10, 90)) stats.iqr(d, None, (30, 20), 'raw') stats.iqr(d, None, (25, 75), 1.5, 'propagate') if NumpyVersion(np.__version__) >= '1.9.0a': stats.iqr(d, None, (50, 50), 'normal', 'raise', 'linear') stats.iqr(d, None, (25, 75), -0.4, 'omit', 'lower', True) def test_empty(self): assert_equal(stats.iqr([]), np.nan) assert_equal(stats.iqr(np.arange(0)), np.nan) def test_constant(self): # Constant array always gives 0 x = np.ones((7, 4)) assert_equal(stats.iqr(x), 0.0) assert_array_equal(stats.iqr(x, axis=0), np.zeros(4)) assert_array_equal(stats.iqr(x, axis=1), np.zeros(7)) # Even for older versions, 'linear' does not raise a warning with _numpy_version_warn_context_mgr('1.9.0a', RuntimeWarning, 4): assert_equal(stats.iqr(x, interpolation='linear'), 0.0) assert_equal(stats.iqr(x, interpolation='midpoint'), 0.0) assert_equal(stats.iqr(x, interpolation='nearest'), 0.0) assert_equal(stats.iqr(x, interpolation='lower'), 0.0) assert_equal(stats.iqr(x, interpolation='higher'), 0.0) # 0 only along constant dimensions # This also tests much of `axis` y = np.ones((4, 5, 6)) * np.arange(6) assert_array_equal(stats.iqr(y, axis=0), np.zeros((5, 6))) assert_array_equal(stats.iqr(y, axis=1), np.zeros((4, 6))) assert_array_equal(stats.iqr(y, axis=2), 2.5 * np.ones((4, 5))) assert_array_equal(stats.iqr(y, axis=(0, 1)), np.zeros(6)) assert_array_equal(stats.iqr(y, axis=(0, 2)), 3. * np.ones(5)) assert_array_equal(stats.iqr(y, axis=(1, 2)), 3. * np.ones(4)) def test_scalarlike(self): x = np.arange(1) + 7.0 assert_equal(stats.iqr(x[0]), 0.0) assert_equal(stats.iqr(x), 0.0) if NumpyVersion(np.__version__) >= '1.9.0a': assert_array_equal(stats.iqr(x, keepdims=True), [0.0]) else: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") assert_array_equal(stats.iqr(x, keepdims=True), 0.0) _check_warnings(w, RuntimeWarning, 1) def test_2D(self): x = np.arange(15).reshape((3, 5)) assert_equal(stats.iqr(x), 7.0) assert_array_equal(stats.iqr(x, axis=0), 5. * np.ones(5)) assert_array_equal(stats.iqr(x, axis=1), 2. * np.ones(3)) assert_array_equal(stats.iqr(x, axis=(0, 1)), 7.0) assert_array_equal(stats.iqr(x, axis=(1, 0)), 7.0) def test_axis(self): # The `axis` keyword is also put through its paces in `test_keepdims`. 
o = np.random.normal(size=(71, 23)) x = np.dstack([o] * 10) # x.shape = (71, 23, 10) q = stats.iqr(o) assert_equal(stats.iqr(x, axis=(0, 1)), q) x = np.rollaxis(x, -1, 0) # x.shape = (10, 71, 23) assert_equal(stats.iqr(x, axis=(2, 1)), q) x = x.swapaxes(0, 1) # x.shape = (71, 10, 23) assert_equal(stats.iqr(x, axis=(0, 2)), q) x = x.swapaxes(0, 1) # x.shape = (10, 71, 23) assert_equal(stats.iqr(x, axis=(0, 1, 2)), stats.iqr(x, axis=None)) assert_equal(stats.iqr(x, axis=(0,)), stats.iqr(x, axis=0)) d = np.arange(3 * 5 * 7 * 11) # Older versions of numpy only shuffle along axis=0. # Not sure about newer, don't care. np.random.shuffle(d) d = d.reshape((3, 5, 7, 11)) assert_equal(stats.iqr(d, axis=(0, 1, 2))[0], stats.iqr(d[:,:,:, 0].ravel())) assert_equal(stats.iqr(d, axis=(0, 1, 3))[1], stats.iqr(d[:,:, 1,:].ravel())) assert_equal(stats.iqr(d, axis=(3, 1, -4))[2], stats.iqr(d[:,:, 2,:].ravel())) assert_equal(stats.iqr(d, axis=(3, 1, 2))[2], stats.iqr(d[2,:,:,:].ravel())) assert_equal(stats.iqr(d, axis=(3, 2))[2, 1], stats.iqr(d[2, 1,:,:].ravel())) assert_equal(stats.iqr(d, axis=(1, -2))[2, 1], stats.iqr(d[2, :, :, 1].ravel())) assert_equal(stats.iqr(d, axis=(1, 3))[2, 2], stats.iqr(d[2, :, 2,:].ravel())) if NumpyVersion(np.__version__) >= '1.9.0a': assert_raises(IndexError, stats.iqr, d, axis=4) else: assert_raises(ValueError, stats.iqr, d, axis=4) assert_raises(ValueError, stats.iqr, d, axis=(0, 0)) def test_rng(self): x = np.arange(5) assert_equal(stats.iqr(x), 2) assert_equal(stats.iqr(x, rng=(25, 87.5)), 2.5) assert_equal(stats.iqr(x, rng=(12.5, 75)), 2.5) assert_almost_equal(stats.iqr(x, rng=(10, 50)), 1.6) # 3-1.4 assert_raises(ValueError, stats.iqr, x, rng=(0, 101)) assert_raises(ValueError, stats.iqr, x, rng=(np.nan, 25)) assert_raises(TypeError, stats.iqr, x, rng=(0, 50, 60)) def test_interpolation(self): x = np.arange(5) y = np.arange(4) # Default assert_equal(stats.iqr(x), 2) assert_equal(stats.iqr(y), 1.5) if NumpyVersion(np.__version__) >= '1.9.0a': # Linear assert_equal(stats.iqr(x, interpolation='linear'), 2) assert_equal(stats.iqr(y, interpolation='linear'), 1.5) # Higher assert_equal(stats.iqr(x, interpolation='higher'), 2) assert_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 3) assert_equal(stats.iqr(y, interpolation='higher'), 2) # Lower (will generally, but not always be the same as higher) assert_equal(stats.iqr(x, interpolation='lower'), 2) assert_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2) assert_equal(stats.iqr(y, interpolation='lower'), 2) # Nearest assert_equal(stats.iqr(x, interpolation='nearest'), 2) assert_equal(stats.iqr(y, interpolation='nearest'), 1) # Midpoint if NumpyVersion(np.__version__) >= '1.11.0a': assert_equal(stats.iqr(x, interpolation='midpoint'), 2) assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.5) assert_equal(stats.iqr(y, interpolation='midpoint'), 2) else: # midpoint did not work correctly before numpy 1.11.0 assert_equal(stats.iqr(x, interpolation='midpoint'), 2) assert_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2) assert_equal(stats.iqr(y, interpolation='midpoint'), 2) else: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") # Linear assert_equal(stats.iqr(x, interpolation='linear'), 2) assert_equal(stats.iqr(y, interpolation='linear'), 1.5) # Higher assert_equal(stats.iqr(x, interpolation='higher'), 2) assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='higher'), 2.2) assert_equal(stats.iqr(y, interpolation='higher'), 1.5) # Lower 
assert_equal(stats.iqr(x, interpolation='lower'), 2) assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='lower'), 2.2) assert_equal(stats.iqr(y, interpolation='lower'), 1.5) # Nearest assert_equal(stats.iqr(x, interpolation='nearest'), 2) assert_equal(stats.iqr(y, interpolation='nearest'), 1.5) # Midpoint assert_equal(stats.iqr(x, interpolation='midpoint'), 2) assert_almost_equal(stats.iqr(x, rng=(25, 80), interpolation='midpoint'), 2.2) assert_equal(stats.iqr(y, interpolation='midpoint'), 1.5) _check_warnings(w, RuntimeWarning, 11) if NumpyVersion(np.__version__) >= '1.9.0a': assert_raises(ValueError, stats.iqr, x, interpolation='foobar') else: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") assert_equal(stats.iqr(x, interpolation='foobar'), 2) _check_warnings(w, RuntimeWarning, 1) def test_keepdims(self): numpy_version = NumpyVersion(np.__version__) # Also tests most of `axis` x = np.ones((3, 5, 7, 11)) assert_equal(stats.iqr(x, axis=None, keepdims=False).shape, ()) assert_equal(stats.iqr(x, axis=2, keepdims=False).shape, (3, 5, 11)) assert_equal(stats.iqr(x, axis=(0, 1), keepdims=False).shape, (7, 11)) assert_equal(stats.iqr(x, axis=(0, 3), keepdims=False).shape, (5, 7)) assert_equal(stats.iqr(x, axis=(1,), keepdims=False).shape, (3, 7, 11)) assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=False).shape, ()) assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=False).shape, (7,)) if numpy_version >= '1.9.0a': assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, (1, 1, 1, 1)) assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 1, 11)) assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11)) assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1)) assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 1, 7, 11)) assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1)) assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1)) else: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") assert_equal(stats.iqr(x, axis=None, keepdims=True).shape, ()) assert_equal(stats.iqr(x, axis=2, keepdims=True).shape, (3, 5, 11)) assert_equal(stats.iqr(x, axis=(0, 1), keepdims=True).shape, (7, 11)) assert_equal(stats.iqr(x, axis=(0, 3), keepdims=True).shape, (5, 7)) assert_equal(stats.iqr(x, axis=(1,), keepdims=True).shape, (3, 7, 11)) assert_equal(stats.iqr(x, (0, 1, 2, 3), keepdims=True).shape, ()) assert_equal(stats.iqr(x, axis=(0, 1, 3), keepdims=True).shape, (7,)) _check_warnings(w, RuntimeWarning, 7) def test_nanpolicy(self): numpy_version = NumpyVersion(np.__version__) x = np.arange(15.0).reshape((3, 5)) # No NaNs assert_equal(stats.iqr(x, nan_policy='propagate'), 7) assert_equal(stats.iqr(x, nan_policy='omit'), 7) assert_equal(stats.iqr(x, nan_policy='raise'), 7) # Yes NaNs x[1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") if numpy_version < '1.10.0a': # Fails over to mishmash of omit/propagate, but mostly omit # The first case showcases the "incorrect" behavior of np.percentile assert_equal(stats.iqr(x, nan_policy='propagate'), 8) assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5]) if numpy_version < '1.9.0a': assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2]) else: # some fixes to percentile nan handling in 1.9 assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2]) _check_warnings(w, RuntimeWarning, 3) else: 
assert_equal(stats.iqr(x, nan_policy='propagate'), np.nan) assert_equal(stats.iqr(x, axis=0, nan_policy='propagate'), [5, 5, np.nan, 5, 5]) assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2]) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") if numpy_version < '1.9.0a': # Fails over to mishmash of omit/propagate, but mostly omit assert_equal(stats.iqr(x, nan_policy='omit'), 8) assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), [5, 5, np.nan, 5, 5]) assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 3, 2]) _check_warnings(w, RuntimeWarning, 3) else: assert_equal(stats.iqr(x, nan_policy='omit'), 7.5) assert_equal(stats.iqr(x, axis=0, nan_policy='omit'), 5 * np.ones(5)) assert_equal(stats.iqr(x, axis=1, nan_policy='omit'), [2, 2.5, 2]) assert_raises(ValueError, stats.iqr, x, nan_policy='raise') assert_raises(ValueError, stats.iqr, x, axis=0, nan_policy='raise') assert_raises(ValueError, stats.iqr, x, axis=1, nan_policy='raise') # Bad policy assert_raises(ValueError, stats.iqr, x, nan_policy='barfood') def test_scale(self): numpy_version = NumpyVersion(np.__version__) x = np.arange(15.0).reshape((3, 5)) # No NaNs assert_equal(stats.iqr(x, scale='raw'), 7) assert_almost_equal(stats.iqr(x, scale='normal'), 7 / 1.3489795) assert_equal(stats.iqr(x, scale=2.0), 3.5) # Yes NaNs x[1, 2] = np.nan with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") if numpy_version < '1.10.0a': # Fails over to mishmash of omit/propagate, but mostly omit assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), 8) assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), 8 / 1.3489795) assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), 4) # axis=1 chosen to show behavior with both nans and without if numpy_version < '1.9.0a': assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, 3, 2]) assert_almost_equal(stats.iqr(x, axis=1, scale='normal', nan_policy='propagate'), np.array([2, 3, 2]) / 1.3489795) assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'), [1, 1.5, 1]) else: # some fixes to percentile nan handling in 1.9 assert_equal(stats.iqr(x, axis=1, nan_policy='propagate'), [2, np.nan, 2]) assert_almost_equal(stats.iqr(x, axis=1, scale='normal', nan_policy='propagate'), np.array([2, np.nan, 2]) / 1.3489795) assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'), [1, np.nan, 1]) _check_warnings(w, RuntimeWarning, 6) else: assert_equal(stats.iqr(x, scale='raw', nan_policy='propagate'), np.nan) assert_equal(stats.iqr(x, scale='normal', nan_policy='propagate'), np.nan) assert_equal(stats.iqr(x, scale=2.0, nan_policy='propagate'), np.nan) # axis=1 chosen to show behavior with both nans and without assert_equal(stats.iqr(x, axis=1, scale='raw', nan_policy='propagate'), [2, np.nan, 2]) assert_almost_equal(stats.iqr(x, axis=1, scale='normal', nan_policy='propagate'), np.array([2, np.nan, 2]) / 1.3489795) assert_equal(stats.iqr(x, axis=1, scale=2.0, nan_policy='propagate'), [1, np.nan, 1]) _check_warnings(w, RuntimeWarning, 6) if numpy_version < '1.9.0a': with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") # Fails over to mishmash of omit/propagate, but mostly omit assert_equal(stats.iqr(x, scale='raw', nan_policy='omit'), 8) assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'), 8 / 1.3489795) assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 4) _check_warnings(w, RuntimeWarning, 3) else: assert_equal(stats.iqr(x, 
scale='raw', nan_policy='omit'), 7.5) assert_almost_equal(stats.iqr(x, scale='normal', nan_policy='omit'), 7.5 / 1.3489795) assert_equal(stats.iqr(x, scale=2.0, nan_policy='omit'), 3.75) # Bad scale assert_raises(ValueError, stats.iqr, x, scale='foobar') class TestMoments(object): """ Comparison numbers are found using R v.1.5.1 note that length(testcase) = 4 testmathworks comes from documentation for the Statistics Toolbox for Matlab and can be found at both http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml Note that both test cases came from here. """ testcase = [1,2,3,4] scalar_testcase = 4. np.random.seed(1234) testcase_moment_accuracy = np.random.rand(42) testmathworks = [1.165, 0.6268, 0.0751, 0.3516, -0.6965] def test_moment(self): # mean((testcase-mean(testcase))**power,axis=0),axis=0))**power)) y = stats.moment(self.scalar_testcase) assert_approx_equal(y, 0.0) y = stats.moment(self.testcase, 0) assert_approx_equal(y, 1.0) y = stats.moment(self.testcase, 1) assert_approx_equal(y, 0.0, 10) y = stats.moment(self.testcase, 2) assert_approx_equal(y, 1.25) y = stats.moment(self.testcase, 3) assert_approx_equal(y, 0.0) y = stats.moment(self.testcase, 4) assert_approx_equal(y, 2.5625) # check array_like input for moment y = stats.moment(self.testcase, [1, 2, 3, 4]) assert_allclose(y, [0, 1.25, 0, 2.5625]) # check moment input consists only of integers y = stats.moment(self.testcase, 0.0) assert_approx_equal(y, 1.0) assert_raises(ValueError, stats.moment, self.testcase, 1.2) y = stats.moment(self.testcase, [1.0, 2, 3, 4.0]) assert_allclose(y, [0, 1.25, 0, 2.5625]) # test empty input y = stats.moment([]) assert_equal(y, np.nan) x = np.arange(10.) x[9] = np.nan assert_equal(stats.moment(x, 2), np.nan) assert_almost_equal(stats.moment(x, nan_policy='omit'), 0.0) assert_raises(ValueError, stats.moment, x, nan_policy='raise') assert_raises(ValueError, stats.moment, x, nan_policy='foobar') def test_moment_propagate_nan(self): # Check that the shape of the result is the same for inputs # with and without nans, cf gh-5817 a = np.arange(8).reshape(2, -1).astype(float) a[1, 0] = np.nan mm = stats.moment(a, 2, axis=1, nan_policy="propagate") np.testing.assert_allclose(mm, [1.25, np.nan], atol=1e-15) def test_variation(self): # variation = samplestd / mean y = stats.variation(self.scalar_testcase) assert_approx_equal(y, 0.0) y = stats.variation(self.testcase) assert_approx_equal(y, 0.44721359549996, 10) x = np.arange(10.) 
x[9] = np.nan assert_equal(stats.variation(x), np.nan) assert_almost_equal(stats.variation(x, nan_policy='omit'), 0.6454972243679028) assert_raises(ValueError, stats.variation, x, nan_policy='raise') assert_raises(ValueError, stats.variation, x, nan_policy='foobar') def test_variation_propagate_nan(self): # Check that the shape of the result is the same for inputs # with and without nans, cf gh-5817 a = np.arange(8).reshape(2, -1).astype(float) a[1, 0] = np.nan vv = stats.variation(a, axis=1, nan_policy="propagate") np.testing.assert_allclose(vv, [0.7453559924999299, np.nan], atol=1e-15) def test_skewness(self): # Scalar test case y = stats.skew(self.scalar_testcase) assert_approx_equal(y, 0.0) # sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0) / # ((sqrt(var(testmathworks)*4/5))**3)/5 y = stats.skew(self.testmathworks) assert_approx_equal(y, -0.29322304336607, 10) y = stats.skew(self.testmathworks, bias=0) assert_approx_equal(y, -0.437111105023940, 10) y = stats.skew(self.testcase) assert_approx_equal(y, 0.0, 10) x = np.arange(10.) x[9] = np.nan with np.errstate(invalid='ignore'): assert_equal(stats.skew(x), np.nan) assert_equal(stats.skew(x, nan_policy='omit'), 0.) assert_raises(ValueError, stats.skew, x, nan_policy='raise') assert_raises(ValueError, stats.skew, x, nan_policy='foobar') def test_skewness_scalar(self): # `skew` must return a scalar for 1-dim input assert_equal(stats.skew(arange(10)), 0.0) def test_skew_propagate_nan(self): # Check that the shape of the result is the same for inputs # with and without nans, cf gh-5817 a = np.arange(8).reshape(2, -1).astype(float) a[1, 0] = np.nan with np.errstate(invalid='ignore'): s = stats.skew(a, axis=1, nan_policy="propagate") np.testing.assert_allclose(s, [0, np.nan], atol=1e-15) def test_kurtosis(self): # Scalar test case y = stats.kurtosis(self.scalar_testcase) assert_approx_equal(y, -3.0) # sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4 # sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5 # Set flags for axis = 0 and # fisher=0 (Pearson's defn of kurtosis for compatibility with Matlab) y = stats.kurtosis(self.testmathworks, 0, fisher=0, bias=1) assert_approx_equal(y, 2.1658856802973, 10) # Note that MATLAB has confusing docs for the following case # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3) # The MATLAB docs imply that both should give Fisher's y = stats.kurtosis(self.testmathworks, fisher=0, bias=0) assert_approx_equal(y, 3.663542721189047, 10) y = stats.kurtosis(self.testcase, 0, 0) assert_approx_equal(y, 1.64) x = np.arange(10.) x[9] = np.nan assert_equal(stats.kurtosis(x), np.nan) assert_almost_equal(stats.kurtosis(x, nan_policy='omit'), -1.230000) assert_raises(ValueError, stats.kurtosis, x, nan_policy='raise') assert_raises(ValueError, stats.kurtosis, x, nan_policy='foobar') def test_kurtosis_array_scalar(self): assert_equal(type(stats.kurtosis([1,2,3])), float) def test_kurtosis_propagate_nan(self): # Check that the shape of the result is the same for inputs # with and without nans, cf gh-5817 a = np.arange(8).reshape(2, -1).astype(float) a[1, 0] = np.nan k = stats.kurtosis(a, axis=1, nan_policy="propagate") np.testing.assert_allclose(k, [-1.36, np.nan], atol=1e-15) def test_moment_accuracy(self): # 'moment' must have a small enough error compared to the slower # but very accurate numpy.power() implementation. 
tc_no_mean = self.testcase_moment_accuracy - \ np.mean(self.testcase_moment_accuracy) assert_allclose(np.power(tc_no_mean, 42).mean(), stats.moment(self.testcase_moment_accuracy, 42)) class TestStudentTest(object): X1 = np.array([-1, 0, 1]) X2 = np.array([0, 1, 2]) T1_0 = 0 P1_0 = 1 T1_1 = -1.732051 P1_1 = 0.2254033 T1_2 = -3.464102 P1_2 = 0.0741799 T2_0 = 1.732051 P2_0 = 0.2254033 def test_onesample(self): with suppress_warnings() as sup, np.errstate(invalid="ignore"): sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") t, p = stats.ttest_1samp(4., 3.) assert_(np.isnan(t)) assert_(np.isnan(p)) t, p = stats.ttest_1samp(self.X1, 0) assert_array_almost_equal(t, self.T1_0) assert_array_almost_equal(p, self.P1_0) res = stats.ttest_1samp(self.X1, 0) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) t, p = stats.ttest_1samp(self.X2, 0) assert_array_almost_equal(t, self.T2_0) assert_array_almost_equal(p, self.P2_0) t, p = stats.ttest_1samp(self.X1, 1) assert_array_almost_equal(t, self.T1_1) assert_array_almost_equal(p, self.P1_1) t, p = stats.ttest_1samp(self.X1, 2) assert_array_almost_equal(t, self.T1_2) assert_array_almost_equal(p, self.P1_2) # check nan policy np.random.seed(7654567) x = stats.norm.rvs(loc=5, scale=10, size=51) x[50] = np.nan with np.errstate(invalid="ignore"): assert_array_equal(stats.ttest_1samp(x, 5.0), (np.nan, np.nan)) assert_array_almost_equal(stats.ttest_1samp(x, 5.0, nan_policy='omit'), (-1.6412624074367159, 0.107147027334048005)) assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='raise') assert_raises(ValueError, stats.ttest_1samp, x, 5.0, nan_policy='foobar') def test_percentileofscore(): pcos = stats.percentileofscore assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0) for (kind, result) in [('mean', 35.0), ('strict', 30.0), ('weak', 40.0)]: assert_equal(pcos(np.arange(10) + 1, 4, kind=kind), result) # multiple - 2 for (kind, result) in [('rank', 45.0), ('strict', 30.0), ('weak', 50.0), ('mean', 40.0)]: assert_equal(pcos([1,2,3,4,4,5,6,7,8,9], 4, kind=kind), result) # multiple - 3 assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0) for (kind, result) in [('rank', 50.0), ('mean', 45.0), ('strict', 30.0), ('weak', 60.0)]: assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4, kind=kind), result) # missing for kind in ('rank', 'mean', 'strict', 'weak'): assert_equal(pcos([1,2,3,5,6,7,8,9,10,11], 4, kind=kind), 30) # larger numbers for (kind, result) in [('mean', 35.0), ('strict', 30.0), ('weak', 40.0)]: assert_equal( pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40, kind=kind), result) for (kind, result) in [('mean', 45.0), ('strict', 30.0), ('weak', 60.0)]: assert_equal( pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80], 40, kind=kind), result) for kind in ('rank', 'mean', 'strict', 'weak'): assert_equal( pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], 40, kind=kind), 30.0) # boundaries for (kind, result) in [('rank', 10.0), ('mean', 5.0), ('strict', 0.0), ('weak', 10.0)]: assert_equal( pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], 10, kind=kind), result) for (kind, result) in [('rank', 100.0), ('mean', 95.0), ('strict', 90.0), ('weak', 100.0)]: assert_equal( pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], 110, kind=kind), result) # out of bounds for (kind, score, result) in [('rank', 200, 100.0), ('mean', 200, 100.0), ('mean', 0, 0.0)]: assert_equal( pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110], score, kind=kind), result) assert_raises(ValueError, pcos, [1, 2, 3, 3, 4], 3, kind='unrecognized') PowerDivCase = 
namedtuple('Case', ['f_obs', 'f_exp', 'ddof', 'axis', 'chi2', # Pearson's 'log', # G-test (log-likelihood) 'mod_log', # Modified log-likelihood 'cr', # Cressie-Read (lambda=2/3) ]) # The details of the first two elements in power_div_1d_cases are used # in a test in TestPowerDivergence. Check that code before making # any changes here. power_div_1d_cases = [ # Use the default f_exp. PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=None, ddof=0, axis=None, chi2=4, log=2*(4*np.log(4/8) + 12*np.log(12/8)), mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)), cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)), # Give a non-uniform f_exp. PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=[2, 16, 12, 2], ddof=0, axis=None, chi2=24, log=2*(4*np.log(4/2) + 8*np.log(8/16) + 8*np.log(8/2)), mod_log=2*(2*np.log(2/4) + 16*np.log(16/8) + 2*np.log(2/8)), cr=(4*((4/2)**(2/3) - 1) + 8*((8/16)**(2/3) - 1) + 8*((8/2)**(2/3) - 1))/(5/9)), # f_exp is a scalar. PowerDivCase(f_obs=[4, 8, 12, 8], f_exp=8, ddof=0, axis=None, chi2=4, log=2*(4*np.log(4/8) + 12*np.log(12/8)), mod_log=2*(8*np.log(8/4) + 8*np.log(8/12)), cr=(4*((4/8)**(2/3) - 1) + 12*((12/8)**(2/3) - 1))/(5/9)), # f_exp equal to f_obs. PowerDivCase(f_obs=[3, 5, 7, 9], f_exp=[3, 5, 7, 9], ddof=0, axis=0, chi2=0, log=0, mod_log=0, cr=0), ] power_div_empty_cases = [ # Shape is (0,)--a data set with length 0. The computed # test statistic should be 0. PowerDivCase(f_obs=[], f_exp=None, ddof=0, axis=0, chi2=0, log=0, mod_log=0, cr=0), # Shape is (0, 3). This is 3 data sets, but each data set has # length 0, so the computed test statistic should be [0, 0, 0]. PowerDivCase(f_obs=np.array([[],[],[]]).T, f_exp=None, ddof=0, axis=0, chi2=[0, 0, 0], log=[0, 0, 0], mod_log=[0, 0, 0], cr=[0, 0, 0]), # Shape is (3, 0). This represents an empty collection of # data sets in which each data set has length 3. The test # statistic should be an empty array. PowerDivCase(f_obs=np.array([[],[],[]]), f_exp=None, ddof=0, axis=0, chi2=[], log=[], mod_log=[], cr=[]), ] class TestPowerDivergence(object): def check_power_divergence(self, f_obs, f_exp, ddof, axis, lambda_, expected_stat): f_obs = np.asarray(f_obs) if axis is None: num_obs = f_obs.size else: b = np.broadcast(f_obs, f_exp) num_obs = b.shape[axis] with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Mean of empty slice") stat, p = stats.power_divergence( f_obs=f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_=lambda_) assert_allclose(stat, expected_stat) if lambda_ == 1 or lambda_ == "pearson": # Also test stats.chisquare. 
stat, p = stats.chisquare(f_obs=f_obs, f_exp=f_exp, ddof=ddof, axis=axis) assert_allclose(stat, expected_stat) ddof = np.asarray(ddof) expected_p = stats.distributions.chi2.sf(expected_stat, num_obs - 1 - ddof) assert_allclose(p, expected_p) def test_basic(self): for case in power_div_1d_cases: self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, None, case.chi2) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, "pearson", case.chi2) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, 1, case.chi2) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, "log-likelihood", case.log) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, "mod-log-likelihood", case.mod_log) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, "cressie-read", case.cr) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, 2/3, case.cr) def test_basic_masked(self): for case in power_div_1d_cases: mobs = np.ma.array(case.f_obs) self.check_power_divergence( mobs, case.f_exp, case.ddof, case.axis, None, case.chi2) self.check_power_divergence( mobs, case.f_exp, case.ddof, case.axis, "pearson", case.chi2) self.check_power_divergence( mobs, case.f_exp, case.ddof, case.axis, 1, case.chi2) self.check_power_divergence( mobs, case.f_exp, case.ddof, case.axis, "log-likelihood", case.log) self.check_power_divergence( mobs, case.f_exp, case.ddof, case.axis, "mod-log-likelihood", case.mod_log) self.check_power_divergence( mobs, case.f_exp, case.ddof, case.axis, "cressie-read", case.cr) self.check_power_divergence( mobs, case.f_exp, case.ddof, case.axis, 2/3, case.cr) def test_axis(self): case0 = power_div_1d_cases[0] case1 = power_div_1d_cases[1] f_obs = np.vstack((case0.f_obs, case1.f_obs)) f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs), case1.f_exp)) # Check the four computational code paths in power_divergence # using a 2D array with axis=1. self.check_power_divergence( f_obs, f_exp, 0, 1, "pearson", [case0.chi2, case1.chi2]) self.check_power_divergence( f_obs, f_exp, 0, 1, "log-likelihood", [case0.log, case1.log]) self.check_power_divergence( f_obs, f_exp, 0, 1, "mod-log-likelihood", [case0.mod_log, case1.mod_log]) self.check_power_divergence( f_obs, f_exp, 0, 1, "cressie-read", [case0.cr, case1.cr]) # Reshape case0.f_obs to shape (2,2), and use axis=None. # The result should be the same. self.check_power_divergence( np.array(case0.f_obs).reshape(2, 2), None, 0, None, "pearson", case0.chi2) def test_ddof_broadcasting(self): # Test that ddof broadcasts correctly. # ddof does not affect the test statistic. It is broadcast # with the computed test statistic for the computation of # the p value. case0 = power_div_1d_cases[0] case1 = power_div_1d_cases[1] # Create 4x2 arrays of observed and expected frequencies. f_obs = np.vstack((case0.f_obs, case1.f_obs)).T f_exp = np.vstack((np.ones_like(case0.f_obs)*np.mean(case0.f_obs), case1.f_exp)).T expected_chi2 = [case0.chi2, case1.chi2] # ddof has shape (2, 1). This is broadcast with the computed # statistic, so p will have shape (2,2). ddof = np.array([[0], [1]]) stat, p = stats.power_divergence(f_obs, f_exp, ddof=ddof) assert_allclose(stat, expected_chi2) # Compute the p values separately, passing in scalars for ddof. 
stat0, p0 = stats.power_divergence(f_obs, f_exp, ddof=ddof[0,0]) stat1, p1 = stats.power_divergence(f_obs, f_exp, ddof=ddof[1,0]) assert_array_equal(p, np.vstack((p0, p1))) def test_empty_cases(self): with warnings.catch_warnings(): for case in power_div_empty_cases: self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, "pearson", case.chi2) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, "log-likelihood", case.log) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, "mod-log-likelihood", case.mod_log) self.check_power_divergence( case.f_obs, case.f_exp, case.ddof, case.axis, "cressie-read", case.cr) def test_power_divergence_result_attributes(self): f_obs = power_div_1d_cases[0].f_obs f_exp = power_div_1d_cases[0].f_exp ddof = power_div_1d_cases[0].ddof axis = power_div_1d_cases[0].axis res = stats.power_divergence(f_obs=f_obs, f_exp=f_exp, ddof=ddof, axis=axis, lambda_="pearson") attributes = ('statistic', 'pvalue') check_named_results(res, attributes) def test_chisquare_masked_arrays(): # Test masked arrays. obs = np.array([[8, 8, 16, 32, -1], [-1, -1, 3, 4, 5]]).T mask = np.array([[0, 0, 0, 0, 1], [1, 1, 0, 0, 0]]).T mobs = np.ma.masked_array(obs, mask) expected_chisq = np.array([24.0, 0.5]) expected_g = np.array([2*(2*8*np.log(0.5) + 32*np.log(2.0)), 2*(3*np.log(0.75) + 5*np.log(1.25))]) chi2 = stats.distributions.chi2 chisq, p = stats.chisquare(mobs) mat.assert_array_equal(chisq, expected_chisq) mat.assert_array_almost_equal(p, chi2.sf(expected_chisq, mobs.count(axis=0) - 1)) g, p = stats.power_divergence(mobs, lambda_='log-likelihood') mat.assert_array_almost_equal(g, expected_g, decimal=15) mat.assert_array_almost_equal(p, chi2.sf(expected_g, mobs.count(axis=0) - 1)) chisq, p = stats.chisquare(mobs.T, axis=1) mat.assert_array_equal(chisq, expected_chisq) mat.assert_array_almost_equal(p, chi2.sf(expected_chisq, mobs.T.count(axis=1) - 1)) g, p = stats.power_divergence(mobs.T, axis=1, lambda_="log-likelihood") mat.assert_array_almost_equal(g, expected_g, decimal=15) mat.assert_array_almost_equal(p, chi2.sf(expected_g, mobs.count(axis=0) - 1)) obs1 = np.ma.array([3, 5, 6, 99, 10], mask=[0, 0, 0, 1, 0]) exp1 = np.ma.array([2, 4, 8, 10, 99], mask=[0, 0, 0, 0, 1]) chi2, p = stats.chisquare(obs1, f_exp=exp1) # Because of the mask at index 3 of obs1 and at index 4 of exp1, # only the first three elements are included in the calculation # of the statistic. mat.assert_array_equal(chi2, 1/2 + 1/4 + 4/8) # When axis=None, the two values should have type np.float64. chisq, p = stats.chisquare(np.ma.array([1,2,3]), axis=None) assert_(isinstance(chisq, np.float64)) assert_(isinstance(p, np.float64)) assert_equal(chisq, 1.0) assert_almost_equal(p, stats.distributions.chi2.sf(1.0, 2)) # Empty arrays: # A data set with length 0 returns a masked scalar. with np.errstate(invalid='ignore'): with suppress_warnings() as sup: sup.filter(RuntimeWarning, "Mean of empty slice") chisq, p = stats.chisquare(np.ma.array([])) assert_(isinstance(chisq, np.ma.MaskedArray)) assert_equal(chisq.shape, ()) assert_(chisq.mask) empty3 = np.ma.array([[],[],[]]) # empty3 is a collection of 0 data sets (whose lengths would be 3, if # there were any), so the return value is an array with length 0. chisq, p = stats.chisquare(empty3) assert_(isinstance(chisq, np.ma.MaskedArray)) mat.assert_array_equal(chisq, []) # empty3.T is an array containing 3 data sets, each with length 0, # so an array of size (3,) is returned, with all values masked. 
    with np.errstate(invalid='ignore'):
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "Mean of empty slice")
            chisq, p = stats.chisquare(empty3.T)
    assert_(isinstance(chisq, np.ma.MaskedArray))
    assert_equal(chisq.shape, (3,))
    assert_(np.all(chisq.mask))


def test_power_divergence_against_cressie_read_data():
    # Test stats.power_divergence against tables 4 and 5 from
    # Cressie and Read, "Multinomial Goodness-of-Fit Tests",
    # J. R. Statist. Soc. B (1984), Vol 46, No. 3, pp. 440-464.
    # This tests the calculation for several values of lambda.

    # `table4` holds just the second and third columns from Table 4.
    table4 = np.array([
        # observed, expected,
        15, 15.171,
        11, 13.952,
        14, 12.831,
        17, 11.800,
        5, 10.852,
        11, 9.9796,
        10, 9.1777,
        4, 8.4402,
        8, 7.7620,
        10, 7.1383,
        7, 6.5647,
        9, 6.0371,
        11, 5.5520,
        3, 5.1059,
        6, 4.6956,
        1, 4.3183,
        1, 3.9713,
        4, 3.6522,
        ]).reshape(-1, 2)

    table5 = np.array([
        # lambda, statistic
        -10.0, 72.2e3,
        -5.0, 28.9e1,
        -3.0, 65.6,
        -2.0, 40.6,
        -1.5, 34.0,
        -1.0, 29.5,
        -0.5, 26.5,
        0.0, 24.6,
        0.5, 23.4,
        0.67, 23.1,
        1.0, 22.7,
        1.5, 22.6,
        2.0, 22.9,
        3.0, 24.8,
        5.0, 35.5,
        10.0, 21.4e1,
        ]).reshape(-1, 2)

    for lambda_, expected_stat in table5:
        stat, p = stats.power_divergence(table4[:,0], table4[:,1],
                                         lambda_=lambda_)
        assert_allclose(stat, expected_stat, rtol=5e-3)


def test_friedmanchisquare():
    # see ticket:113
    # verified with matlab and R
    # From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets"
    # 2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)
    x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
                 0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
          array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
                 0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
          array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
                 0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
          array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
                 0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]

    # From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
    x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
          array([2,2,1,2,3,1,2,3,2,1,1,3]),
          array([2,4,3,3,4,3,3,4,4,1,2,1]),
          array([3,5,4,3,4,4,3,3,3,4,4,4])]

    # From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6),
    # Xf=10.68, 0.005 < p < 0.01:
    # Probability from this example is inexact using Chisquare approximation
    # of Friedman Chisquare.
x3 = [array([7.0,9.9,8.5,5.1,10.3]), array([5.3,5.7,4.7,3.5,7.7]), array([4.9,7.6,5.5,2.8,8.4]), array([8.8,8.9,8.1,3.3,9.1])] assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]), (10.2283464566929, 0.0167215803284414)) assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]), (18.9428571428571, 0.000280938375189499)) assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]), (10.68, 0.0135882729582176)) assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1]) # test for namedtuple attribute results attributes = ('statistic', 'pvalue') res = stats.friedmanchisquare(*x1) check_named_results(res, attributes) # test using mstats assert_array_almost_equal(mstats.friedmanchisquare(x1[0], x1[1], x1[2], x1[3]), (10.2283464566929, 0.0167215803284414)) # the following fails # assert_array_almost_equal(mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]), # (18.9428571428571, 0.000280938375189499)) assert_array_almost_equal(mstats.friedmanchisquare(x3[0], x3[1], x3[2], x3[3]), (10.68, 0.0135882729582176)) assert_raises(ValueError, mstats.friedmanchisquare,x3[0],x3[1]) def test_kstest(): # from numpy.testing import assert_almost_equal # comparing with values from R x = np.linspace(-1,1,9) D,p = stats.kstest(x,'norm') assert_almost_equal(D, 0.15865525393145705, 12) assert_almost_equal(p, 0.95164069201518386, 1) x = np.linspace(-15,15,9) D,p = stats.kstest(x,'norm') assert_almost_equal(D, 0.44435602715924361, 15) assert_almost_equal(p, 0.038850140086788665, 8) # test for namedtuple attribute results attributes = ('statistic', 'pvalue') res = stats.kstest(x, 'norm') check_named_results(res, attributes) # the following tests rely on deterministicaly replicated rvs np.random.seed(987654321) x = stats.norm.rvs(loc=0.2, size=100) D,p = stats.kstest(x, 'norm', mode='asymp') assert_almost_equal(D, 0.12464329735846891, 15) assert_almost_equal(p, 0.089444888711820769, 15) assert_almost_equal(np.array(stats.kstest(x, 'norm', mode='asymp')), np.array((0.12464329735846891, 0.089444888711820769)), 15) assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='less')), np.array((0.12464329735846891, 0.040989164077641749)), 15) # this 'greater' test fails with precision of decimal=14 assert_almost_equal(np.array(stats.kstest(x,'norm', alternative='greater')), np.array((0.0072115233216310994, 0.98531158590396228)), 12) # missing: no test that uses *args def test_ks_2samp(): # exact small sample solution data1 = np.array([1.0,2.0]) data2 = np.array([1.0,2.0,3.0]) assert_almost_equal(np.array(stats.ks_2samp(data1+0.01,data2)), np.array((0.33333333333333337, 0.99062316386915694))) assert_almost_equal(np.array(stats.ks_2samp(data1-0.01,data2)), np.array((0.66666666666666674, 0.42490954988801982))) # these can also be verified graphically assert_almost_equal( np.array(stats.ks_2samp(np.linspace(1,100,100), np.linspace(1,100,100)+2+0.1)), np.array((0.030000000000000027, 0.99999999996005062))) assert_almost_equal( np.array(stats.ks_2samp(np.linspace(1,100,100), np.linspace(1,100,100)+2-0.1)), np.array((0.020000000000000018, 0.99999999999999933))) # these are just regression tests assert_almost_equal( np.array(stats.ks_2samp(np.linspace(1,100,100), np.linspace(1,100,110)+20.1)), np.array((0.21090909090909091, 0.015880386730710221))) assert_almost_equal( np.array(stats.ks_2samp(np.linspace(1,100,100), np.linspace(1,100,110)+20-0.1)), np.array((0.20818181818181825, 0.017981441789762638))) # test for namedtuple attribute results attributes = ('statistic', 
'pvalue') res = stats.ks_2samp(data1 - 0.01, data2) check_named_results(res, attributes) def test_ttest_rel(): # regression test tr,pr = 0.81248591389165692, 0.41846234511362157 tpr = ([tr,-tr],[pr,pr]) rvs1 = np.linspace(1,100,100) rvs2 = np.linspace(1.01,99.989,100) rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)]) rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)]) t,p = stats.ttest_rel(rvs1, rvs2, axis=0) assert_array_almost_equal([t,p],(tr,pr)) t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0) assert_array_almost_equal([t,p],tpr) t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1) assert_array_almost_equal([t,p],tpr) # test scalars with suppress_warnings() as sup, np.errstate(invalid="ignore"): sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") t, p = stats.ttest_rel(4., 3.) assert_(np.isnan(t)) assert_(np.isnan(p)) # test for namedtuple attribute results attributes = ('statistic', 'pvalue') res = stats.ttest_rel(rvs1, rvs2, axis=0) check_named_results(res, attributes) # test on 3 dimensions rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1) assert_array_almost_equal(np.abs(t), tr) assert_array_almost_equal(np.abs(p), pr) assert_equal(t.shape, (2, 3)) t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2) assert_array_almost_equal(np.abs(t), tr) assert_array_almost_equal(np.abs(p), pr) assert_equal(t.shape, (3, 2)) # check nan policy np.random.seed(12345678) x = stats.norm.rvs(loc=5, scale=10, size=501) x[500] = np.nan y = (stats.norm.rvs(loc=5, scale=10, size=501) + stats.norm.rvs(scale=0.2, size=501)) y[500] = np.nan with np.errstate(invalid="ignore"): assert_array_equal(stats.ttest_rel(x, x), (np.nan, np.nan)) assert_array_almost_equal(stats.ttest_rel(x, y, nan_policy='omit'), (0.25299925303978066, 0.8003729814201519)) assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='raise') assert_raises(ValueError, stats.ttest_rel, x, y, nan_policy='foobar') # test zero division problem t, p = stats.ttest_rel([0, 0, 0], [1, 1, 1]) assert_equal((np.abs(t), p), (np.inf, 0)) with np.errstate(invalid="ignore"): assert_equal(stats.ttest_rel([0, 0, 0], [0, 0, 0]), (np.nan, np.nan)) # check that nan in input array result in nan output anan = np.array([[1, np.nan], [-1, 1]]) assert_equal(stats.ttest_rel(anan, np.zeros((2, 2))), ([0, np.nan], [1, np.nan])) # test incorrect input shape raise an error x = np.arange(24) assert_raises(ValueError, stats.ttest_rel, x.reshape((8, 3)), x.reshape((2, 3, 4))) def test_ttest_rel_nan_2nd_arg(): # regression test for gh-6134: nans in the second arg were not handled x = [np.nan, 2.0, 3.0, 4.0] y = [1.0, 2.0, 1.0, 2.0] r1 = stats.ttest_rel(x, y, nan_policy='omit') r2 = stats.ttest_rel(y, x, nan_policy='omit') assert_allclose(r2.statistic, -r1.statistic, atol=1e-15) assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15) # NB: arguments are paired when NaNs are dropped r3 = stats.ttest_rel(y[1:], x[1:]) assert_allclose(r2, r3, atol=1e-15) # .. and this is consistent with R. 
R code: # x = c(NA, 2.0, 3.0, 4.0) # y = c(1.0, 2.0, 1.0, 2.0) # t.test(x, y, paired=TRUE) assert_allclose(r2, (-2, 0.1835), atol=1e-4) def _desc_stats(x1, x2, axis=0): def _stats(x, axis=0): x = np.asarray(x) mu = np.mean(x, axis=axis) std = np.std(x, axis=axis, ddof=1) nobs = x.shape[axis] return mu, std, nobs return _stats(x1, axis) + _stats(x2, axis) def test_ttest_ind(): # regression test tr = 1.0912746897927283 pr = 0.27647818616351882 tpr = ([tr,-tr],[pr,pr]) rvs2 = np.linspace(1,100,100) rvs1 = np.linspace(5,105,100) rvs1_2D = np.array([rvs1, rvs2]) rvs2_2D = np.array([rvs2, rvs1]) t,p = stats.ttest_ind(rvs1, rvs2, axis=0) assert_array_almost_equal([t,p],(tr,pr)) # test from_stats API assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, rvs2)), [t, p]) t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0) assert_array_almost_equal([t,p],tpr) args = _desc_stats(rvs1_2D.T, rvs2_2D.T) assert_array_almost_equal(stats.ttest_ind_from_stats(*args), [t, p]) t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1) assert_array_almost_equal([t,p],tpr) args = _desc_stats(rvs1_2D, rvs2_2D, axis=1) assert_array_almost_equal(stats.ttest_ind_from_stats(*args), [t, p]) # test scalars with suppress_warnings() as sup, np.errstate(invalid="ignore"): sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") t, p = stats.ttest_ind(4., 3.) assert_(np.isnan(t)) assert_(np.isnan(p)) # test on 3 dimensions rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1) assert_almost_equal(np.abs(t), np.abs(tr)) assert_array_almost_equal(np.abs(p), pr) assert_equal(t.shape, (2, 3)) t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2) assert_array_almost_equal(np.abs(t), np.abs(tr)) assert_array_almost_equal(np.abs(p), pr) assert_equal(t.shape, (3, 2)) # check nan policy np.random.seed(12345678) x = stats.norm.rvs(loc=5, scale=10, size=501) x[500] = np.nan y = stats.norm.rvs(loc=5, scale=10, size=500) with np.errstate(invalid="ignore"): assert_array_equal(stats.ttest_ind(x, y), (np.nan, np.nan)) assert_array_almost_equal(stats.ttest_ind(x, y, nan_policy='omit'), (0.24779670949091914, 0.80434267337517906)) assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='raise') assert_raises(ValueError, stats.ttest_ind, x, y, nan_policy='foobar') # test zero division problem t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1]) assert_equal((np.abs(t), p), (np.inf, 0)) with np.errstate(invalid="ignore"): assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0]), (np.nan, np.nan)) # check that nan in input array result in nan output anan = np.array([[1, np.nan], [-1, 1]]) assert_equal(stats.ttest_ind(anan, np.zeros((2, 2))), ([0, np.nan], [1, np.nan])) def test_ttest_ind_with_uneq_var(): # check vs. 
R a = (1, 2, 3) b = (1.1, 2.9, 4.2) pr = 0.53619490753126731 tr = -0.68649512735572582 t, p = stats.ttest_ind(a, b, equal_var=False) assert_array_almost_equal([t,p], [tr, pr]) # test from desc stats API assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b), equal_var=False), [t, p]) a = (1, 2, 3, 4) pr = 0.84354139131608286 tr = -0.2108663315950719 t, p = stats.ttest_ind(a, b, equal_var=False) assert_array_almost_equal([t,p], [tr, pr]) assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(a, b), equal_var=False), [t, p]) # regression test tr = 1.0912746897927283 tr_uneq_n = 0.66745638708050492 pr = 0.27647831993021388 pr_uneq_n = 0.50873585065616544 tpr = ([tr,-tr],[pr,pr]) rvs3 = np.linspace(1,100, 25) rvs2 = np.linspace(1,100,100) rvs1 = np.linspace(5,105,100) rvs1_2D = np.array([rvs1, rvs2]) rvs2_2D = np.array([rvs2, rvs1]) t,p = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False) assert_array_almost_equal([t,p],(tr,pr)) assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, rvs2), equal_var=False), (t, p)) t,p = stats.ttest_ind(rvs1, rvs3, axis=0, equal_var=False) assert_array_almost_equal([t,p], (tr_uneq_n, pr_uneq_n)) assert_array_almost_equal(stats.ttest_ind_from_stats(*_desc_stats(rvs1, rvs3), equal_var=False), (t, p)) t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0, equal_var=False) assert_array_almost_equal([t,p],tpr) args = _desc_stats(rvs1_2D.T, rvs2_2D.T) assert_array_almost_equal(stats.ttest_ind_from_stats(*args, equal_var=False), (t, p)) t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1, equal_var=False) assert_array_almost_equal([t,p],tpr) args = _desc_stats(rvs1_2D, rvs2_2D, axis=1) assert_array_almost_equal(stats.ttest_ind_from_stats(*args, equal_var=False), (t, p)) # test for namedtuple attribute results attributes = ('statistic', 'pvalue') res = stats.ttest_ind(rvs1, rvs2, axis=0, equal_var=False) check_named_results(res, attributes) # test on 3 dimensions rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D]) rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D]) t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1, equal_var=False) assert_almost_equal(np.abs(t), np.abs(tr)) assert_array_almost_equal(np.abs(p), pr) assert_equal(t.shape, (2, 3)) args = _desc_stats(rvs1_3D, rvs2_3D, axis=1) t, p = stats.ttest_ind_from_stats(*args, equal_var=False) assert_almost_equal(np.abs(t), np.abs(tr)) assert_array_almost_equal(np.abs(p), pr) assert_equal(t.shape, (2, 3)) t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2, equal_var=False) assert_array_almost_equal(np.abs(t), np.abs(tr)) assert_array_almost_equal(np.abs(p), pr) assert_equal(t.shape, (3, 2)) args = _desc_stats(np.rollaxis(rvs1_3D, 2), np.rollaxis(rvs2_3D, 2), axis=2) t, p = stats.ttest_ind_from_stats(*args, equal_var=False) assert_array_almost_equal(np.abs(t), np.abs(tr)) assert_array_almost_equal(np.abs(p), pr) assert_equal(t.shape, (3, 2)) # test zero division problem t, p = stats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False) assert_equal((np.abs(t), p), (np.inf, 0)) with np.errstate(all='ignore'): assert_equal(stats.ttest_ind([0, 0, 0], [0, 0, 0], equal_var=False), (np.nan, np.nan)) # check that nan in input array result in nan output anan = np.array([[1, np.nan], [-1, 1]]) assert_equal(stats.ttest_ind(anan, np.zeros((2, 2)), equal_var=False), ([0, np.nan], [1, np.nan])) def test_ttest_ind_nan_2nd_arg(): # regression test for gh-6134: nans in the second arg were not handled x = [np.nan, 2.0, 3.0, 4.0] y = [1.0, 2.0, 1.0, 2.0] r1 = stats.ttest_ind(x, y, 
nan_policy='omit') r2 = stats.ttest_ind(y, x, nan_policy='omit') assert_allclose(r2.statistic, -r1.statistic, atol=1e-15) assert_allclose(r2.pvalue, r1.pvalue, atol=1e-15) # NB: arguments are not paired when NaNs are dropped r3 = stats.ttest_ind(y, x[1:]) assert_allclose(r2, r3, atol=1e-15) # .. and this is consistent with R. R code: # x = c(NA, 2.0, 3.0, 4.0) # y = c(1.0, 2.0, 1.0, 2.0) # t.test(x, y, var.equal=TRUE) assert_allclose(r2, (-2.5354627641855498, 0.052181400457057901), atol=1e-15) def test_gh5686(): mean1, mean2 = np.array([1, 2]), np.array([3, 4]) std1, std2 = np.array([5, 3]), np.array([4, 5]) nobs1, nobs2 = np.array([130, 140]), np.array([100, 150]) # This will raise a TypeError unless gh-5686 is fixed. stats.ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2) def test_ttest_1samp_new(): n1, n2, n3 = (10,15,20) rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3)) # check multidimensional array and correct axis handling # deterministic rvn1 and rvn2 would be better as in test_ttest_rel t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0) t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0) t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1) assert_array_almost_equal(t1,t2, decimal=14) assert_almost_equal(t1[0,0],t3, decimal=14) assert_equal(t1.shape, (n2,n3)) t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1) t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1) t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1) assert_array_almost_equal(t1,t2, decimal=14) assert_almost_equal(t1[0,0],t3, decimal=14) assert_equal(t1.shape, (n1,n3)) t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2) t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2) t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1) assert_array_almost_equal(t1,t2, decimal=14) assert_almost_equal(t1[0,0],t3, decimal=14) assert_equal(t1.shape, (n1,n2)) # test zero division problem t, p = stats.ttest_1samp([0, 0, 0], 1) assert_equal((np.abs(t), p), (np.inf, 0)) with np.errstate(all='ignore'): assert_equal(stats.ttest_1samp([0, 0, 0], 0), (np.nan, np.nan)) # check that nan in input array result in nan output anan = np.array([[1, np.nan],[-1, 1]]) assert_equal(stats.ttest_1samp(anan, 0), ([0, np.nan], [1, np.nan])) class TestDescribe(object): def test_describe_scalar(self): with suppress_warnings() as sup, np.errstate(invalid="ignore"): sup.filter(RuntimeWarning, "Degrees of freedom <= 0 for slice") n, mm, m, v, sk, kurt = stats.describe(4.) assert_equal(n, 1) assert_equal(mm, (4.0, 4.0)) assert_equal(m, 4.0) assert_(np.isnan(v)) assert_array_almost_equal(sk, 0.0, decimal=13) assert_array_almost_equal(kurt, -3.0, decimal=13) def test_describe_numbers(self): x = np.vstack((np.ones((3,4)), 2 * np.ones((2,4)))) nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.])) mc = np.array([1.4, 1.4, 1.4, 1.4]) vc = np.array([0.3, 0.3, 0.3, 0.3]) skc = [0.40824829046386357] * 4 kurtc = [-1.833333333333333] * 4 n, mm, m, v, sk, kurt = stats.describe(x) assert_equal(n, nc) assert_equal(mm, mmc) assert_equal(m, mc) assert_equal(v, vc) assert_array_almost_equal(sk, skc, decimal=13) assert_array_almost_equal(kurt, kurtc, decimal=13) n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1) assert_equal(n, nc) assert_equal(mm, mmc) assert_equal(m, mc) assert_equal(v, vc) assert_array_almost_equal(sk, skc, decimal=13) assert_array_almost_equal(kurt, kurtc, decimal=13) x = np.arange(10.) 
x[9] = np.nan nc, mmc = (9, (0.0, 8.0)) mc = 4.0 vc = 7.5 skc = 0.0 kurtc = -1.2300000000000002 n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit') assert_equal(n, nc) assert_equal(mm, mmc) assert_equal(m, mc) assert_equal(v, vc) assert_array_almost_equal(sk, skc) assert_array_almost_equal(kurt, kurtc, decimal=13) assert_raises(ValueError, stats.describe, x, nan_policy='raise') assert_raises(ValueError, stats.describe, x, nan_policy='foobar') def test_describe_result_attributes(self): actual = stats.describe(np.arange(5)) attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness', 'kurtosis') check_named_results(actual, attributes) def test_describe_ddof(self): x = np.vstack((np.ones((3, 4)), 2 * np.ones((2, 4)))) nc, mmc = (5, ([1., 1., 1., 1.], [2., 2., 2., 2.])) mc = np.array([1.4, 1.4, 1.4, 1.4]) vc = np.array([0.24, 0.24, 0.24, 0.24]) skc = [0.40824829046386357] * 4 kurtc = [-1.833333333333333] * 4 n, mm, m, v, sk, kurt = stats.describe(x, ddof=0) assert_equal(n, nc) assert_allclose(mm, mmc, rtol=1e-15) assert_allclose(m, mc, rtol=1e-15) assert_allclose(v, vc, rtol=1e-15) assert_array_almost_equal(sk, skc, decimal=13) assert_array_almost_equal(kurt, kurtc, decimal=13) def test_describe_axis_none(self): x = np.vstack((np.ones((3, 4)), 2 * np.ones((2, 4)))) # expected values e_nobs, e_minmax = (20, (1.0, 2.0)) e_mean = 1.3999999999999999 e_var = 0.25263157894736848 e_skew = 0.4082482904638634 e_kurt = -1.8333333333333333 # actual values a = stats.describe(x, axis=None) assert_equal(a.nobs, e_nobs) assert_almost_equal(a.minmax, e_minmax) assert_almost_equal(a.mean, e_mean) assert_almost_equal(a.variance, e_var) assert_array_almost_equal(a.skewness, e_skew, decimal=13) assert_array_almost_equal(a.kurtosis, e_kurt, decimal=13) def test_describe_empty(self): assert_raises(ValueError, stats.describe, []) def test_normalitytests(): assert_raises(ValueError, stats.skewtest, 4.) assert_raises(ValueError, stats.kurtosistest, 4.) assert_raises(ValueError, stats.normaltest, 4.) # numbers verified with R: dagoTest in package fBasics st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734) pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019) x = np.array((-2,-1,0,1,2,3)*4)**2 attributes = ('statistic', 'pvalue') assert_array_almost_equal(stats.normaltest(x), (st_normal, pv_normal)) check_named_results(stats.normaltest(x), attributes) assert_array_almost_equal(stats.skewtest(x), (st_skew, pv_skew)) check_named_results(stats.skewtest(x), attributes) assert_array_almost_equal(stats.kurtosistest(x), (st_kurt, pv_kurt)) check_named_results(stats.kurtosistest(x), attributes) # Test axis=None (equal to axis=0 for 1-D input) assert_array_almost_equal(stats.normaltest(x, axis=None), (st_normal, pv_normal)) assert_array_almost_equal(stats.skewtest(x, axis=None), (st_skew, pv_skew)) assert_array_almost_equal(stats.kurtosistest(x, axis=None), (st_kurt, pv_kurt)) x = np.arange(10.) x[9] = np.nan with np.errstate(invalid="ignore"): assert_array_equal(stats.skewtest(x), (np.nan, np.nan)) expected = (1.0184643553962129, 0.30845733195153502) assert_array_almost_equal(stats.skewtest(x, nan_policy='omit'), expected) with np.errstate(all='ignore'): assert_raises(ValueError, stats.skewtest, x, nan_policy='raise') assert_raises(ValueError, stats.skewtest, x, nan_policy='foobar') x = np.arange(30.) 
x[29] = np.nan with np.errstate(all='ignore'): assert_array_equal(stats.kurtosistest(x), (np.nan, np.nan)) expected = (-2.2683547379505273, 0.023307594135872967) assert_array_almost_equal(stats.kurtosistest(x, nan_policy='omit'), expected) assert_raises(ValueError, stats.kurtosistest, x, nan_policy='raise') assert_raises(ValueError, stats.kurtosistest, x, nan_policy='foobar') with np.errstate(all='ignore'): assert_array_equal(stats.normaltest(x), (np.nan, np.nan)) expected = (6.2260409514287449, 0.04446644248650191) assert_array_almost_equal(stats.normaltest(x, nan_policy='omit'), expected) assert_raises(ValueError, stats.normaltest, x, nan_policy='raise') assert_raises(ValueError, stats.normaltest, x, nan_policy='foobar') class TestRankSums(object): def test_ranksums_result_attributes(self): res = stats.ranksums(np.arange(5), np.arange(25)) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) class TestJarqueBera(object): def test_jarque_bera_stats(self): np.random.seed(987654321) x = np.random.normal(0, 1, 100000) y = np.random.chisquare(10000, 100000) z = np.random.rayleigh(1, 100000) assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(y)[1]) assert_(stats.jarque_bera(x)[1] > stats.jarque_bera(z)[1]) assert_(stats.jarque_bera(y)[1] > stats.jarque_bera(z)[1]) def test_jarque_bera_array_like(self): np.random.seed(987654321) x = np.random.normal(0, 1, 100000) JB1, p1 = stats.jarque_bera(list(x)) JB2, p2 = stats.jarque_bera(tuple(x)) JB3, p3 = stats.jarque_bera(x.reshape(2, 50000)) assert_(JB1 == JB2 == JB3) assert_(p1 == p2 == p3) def test_jarque_bera_size(self): assert_raises(ValueError, stats.jarque_bera, []) def test_skewtest_too_few_samples(): # Regression test for ticket #1492. # skewtest requires at least 8 samples; 7 should raise a ValueError. x = np.arange(7.0) assert_raises(ValueError, stats.skewtest, x) def test_kurtosistest_too_few_samples(): # Regression test for ticket #1425. # kurtosistest requires at least 5 samples; 4 should raise a ValueError. 
x = np.arange(4.0) assert_raises(ValueError, stats.kurtosistest, x) class TestMannWhitneyU(object): X = [19.8958398126694, 19.5452691647182, 19.0577309166425, 21.716543054589, 20.3269502208702, 20.0009273294025, 19.3440043632957, 20.4216806548105, 19.0649894736528, 18.7808043120398, 19.3680942943298, 19.4848044069953, 20.7514611265663, 19.0894948874598, 19.4975522356628, 18.9971170734274, 20.3239606288208, 20.6921298083835, 19.0724259532507, 18.9825187935021, 19.5144462609601, 19.8256857844223, 20.5174677102032, 21.1122407995892, 17.9490854922535, 18.2847521114727, 20.1072217648826, 18.6439891962179, 20.4970638083542, 19.5567594734914] Y = [19.2790668029091, 16.993808441865, 18.5416338448258, 17.2634018833575, 19.1577183624616, 18.5119655377495, 18.6068455037221, 18.8358343362655, 19.0366413269742, 18.1135025515417, 19.2201873866958, 17.8344909022841, 18.2894380745856, 18.6661374133922, 19.9688601693252, 16.0672254617636, 19.00596360572, 19.201561539032, 19.0487501090183, 19.0847908674356] significant = 14 def test_mannwhitneyu_one_sided(self): u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less') u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater') u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater') u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less') assert_equal(p1, p2) assert_equal(p3, p4) assert_(p1 != p3) assert_equal(u1, 498) assert_equal(u2, 102) assert_equal(u3, 498) assert_equal(u4, 102) assert_approx_equal(p1, 0.999957683256589, significant=self.significant) assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant) def test_mannwhitneyu_two_sided(self): u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided') u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided') assert_equal(p1, p2) assert_equal(u1, 498) assert_equal(u2, 102) assert_approx_equal(p1, 9.188326533255e-05, significant=self.significant) def test_mannwhitneyu_default(self): # The default value for alternative is None with suppress_warnings() as sup: sup.filter(DeprecationWarning, "Calling `mannwhitneyu` without .*`alternative`") u1, p1 = stats.mannwhitneyu(self.X, self.Y) u2, p2 = stats.mannwhitneyu(self.Y, self.X) u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative=None) assert_equal(p1, p2) assert_equal(p1, p3) assert_equal(u1, 102) assert_equal(u2, 102) assert_equal(u3, 102) assert_approx_equal(p1, 4.5941632666275e-05, significant=self.significant) def test_mannwhitneyu_no_correct_one_sided(self): u1, p1 = stats.mannwhitneyu(self.X, self.Y, False, alternative='less') u2, p2 = stats.mannwhitneyu(self.Y, self.X, False, alternative='greater') u3, p3 = stats.mannwhitneyu(self.X, self.Y, False, alternative='greater') u4, p4 = stats.mannwhitneyu(self.Y, self.X, False, alternative='less') assert_equal(p1, p2) assert_equal(p3, p4) assert_(p1 != p3) assert_equal(u1, 498) assert_equal(u2, 102) assert_equal(u3, 498) assert_equal(u4, 102) assert_approx_equal(p1, 0.999955905990004, significant=self.significant) assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant) def test_mannwhitneyu_no_correct_two_sided(self): u1, p1 = stats.mannwhitneyu(self.X, self.Y, False, alternative='two-sided') u2, p2 = stats.mannwhitneyu(self.Y, self.X, False, alternative='two-sided') assert_equal(p1, p2) assert_equal(u1, 498) assert_equal(u2, 102) assert_approx_equal(p1, 8.81880199916178e-05, significant=self.significant) def test_mannwhitneyu_no_correct_default(self): # The default value for alternative is None with 
suppress_warnings() as sup: sup.filter(DeprecationWarning, "Calling `mannwhitneyu` without .*`alternative`") u1, p1 = stats.mannwhitneyu(self.X, self.Y, False) u2, p2 = stats.mannwhitneyu(self.Y, self.X, False) u3, p3 = stats.mannwhitneyu(self.X, self.Y, False, alternative=None) assert_equal(p1, p2) assert_equal(p1, p3) assert_equal(u1, 102) assert_equal(u2, 102) assert_equal(u3, 102) assert_approx_equal(p1, 4.40940099958089e-05, significant=self.significant) def test_mannwhitneyu_ones(self): x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.]) # p-value verified with matlab and R to 5 significant digits assert_array_almost_equal(stats.stats.mannwhitneyu(x, y, alternative='less'), (16980.5, 2.8214327656317373e-005), decimal=12) def test_mannwhitneyu_result_attributes(self): # test for namedtuple attribute results attributes = ('statistic', 'pvalue') res = stats.mannwhitneyu(self.X, self.Y, alternative="less") check_named_results(res, attributes) def test_pointbiserial(): # same as mstats test except for the nan # Test data: http://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0, 0,0,0,0,1] y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0, 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1, 0.8,0.7,0.6,0.5,0.2,0.2,0.1] assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5) # test for namedtuple attribute results attributes = ('correlation', 'pvalue') res = stats.pointbiserialr(x, y) check_named_results(res, attributes) def test_obrientransform(): # A couple tests calculated by hand. x1 = np.array([0, 2, 4]) t1 = stats.obrientransform(x1) expected = [7, -2, 7] assert_allclose(t1[0], expected) x2 = np.array([0, 3, 6, 9]) t2 = stats.obrientransform(x2) expected = np.array([30, 0, 0, 30]) assert_allclose(t2[0], expected) # Test two arguments. 
a, b = stats.obrientransform(x1, x2) assert_equal(a, t1[0]) assert_equal(b, t2[0]) # Test three arguments. a, b, c = stats.obrientransform(x1, x2, x1) assert_equal(a, t1[0]) assert_equal(b, t2[0]) assert_equal(c, t1[0]) # This is a regression test to check np.var replacement. # The author of this test didn't separately verify the numbers. x1 = np.arange(5) result = np.array( [[5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667], [21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]]) assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8) # Example from "O'Brien Test for Homogeneity of Variance" # by Herve Abdi. values = range(5, 11) reps = np.array([5, 11, 9, 3, 2, 2]) data = np.repeat(values, reps) transformed_values = np.array([3.1828, 0.5591, 0.0344, 1.6086, 5.2817, 11.0538]) expected = np.repeat(transformed_values, reps) result = stats.obrientransform(data) assert_array_almost_equal(result[0], expected, decimal=4) class HarMeanTestCase: def test_1dlist(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] b = 34.1417152147 self.do(a, b) def test_1darray(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 34.1417152147 self.do(a, b) def test_1dma(self): # Test a 1d masked array a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 34.1417152147 self.do(a, b) def test_1dmavalue(self): # Test a 1d masked array with a masked value a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1]) b = 31.8137186141 self.do(a, b) # Note the next tests use axis=None as default, not axis=0 def test_2dlist(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 38.6696271841 self.do(a, b) def test_2darray(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 38.6696271841 self.do(np.array(a), b) def test_2dma(self): # Test a 2d masked array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 38.6696271841 self.do(np.ma.array(a), b) def test_2daxis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([22.88135593, 39.13043478, 52.90076336, 65.45454545]) self.do(a, b, axis=0) def test_2daxis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([19.2, 63.03939962, 103.80078637]) self.do(a, b, axis=1) def test_2dmatrixdaxis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[22.88135593, 39.13043478, 52.90076336, 65.45454545]]) self.do(np.matrix(a), b, axis=0) def test_2dmatrixaxis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[19.2, 63.03939962, 103.80078637]]).T self.do(np.matrix(a), b, axis=1) class TestHarMean(HarMeanTestCase): def do(self, a, b, axis=None, dtype=None): x = stats.hmean(a, axis=axis, dtype=dtype) assert_almost_equal(b, x) assert_equal(x.dtype, dtype) class GeoMeanTestCase: def test_1dlist(self): # Test a 1d list a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] b = 45.2872868812 self.do(a, b) def test_1darray(self): # Test a 1d array a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 45.2872868812 self.do(a, b) def test_1dma(self): # Test a 1d masked array a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) b = 45.2872868812 self.do(a, b) def test_1dmavalue(self): # Test a 1d masked array with a masked value a = 
np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1]) b = 41.4716627439 self.do(a, b) # Note the next tests use axis=None as default, not axis=0 def test_2dlist(self): # Test a 2d list a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 self.do(a, b) def test_2darray(self): # Test a 2d array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 self.do(np.array(a), b) def test_2dma(self): # Test a 2d masked array a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = 52.8885199 self.do(np.ma.array(a), b) def test_2daxis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([35.56893304, 49.32424149, 61.3579244, 72.68482371]) self.do(a, b, axis=0) def test_2daxis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.array([22.13363839, 64.02171746, 104.40086817]) self.do(a, b, axis=1) def test_2dmatrixdaxis0(self): # Test a 2d list with axis=0 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[35.56893304, 49.32424149, 61.3579244, 72.68482371]]) self.do(np.matrix(a), b, axis=0) def test_2dmatrixaxis1(self): # Test a 2d list with axis=1 a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]] b = np.matrix([[22.13363839, 64.02171746, 104.40086817]]).T self.do(np.matrix(a), b, axis=1) def test_1dlist0(self): # Test a 1d list with zero element a = [10, 20, 30, 40, 50, 60, 70, 80, 90, 0] b = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: self.do(a, b) finally: np.seterr(**olderr) def test_1darray0(self): # Test a 1d array with zero element a = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) b = 0.0 # due to exp(-inf)=0 olderr = np.seterr(all='ignore') try: self.do(a, b) finally: np.seterr(**olderr) def test_1dma0(self): # Test a 1d masked array with zero element a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0]) b = 41.4716627439 olderr = np.seterr(all='ignore') try: self.do(a, b) finally: np.seterr(**olderr) def test_1dmainf(self): # Test a 1d masked array with negative element a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1]) b = 41.4716627439 olderr = np.seterr(all='ignore') try: self.do(a, b) finally: np.seterr(**olderr) class TestGeoMean(GeoMeanTestCase): def do(self, a, b, axis=None, dtype=None): # Note this doesn't test when axis is not specified x = stats.gmean(a, axis=axis, dtype=dtype) assert_almost_equal(b, x) assert_equal(x.dtype, dtype) def test_binomtest(): # precision tests compared to R for ticket:986 pp = np.concatenate((np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5), np.linspace(0.85,0.95,5))) n = 501 x = 450 results = [0.0, 0.0, 1.0159969301994141e-304, 2.9752418572150531e-275, 7.7668382922535275e-250, 2.3381250925167094e-099, 7.8284591587323951e-081, 9.9155947819961383e-065, 2.8729390725176308e-050, 1.7175066298388421e-037, 0.0021070691951093692, 0.12044570587262322, 0.88154763174802508, 0.027120993063129286, 2.6102587134694721e-006] for p, res in zip(pp,results): assert_approx_equal(stats.binom_test(x, n, p), res, significant=12, err_msg='fail forp=%f' % p) assert_approx_equal(stats.binom_test(50,100,0.1), 5.8320387857343647e-024, significant=12, err_msg='fail forp=%f' % p) def test_binomtest2(): # test added for issue #2384 res2 = [ [1.0, 1.0], [0.5,1.0,0.5], [0.25,1.00,1.00,0.25], [0.125,0.625,1.000,0.625,0.125], [0.0625,0.3750,1.0000,1.0000,0.3750,0.0625], 
[0.03125,0.21875,0.68750,1.00000,0.68750,0.21875,0.03125], [0.015625,0.125000,0.453125,1.000000,1.000000,0.453125,0.125000,0.015625], [0.0078125,0.0703125,0.2890625,0.7265625,1.0000000,0.7265625,0.2890625, 0.0703125,0.0078125], [0.00390625,0.03906250,0.17968750,0.50781250,1.00000000,1.00000000, 0.50781250,0.17968750,0.03906250,0.00390625], [0.001953125,0.021484375,0.109375000,0.343750000,0.753906250,1.000000000, 0.753906250,0.343750000,0.109375000,0.021484375,0.001953125] ] for k in range(1, 11): res1 = [stats.binom_test(v, k, 0.5) for v in range(k + 1)] assert_almost_equal(res1, res2[k-1], decimal=10) def test_binomtest3(): # test added for issue #2384 # test when x == n*p and neighbors res3 = [stats.binom_test(v, v*k, 1./k) for v in range(1, 11) for k in range(2, 11)] assert_equal(res3, np.ones(len(res3), int)) #> bt=c() #> for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i-1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}} binom_testm1 = np.array([ 0.5, 0.5555555555555556, 0.578125, 0.5904000000000003, 0.5981224279835393, 0.603430543396034, 0.607304096221924, 0.610255656871054, 0.612579511000001, 0.625, 0.670781893004115, 0.68853759765625, 0.6980101120000006, 0.703906431368616, 0.70793209416498, 0.7108561134173507, 0.713076544331419, 0.714820192935702, 0.6875, 0.7268709038256367, 0.7418963909149174, 0.74986110468096, 0.7548015520398076, 0.7581671424768577, 0.760607984787832, 0.762459425024199, 0.7639120677676575, 0.7265625, 0.761553963657302, 0.774800934828818, 0.7818005980538996, 0.78613491480358, 0.789084353140195, 0.7912217659828884, 0.79284214559524, 0.794112956558801, 0.75390625, 0.7856929451142176, 0.7976688481430754, 0.8039848974727624, 0.807891868948366, 0.8105487660137676, 0.812473307174702, 0.8139318233591120, 0.815075399104785, 0.7744140625, 0.8037322594985427, 0.814742863657656, 0.8205425178645808, 0.8241275984172285, 0.8265645374416, 0.8283292196088257, 0.829666291102775, 0.8307144686362666, 0.7905273437499996, 0.8178712053954738, 0.828116983756619, 0.833508948940494, 0.8368403871552892, 0.839104213210105, 0.840743186196171, 0.84198481438049, 0.8429580531563676, 0.803619384765625, 0.829338573944648, 0.8389591907548646, 0.84401876783902, 0.84714369697889, 0.8492667010581667, 0.850803474598719, 0.851967542858308, 0.8528799045949524, 0.8145294189453126, 0.838881732845347, 0.847979024541911, 0.852760894015685, 0.8557134656773457, 0.8577190131799202, 0.85917058278431, 0.860270010472127, 0.861131648404582, 0.823802947998047, 0.846984756807511, 0.855635653643743, 0.860180994825685, 0.86298688573253, 0.864892525675245, 0.866271647085603, 0.867316125625004, 0.8681346531755114 ]) # > bt=c() # > for(i in as.single(1:10)){for(k in as.single(2:10)){bt = c(bt, binom.test(i+1, k*i,(1/k))$p.value); print(c(i+1, k*i,(1/k)))}} binom_testp1 = np.array([ 0.5, 0.259259259259259, 0.26171875, 0.26272, 0.2632244513031551, 0.2635138663069203, 0.2636951804161073, 0.2638162407564354, 0.2639010709000002, 0.625, 0.4074074074074074, 0.42156982421875, 0.4295746560000003, 0.43473045988554, 0.4383309503172684, 0.4409884859402103, 0.4430309389962837, 0.444649849401104, 0.6875, 0.4927602499618962, 0.5096031427383425, 0.5189636628480, 0.5249280070771274, 0.5290623300865124, 0.5320974248125793, 0.5344204730474308, 0.536255847400756, 0.7265625, 0.5496019313526808, 0.5669248746708034, 0.576436455045805, 0.5824538812831795, 0.5866053321547824, 0.589642781414643, 0.5919618019300193, 0.593790427805202, 0.75390625, 0.590868349763505, 0.607983393277209, 0.617303847446822, 0.623172512167948, 
0.627208862156123, 0.6301556891501057, 0.632401894928977, 0.6341708982290303, 0.7744140625, 0.622562037497196, 0.639236102912278, 0.648263335014579, 0.65392850011132, 0.657816519817211, 0.660650782947676, 0.662808780346311, 0.6645068560246006, 0.7905273437499996, 0.6478843304312477, 0.6640468318879372, 0.6727589686071775, 0.6782129857784873, 0.681950188903695, 0.684671508668418, 0.686741824999918, 0.688369886732168, 0.803619384765625, 0.668716055304315, 0.684360013879534, 0.6927642396829181, 0.6980155964704895, 0.701609591890657, 0.7042244320992127, 0.7062125081341817, 0.707775152962577, 0.8145294189453126, 0.686243374488305, 0.7013873696358975, 0.709501223328243, 0.714563595144314, 0.718024953392931, 0.7205416252126137, 0.722454130389843, 0.723956813292035, 0.823802947998047, 0.701255953767043, 0.715928221686075, 0.723772209289768, 0.7286603031173616, 0.7319999279787631, 0.7344267920995765, 0.736270323773157, 0.737718376096348 ]) res4_p1 = [stats.binom_test(v+1, v*k, 1./k) for v in range(1, 11) for k in range(2, 11)] res4_m1 = [stats.binom_test(v-1, v*k, 1./k) for v in range(1, 11) for k in range(2, 11)] assert_almost_equal(res4_p1, binom_testp1, decimal=13) assert_almost_equal(res4_m1, binom_testm1, decimal=13) class TestTrim(object): # test trim functions def test_trim1(self): a = np.arange(11) assert_equal(np.sort(stats.trim1(a, 0.1)), np.arange(10)) assert_equal(np.sort(stats.trim1(a, 0.2)), np.arange(9)) assert_equal(np.sort(stats.trim1(a, 0.2, tail='left')), np.arange(2, 11)) assert_equal(np.sort(stats.trim1(a, 3/11., tail='left')), np.arange(3, 11)) assert_equal(stats.trim1(a, 1.0), []) assert_equal(stats.trim1(a, 1.0, tail='left'), []) # empty input assert_equal(stats.trim1([], 0.1), []) assert_equal(stats.trim1([], 3/11., tail='left'), []) assert_equal(stats.trim1([], 4/6.), []) def test_trimboth(self): a = np.arange(11) assert_equal(np.sort(stats.trimboth(a, 3/11.)), np.arange(3, 8)) assert_equal(np.sort(stats.trimboth(a, 0.2)), np.array([2, 3, 4, 5, 6, 7, 8])) assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(6, 4), 0.2)), np.arange(4, 20).reshape(4, 4)) assert_equal(np.sort(stats.trimboth(np.arange(24).reshape(4, 6).T, 2/6.)), np.array([[2, 8, 14, 20], [3, 9, 15, 21]])) assert_raises(ValueError, stats.trimboth, np.arange(24).reshape(4, 6).T, 4/6.) # empty input assert_equal(stats.trimboth([], 0.1), []) assert_equal(stats.trimboth([], 3/11.), []) assert_equal(stats.trimboth([], 4/6.), []) def test_trim_mean(self): # don't use pre-sorted arrays a = np.array([4, 8, 2, 0, 9, 5, 10, 1, 7, 3, 6]) idx = np.array([3, 5, 0, 1, 2, 4]) a2 = np.arange(24).reshape(6, 4)[idx, :] a3 = np.arange(24).reshape(6, 4, order='F')[idx, :] assert_equal(stats.trim_mean(a3, 2/6.), np.array([2.5, 8.5, 14.5, 20.5])) assert_equal(stats.trim_mean(a2, 2/6.), np.array([10., 11., 12., 13.])) idx4 = np.array([1, 0, 3, 2]) a4 = np.arange(24).reshape(4, 6)[idx4, :] assert_equal(stats.trim_mean(a4, 2/6.), np.array([9., 10., 11., 12., 13., 14.])) # shuffled arange(24) as array_like a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23, 20, 2, 14, 4, 13, 8, 3] assert_equal(stats.trim_mean(a, 2/6.), 11.5) assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5) # check axis argument np.random.seed(1234) a = np.random.randint(20, size=(5, 6, 4, 7)) for axis in [0, 1, 2, 3, -1]: res1 = stats.trim_mean(a, 2/6., axis=axis) res2 = stats.trim_mean(np.rollaxis(a, axis), 2/6.) assert_equal(res1, res2) res1 = stats.trim_mean(a, 2/6., axis=None) res2 = stats.trim_mean(a.ravel(), 2/6.) 
assert_equal(res1, res2) assert_raises(ValueError, stats.trim_mean, a, 0.6) # empty input assert_equal(stats.trim_mean([], 0.0), np.nan) assert_equal(stats.trim_mean([], 0.6), np.nan) class TestSigmaClip(object): def test_sigmaclip1(self): a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5))) fact = 4 # default c, low, upp = stats.sigmaclip(a) assert_(c.min() > low) assert_(c.max() < upp) assert_equal(low, c.mean() - fact*c.std()) assert_equal(upp, c.mean() + fact*c.std()) assert_equal(c.size, a.size) def test_sigmaclip2(self): a = np.concatenate((np.linspace(9.5, 10.5, 31), np.linspace(0, 20, 5))) fact = 1.5 c, low, upp = stats.sigmaclip(a, fact, fact) assert_(c.min() > low) assert_(c.max() < upp) assert_equal(low, c.mean() - fact*c.std()) assert_equal(upp, c.mean() + fact*c.std()) assert_equal(c.size, 4) assert_equal(a.size, 36) # check original array unchanged def test_sigmaclip3(self): a = np.concatenate((np.linspace(9.5, 10.5, 11), np.linspace(-100, -50, 3))) fact = 1.8 c, low, upp = stats.sigmaclip(a, fact, fact) assert_(c.min() > low) assert_(c.max() < upp) assert_equal(low, c.mean() - fact*c.std()) assert_equal(upp, c.mean() + fact*c.std()) assert_equal(c, np.linspace(9.5, 10.5, 11)) def test_sigmaclip_result_attributes(self): a = np.concatenate((np.linspace(9.5, 10.5, 11), np.linspace(-100, -50, 3))) fact = 1.8 res = stats.sigmaclip(a, fact, fact) attributes = ('clipped', 'lower', 'upper') check_named_results(res, attributes) def test_std_zero(self): # regression test #8632 x = np.ones(10) assert_equal(stats.sigmaclip(x)[0], x) class TestFOneWay(object): def test_trivial(self): # A trivial test of stats.f_oneway, with F=0. F, p = stats.f_oneway([0,2], [0,2]) assert_equal(F, 0.0) def test_basic(self): # Despite being a floating point calculation, this data should # result in F being exactly 2.0. F, p = stats.f_oneway([0,2], [2,4]) assert_equal(F, 2.0) def test_large_integer_array(self): a = np.array([655, 788], dtype=np.uint16) b = np.array([789, 772], dtype=np.uint16) F, p = stats.f_oneway(a, b) assert_almost_equal(F, 0.77450216931805538) def test_result_attributes(self): a = np.array([655, 788], dtype=np.uint16) b = np.array([789, 772], dtype=np.uint16) res = stats.f_oneway(a, b) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) def test_nist(self): # These are the nist ANOVA files. They can be found at: # http://www.itl.nist.gov/div898/strd/anova/anova.html filenames = ['SiRstv.dat', 'SmLs01.dat', 'SmLs02.dat', 'SmLs03.dat', 'AtmWtAg.dat', 'SmLs04.dat', 'SmLs05.dat', 'SmLs06.dat', 'SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat'] for test_case in filenames: rtol = 1e-7 fname = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data/nist_anova', test_case)) with open(fname, 'r') as f: content = f.read().split('\n') certified = [line.split() for line in content[40:48] if line.strip()] dataf = np.loadtxt(fname, skiprows=60) y, x = dataf.T y = y.astype(int) caty = np.unique(y) f = float(certified[0][-1]) xlist = [x[y == i] for i in caty] res = stats.f_oneway(*xlist) # With the hard test cases we relax the tolerance a bit. 
hard_tc = ('SmLs07.dat', 'SmLs08.dat', 'SmLs09.dat') if test_case in hard_tc: rtol = 1e-4 assert_allclose(res[0], f, rtol=rtol, err_msg='Failing testcase: %s' % test_case) class TestKruskal(object): def test_simple(self): x = [1] y = [2] h, p = stats.kruskal(x, y) assert_equal(h, 1.0) assert_approx_equal(p, stats.distributions.chi2.sf(h, 1)) h, p = stats.kruskal(np.array(x), np.array(y)) assert_equal(h, 1.0) assert_approx_equal(p, stats.distributions.chi2.sf(h, 1)) def test_basic(self): x = [1, 3, 5, 7, 9] y = [2, 4, 6, 8, 10] h, p = stats.kruskal(x, y) assert_approx_equal(h, 3./11, significant=10) assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1)) h, p = stats.kruskal(np.array(x), np.array(y)) assert_approx_equal(h, 3./11, significant=10) assert_approx_equal(p, stats.distributions.chi2.sf(3./11, 1)) def test_simple_tie(self): x = [1] y = [1, 2] h_uncorr = 1.5**2 + 2*2.25**2 - 12 corr = 0.75 expected = h_uncorr / corr # 0.5 h, p = stats.kruskal(x, y) # Since the expression is simple and the exact answer is 0.5, it # should be safe to use assert_equal(). assert_equal(h, expected) def test_another_tie(self): x = [1, 1, 1, 2] y = [2, 2, 2, 2] h_uncorr = (12. / 8. / 9.) * 4 * (3**2 + 6**2) - 3 * 9 corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8) expected = h_uncorr / corr h, p = stats.kruskal(x, y) assert_approx_equal(h, expected) def test_three_groups(self): # A test of stats.kruskal with three groups, with ties. x = [1, 1, 1] y = [2, 2, 2] z = [2, 2] h_uncorr = (12. / 8. / 9.) * (3*2**2 + 3*6**2 + 2*6**2) - 3 * 9 # 5.0 corr = 1 - float(3**3 - 3 + 5**3 - 5) / (8**3 - 8) expected = h_uncorr / corr # 7.0 h, p = stats.kruskal(x, y, z) assert_approx_equal(h, expected) assert_approx_equal(p, stats.distributions.chi2.sf(h, 2)) def test_empty(self): # A test of stats.kruskal with three groups, with ties. x = [1, 1, 1] y = [2, 2, 2] z = [] assert_equal(stats.kruskal(x, y, z), (np.nan, np.nan)) def test_kruskal_result_attributes(self): x = [1, 3, 5, 7, 9] y = [2, 4, 6, 8, 10] res = stats.kruskal(x, y) attributes = ('statistic', 'pvalue') check_named_results(res, attributes) def test_nan_policy(self): x = np.arange(10.) x[9] = np.nan assert_equal(stats.kruskal(x, x), (np.nan, np.nan)) assert_almost_equal(stats.kruskal(x, x, nan_policy='omit'), (0.0, 1.0)) assert_raises(ValueError, stats.kruskal, x, x, nan_policy='raise') assert_raises(ValueError, stats.kruskal, x, x, nan_policy='foobar') class TestCombinePvalues(object): def test_fisher(self): # Example taken from http://en.wikipedia.org/wiki/Fisher's_exact_test#Example xsq, p = stats.combine_pvalues([.01, .2, .3], method='fisher') assert_approx_equal(p, 0.02156, significant=4) def test_stouffer(self): Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer') assert_approx_equal(p, 0.01651, significant=4) def test_stouffer2(self): Z, p = stats.combine_pvalues([.5, .5, .5], method='stouffer') assert_approx_equal(p, 0.5, significant=4) def test_weighted_stouffer(self): Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer', weights=np.ones(3)) assert_approx_equal(p, 0.01651, significant=4) def test_weighted_stouffer2(self): Z, p = stats.combine_pvalues([.01, .2, .3], method='stouffer', weights=np.array((1, 4, 9))) assert_approx_equal(p, 0.1464, significant=4) class TestCdfDistanceValidation(object): """ Test that _cdf_distance() (via wasserstein_distance()) raises ValueErrors for bad inputs. 
""" def test_distinct_value_and_weight_lengths(self): # When the number of weights does not match the number of values, # a ValueError should be raised. assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [4], [3, 1]) assert_raises(ValueError, stats.wasserstein_distance, [1], [2], [1, 0]) def test_zero_weight(self): # When a distribution is given zero weight, a ValueError should be # raised. assert_raises(ValueError, stats.wasserstein_distance, [0, 1], [2], [0, 0]) assert_raises(ValueError, stats.wasserstein_distance, [0, 1], [2], [3, 1], [0]) def test_negative_weights(self): # A ValueError should be raised if there are any negative weights. assert_raises(ValueError, stats.wasserstein_distance, [0, 1], [2, 2], [1, 1], [3, -1]) def test_empty_distribution(self): # A ValueError should be raised when trying to measure the distance # between something and nothing. assert_raises(ValueError, stats.wasserstein_distance, [], [2, 2]) assert_raises(ValueError, stats.wasserstein_distance, [1], []) def test_inf_weight(self): # An inf weight is not valid. assert_raises(ValueError, stats.wasserstein_distance, [1, 2, 1], [1, 1], [1, np.inf, 1], [1, 1]) class TestWassersteinDistance(object): """ Tests for wasserstein_distance() output values. """ def test_simple(self): # For basic distributions, the value of the Wasserstein distance is # straightforward. assert_almost_equal( stats.wasserstein_distance([0, 1], [0], [1, 1], [1]), .5) assert_almost_equal(stats.wasserstein_distance( [0, 1], [0], [3, 1], [1]), .25) assert_almost_equal(stats.wasserstein_distance( [0, 2], [0], [1, 1], [1]), 1) assert_almost_equal(stats.wasserstein_distance( [0, 1, 2], [1, 2, 3]), 1) def test_same_distribution(self): # Any distribution moved to itself should have a Wasserstein distance of # zero. assert_equal(stats.wasserstein_distance([1, 2, 3], [2, 1, 3]), 0) assert_equal( stats.wasserstein_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]), 0) def test_shift(self): # If the whole distribution is shifted by x, then the Wasserstein # distance should be x. assert_almost_equal(stats.wasserstein_distance([0], [1]), 1) assert_almost_equal(stats.wasserstein_distance([-5], [5]), 10) assert_almost_equal( stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]), 10) assert_almost_equal( stats.wasserstein_distance([4.5, 6.7, 2.1], [4.6, 7, 9.2], [3, 1, 1], [1, 3, 1]), 2.5) def test_combine_weights(self): # Assigning a weight w to a value is equivalent to including that value # w times in the value array with weight of 1. assert_almost_equal( stats.wasserstein_distance( [0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]), stats.wasserstein_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4])) def test_collapse(self): # Collapsing a distribution to a point distribution at zero is # equivalent to taking the average of the absolute values of the values. u = np.arange(-10, 30, 0.3) v = np.zeros_like(u) assert_almost_equal( stats.wasserstein_distance(u, v), np.mean(np.abs(u))) u_weights = np.arange(len(u)) v_weights = u_weights[::-1] assert_almost_equal( stats.wasserstein_distance(u, v, u_weights, v_weights), np.average(np.abs(u), weights=u_weights)) def test_zero_weight(self): # Values with zero weight have no impact on the Wasserstein distance. 
assert_almost_equal( stats.wasserstein_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]), stats.wasserstein_distance([1, 2], [1, 1], [1, 1], [1, 1])) def test_inf_values(self): # Inf values can lead to an inf distance or trigger a RuntimeWarning # (and return NaN) if the distance is undefined. assert_equal( stats.wasserstein_distance([1, 2, np.inf], [1, 1]), np.inf) assert_equal( stats.wasserstein_distance([1, 2, np.inf], [-np.inf, 1]), np.inf) assert_equal( stats.wasserstein_distance([1, -np.inf, np.inf], [1, 1]), np.inf) with suppress_warnings() as sup: r = sup.record(RuntimeWarning, "invalid value*") assert_equal( stats.wasserstein_distance([1, 2, np.inf], [np.inf, 1]), np.nan) class TestEnergyDistance(object): """ Tests for energy_distance() output values. """ def test_simple(self): # For basic distributions, the value of the energy distance is # straightforward. assert_almost_equal( stats.energy_distance([0, 1], [0], [1, 1], [1]), np.sqrt(2) * .5) assert_almost_equal(stats.energy_distance( [0, 1], [0], [3, 1], [1]), np.sqrt(2) * .25) assert_almost_equal(stats.energy_distance( [0, 2], [0], [1, 1], [1]), 2 * .5) assert_almost_equal( stats.energy_distance([0, 1, 2], [1, 2, 3]), np.sqrt(2) * (3*(1./3**2))**.5) def test_same_distribution(self): # Any distribution moved to itself should have a energy distance of # zero. assert_equal(stats.energy_distance([1, 2, 3], [2, 1, 3]), 0) assert_equal( stats.energy_distance([1, 1, 1, 4], [4, 1], [1, 1, 1, 1], [1, 3]), 0) def test_shift(self): # If a single-point distribution is shifted by x, then the energy # distance should be sqrt(2) * sqrt(x). assert_almost_equal(stats.energy_distance([0], [1]), np.sqrt(2)) assert_almost_equal( stats.energy_distance([-5], [5]), np.sqrt(2) * 10**.5) def test_combine_weights(self): # Assigning a weight w to a value is equivalent to including that value # w times in the value array with weight of 1. assert_almost_equal( stats.energy_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]), stats.energy_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4])) def test_zero_weight(self): # Values with zero weight have no impact on the energy distance. assert_almost_equal( stats.energy_distance([1, 2, 100000], [1, 1], [1, 1, 0], [1, 1]), stats.energy_distance([1, 2], [1, 1], [1, 1], [1, 1])) def test_inf_values(self): # Inf values can lead to an inf distance or trigger a RuntimeWarning # (and return NaN) if the distance is undefined. assert_equal(stats.energy_distance([1, 2, np.inf], [1, 1]), np.inf) assert_equal( stats.energy_distance([1, 2, np.inf], [-np.inf, 1]), np.inf) assert_equal( stats.energy_distance([1, -np.inf, np.inf], [1, 1]), np.inf) with suppress_warnings() as sup: r = sup.record(RuntimeWarning, "invalid value*") assert_equal( stats.energy_distance([1, 2, np.inf], [np.inf, 1]), np.nan)
170,720
39.000234
134
py
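The wasserstein_distance and energy_distance tests above encode the key properties of these metrics: shifting every point of a distribution by a constant shifts the 1-D Wasserstein distance by that constant, and integer weights act like repeating the corresponding values. A minimal standalone sketch of those two properties, assuming scipy >= 1.0 where both functions live in scipy.stats with the (u_values, v_values, u_weights, v_weights) call pattern used in the tests; the value sets are taken directly from test_shift and test_combine_weights above.

import numpy as np
from scipy import stats

# Shift property: moving every point by 10 moves the distance by exactly 10.
print(stats.wasserstein_distance([1, 2, 3, 4, 5], [11, 12, 13, 14, 15]))  # ~10.0

# Weights behave like repetition: weight 2 on a value equals listing it twice.
d_weighted = stats.wasserstein_distance([5, 0, 1], [0, 4, 3], [1, 2, 4], [1, 2, 4])
d_repeated = stats.wasserstein_distance([0, 0, 1, 1, 1, 1, 5], [0, 3, 3, 3, 3, 4, 4])
print(np.isclose(d_weighted, d_repeated))  # True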
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/scipy/stats/tests/test_mstats_basic.py
""" Tests for the stats.mstats module (support for masked arrays) """ from __future__ import division, print_function, absolute_import import warnings import numpy as np from numpy import nan import numpy.ma as ma from numpy.ma import masked, nomask import scipy.stats.mstats as mstats from scipy import stats from .common_tests import check_named_results import pytest from pytest import raises as assert_raises from numpy.ma.testutils import (assert_equal, assert_almost_equal, assert_array_almost_equal, assert_array_almost_equal_nulp, assert_, assert_allclose, assert_array_equal) from scipy._lib._numpy_compat import suppress_warnings class TestMquantiles(object): def test_mquantiles_limit_keyword(self): # Regression test for Trac ticket #867 data = np.array([[6., 7., 1.], [47., 15., 2.], [49., 36., 3.], [15., 39., 4.], [42., 40., -999.], [41., 41., -999.], [7., -999., -999.], [39., -999., -999.], [43., -999., -999.], [40., -999., -999.], [36., -999., -999.]]) desired = [[19.2, 14.6, 1.45], [40.0, 37.5, 2.5], [42.8, 40.05, 3.55]] quants = mstats.mquantiles(data, axis=0, limit=(0, 50)) assert_almost_equal(quants, desired) class TestGMean(object): def test_1D(self): a = (1,2,3,4) actual = mstats.gmean(a) desired = np.power(1*2*3*4,1./4.) assert_almost_equal(actual, desired, decimal=14) desired1 = mstats.gmean(a,axis=-1) assert_almost_equal(actual, desired1, decimal=14) assert_(not isinstance(desired1, ma.MaskedArray)) a = ma.array((1,2,3,4),mask=(0,0,0,1)) actual = mstats.gmean(a) desired = np.power(1*2*3,1./3.) assert_almost_equal(actual, desired,decimal=14) desired1 = mstats.gmean(a,axis=-1) assert_almost_equal(actual, desired1, decimal=14) @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') def test_1D_float96(self): a = ma.array((1,2,3,4), mask=(0,0,0,1)) actual_dt = mstats.gmean(a, dtype=np.float96) desired_dt = np.power(1 * 2 * 3, 1. / 3.).astype(np.float96) assert_almost_equal(actual_dt, desired_dt, decimal=14) assert_(actual_dt.dtype == desired_dt.dtype) def test_2D(self): a = ma.array(((1, 2, 3, 4), (1, 2, 3, 4), (1, 2, 3, 4)), mask=((0, 0, 0, 0), (1, 0, 0, 1), (0, 1, 1, 0))) actual = mstats.gmean(a) desired = np.array((1,2,3,4)) assert_array_almost_equal(actual, desired, decimal=14) desired1 = mstats.gmean(a,axis=0) assert_array_almost_equal(actual, desired1, decimal=14) actual = mstats.gmean(a, -1) desired = ma.array((np.power(1*2*3*4,1./4.), np.power(2*3,1./2.), np.power(1*4,1./2.))) assert_array_almost_equal(actual, desired, decimal=14) class TestHMean(object): def test_1D(self): a = (1,2,3,4) actual = mstats.hmean(a) desired = 4. / (1./1 + 1./2 + 1./3 + 1./4) assert_almost_equal(actual, desired, decimal=14) desired1 = mstats.hmean(ma.array(a),axis=-1) assert_almost_equal(actual, desired1, decimal=14) a = ma.array((1,2,3,4),mask=(0,0,0,1)) actual = mstats.hmean(a) desired = 3. / (1./1 + 1./2 + 1./3) assert_almost_equal(actual, desired,decimal=14) desired1 = mstats.hmean(a,axis=-1) assert_almost_equal(actual, desired1, decimal=14) @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping') def test_1D_float96(self): a = ma.array((1,2,3,4), mask=(0,0,0,1)) actual_dt = mstats.hmean(a, dtype=np.float96) desired_dt = np.asarray(3. 
/ (1./1 + 1./2 + 1./3), dtype=np.float96) assert_almost_equal(actual_dt, desired_dt, decimal=14) assert_(actual_dt.dtype == desired_dt.dtype) def test_2D(self): a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)), mask=((0,0,0,0),(1,0,0,1),(0,1,1,0))) actual = mstats.hmean(a) desired = ma.array((1,2,3,4)) assert_array_almost_equal(actual, desired, decimal=14) actual1 = mstats.hmean(a,axis=-1) desired = (4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.) ) assert_array_almost_equal(actual1, desired, decimal=14) class TestRanking(object): def test_ranking(self): x = ma.array([0,1,1,1,2,3,4,5,5,6,]) assert_almost_equal(mstats.rankdata(x), [1,3,3,3,5,6,7,8.5,8.5,10]) x[[3,4]] = masked assert_almost_equal(mstats.rankdata(x), [1,2.5,2.5,0,0,4,5,6.5,6.5,8]) assert_almost_equal(mstats.rankdata(x, use_missing=True), [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8]) x = ma.array([0,1,5,1,2,4,3,5,1,6,]) assert_almost_equal(mstats.rankdata(x), [1,3,8.5,3,5,7,6,8.5,3,10]) x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]]) assert_almost_equal(mstats.rankdata(x), [[1,3,3,3,5], [6,7,8.5,8.5,10]]) assert_almost_equal(mstats.rankdata(x, axis=1), [[1,3,3,3,5], [1,2,3.5,3.5,5]]) assert_almost_equal(mstats.rankdata(x,axis=0), [[1,1,1,1,1], [2,2,2,2,2,]]) class TestCorr(object): def test_pearsonr(self): # Tests some computations of Pearson's r x = ma.arange(10) with warnings.catch_warnings(): # The tests in this context are edge cases, with perfect # correlation or anticorrelation, or totally masked data. # None of these should trigger a RuntimeWarning. warnings.simplefilter("error", RuntimeWarning) assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0) assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0) x = ma.array(x, mask=True) pr = mstats.pearsonr(x, x) assert_(pr[0] is masked) assert_(pr[1] is masked) x1 = ma.array([-1.0, 0.0, 1.0]) y1 = ma.array([0, 0, 3]) r, p = mstats.pearsonr(x1, y1) assert_almost_equal(r, np.sqrt(3)/2) assert_almost_equal(p, 1.0/3) # (x2, y2) have the same unmasked data as (x1, y1). mask = [False, False, False, True] x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask) y2 = ma.array([0, 0, 3, -1], mask=mask) r, p = mstats.pearsonr(x2, y2) assert_almost_equal(r, np.sqrt(3)/2) assert_almost_equal(p, 1.0/3) def test_spearmanr(self): # Tests some computations of Spearman's rho (x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95]) assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555) (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan]) (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555) x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7] y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4] assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299) x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1, 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan] y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6, 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan] (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y)) assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299) # Next test is to make sure calculation uses sufficient precision. # The denominator's value is ~n^3 and used to be represented as an # int. 2000**3 > 2**32 so these arrays would cause overflow on # some machines. 
x = list(range(2000)) y = list(range(2000)) y[0], y[9] = y[9], y[0] y[10], y[434] = y[434], y[10] y[435], y[1509] = y[1509], y[435] # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1)) # = 1 - (1 / 500) # = 0.998 assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998) # test for namedtuple attributes res = mstats.spearmanr(x, y) attributes = ('correlation', 'pvalue') check_named_results(res, attributes, ma=True) def test_kendalltau(self): # Tests some computations of Kendall's tau x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan]) y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan]) z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan]) assert_almost_equal(np.asarray(mstats.kendalltau(x,y)), [+0.3333333,0.4969059]) assert_almost_equal(np.asarray(mstats.kendalltau(x,z)), [-0.5477226,0.2785987]) # x = ma.fix_invalid([0, 0, 0, 0,20,20, 0,60, 0,20, 10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan]) y = ma.fix_invalid([0,80,80,80,10,33,60, 0,67,27, 25,80,80,80,80,80,80, 0,10,45, np.nan, 0]) result = mstats.kendalltau(x,y) assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009]) # make sure internal variable use correct precision with # larger arrays x = np.arange(2000, dtype=float) x = ma.masked_greater(x, 1995) y = np.arange(2000, dtype=float) y = np.concatenate((y[1000:], y[:1000])) assert_(np.isfinite(mstats.kendalltau(x,y)[1])) # test for namedtuple attributes res = mstats.kendalltau(x, y) attributes = ('correlation', 'pvalue') check_named_results(res, attributes, ma=True) def test_kendalltau_seasonal(self): # Tests the seasonal Kendall tau. x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] x = ma.fix_invalid(x).T output = mstats.kendalltau_seasonal(x) assert_almost_equal(output['global p-value (indep)'], 0.008, 3) assert_almost_equal(output['seasonal p-value'].round(2), [0.18,0.53,0.20,0.04]) def test_pointbiserial(self): x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0, 0,0,0,0,1,-1] y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0, 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1, 0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan] assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5) # test for namedtuple attributes res = mstats.pointbiserialr(x, y) attributes = ('correlation', 'pvalue') check_named_results(res, attributes, ma=True) class TestTrimming(object): def test_trim(self): a = ma.arange(10) assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9]) a = ma.arange(10) assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None]) a = ma.arange(10) assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)), [None,None,None,3,4,5,6,7,None,None]) a = ma.arange(10) assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True), [None,1,2,3,4,5,6,7,None,None]) a = ma.arange(12) a[[0,-1]] = a[5] = masked assert_equal(mstats.trim(a, (2,8)), [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None]) x = ma.arange(100).reshape(10, 10) expected = [1]*10 + [0]*70 + [1]*20 trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None) assert_equal(trimx._mask.ravel(), expected) trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0) assert_equal(trimx._mask.ravel(), expected) trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1) assert_equal(trimx._mask.T.ravel(), expected) # same as above, but with an extra masked row inserted x = ma.arange(110).reshape(11, 10) x[1] = masked expected = [1]*20 + [0]*70 + [1]*20 trimx = 
mstats.trim(x, (0.1,0.2), relative=True, axis=None) assert_equal(trimx._mask.ravel(), expected) trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0) assert_equal(trimx._mask.ravel(), expected) trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1) assert_equal(trimx.T._mask.ravel(), expected) def test_trim_old(self): x = ma.arange(100) assert_equal(mstats.trimboth(x).count(), 60) assert_equal(mstats.trimtail(x,tail='r').count(), 80) x[50:70] = masked trimx = mstats.trimboth(x) assert_equal(trimx.count(), 48) assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16) x._mask = nomask x.shape = (10,10) assert_equal(mstats.trimboth(x).count(), 60) assert_equal(mstats.trimtail(x).count(), 80) def test_trimmedmean(self): data = ma.array([77, 87, 88,114,151,210,219,246,253,262, 296,299,306,376,428,515,666,1310,2611]) assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0) assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0) assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0) def test_trimmed_stde(self): data = ma.array([77, 87, 88,114,151,210,219,246,253,262, 296,299,306,376,428,515,666,1310,2611]) assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5) assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5) def test_winsorization(self): data = ma.array([77, 87, 88,114,151,210,219,246,253,262, 296,299,306,376,428,515,666,1310,2611]) assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1), 21551.4, 1) assert_almost_equal( mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1), 11887.3, 1) data[5] = masked winsorized = mstats.winsorize(data) assert_equal(winsorized.mask, data.mask) class TestMoments(object): # Comparison numbers are found using R v.1.5.1 # note that length(testcase) = 4 # testmathworks comes from documentation for the # Statistics Toolbox for Matlab and can be found at both # http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml # http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml # Note that both test cases came from here. 
testcase = [1,2,3,4] testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965, np.nan]) testcase_2d = ma.array( np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149], [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407], [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733], [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998], [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]), mask=np.array([[True, False, False, True, False], [True, True, True, False, True], [False, False, False, False, False], [True, True, True, True, True], [False, False, True, False, False]], dtype=bool)) def test_moment(self): y = mstats.moment(self.testcase,1) assert_almost_equal(y,0.0,10) y = mstats.moment(self.testcase,2) assert_almost_equal(y,1.25) y = mstats.moment(self.testcase,3) assert_almost_equal(y,0.0) y = mstats.moment(self.testcase,4) assert_almost_equal(y,2.5625) def test_variation(self): y = mstats.variation(self.testcase) assert_almost_equal(y,0.44721359549996, 10) def test_skewness(self): y = mstats.skew(self.testmathworks) assert_almost_equal(y,-0.29322304336607,10) y = mstats.skew(self.testmathworks,bias=0) assert_almost_equal(y,-0.437111105023940,10) y = mstats.skew(self.testcase) assert_almost_equal(y,0.0,10) def test_kurtosis(self): # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis # for compatibility with Matlab) y = mstats.kurtosis(self.testmathworks,0,fisher=0,bias=1) assert_almost_equal(y, 2.1658856802973,10) # Note that MATLAB has confusing docs for the following case # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3) # The MATLAB docs imply that both should give Fisher's y = mstats.kurtosis(self.testmathworks,fisher=0, bias=0) assert_almost_equal(y, 3.663542721189047,10) y = mstats.kurtosis(self.testcase,0,0) assert_almost_equal(y,1.64) # test that kurtosis works on multidimensional masked arrays correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0., -1.26979517952]), mask=np.array([False, False, False, True, False], dtype=bool)) assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1), correct_2d) for i, row in enumerate(self.testcase_2d): assert_almost_equal(mstats.kurtosis(row), correct_2d[i]) correct_2d_bias_corrected = ma.array( np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]), mask=np.array([False, False, False, True, False], dtype=bool)) assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1, bias=False), correct_2d_bias_corrected) for i, row in enumerate(self.testcase_2d): assert_almost_equal(mstats.kurtosis(row, bias=False), correct_2d_bias_corrected[i]) # Check consistency between stats and mstats implementations assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]), stats.kurtosis(self.testcase_2d[2, :]), nulp=4) def test_mode(self): a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7] a2 = np.reshape(a1, (3,5)) a3 = np.array([1,2,3,4,5,6]) a4 = np.reshape(a3, (3,2)) ma1 = ma.masked_where(ma.array(a1) > 2, a1) ma2 = ma.masked_where(a2 > 2, a2) ma3 = ma.masked_where(a3 < 2, a3) ma4 = ma.masked_where(ma.array(a4) < 2, a4) assert_equal(mstats.mode(a1, axis=None), (3,4)) assert_equal(mstats.mode(a1, axis=0), (3,4)) assert_equal(mstats.mode(ma1, axis=None), (0,3)) assert_equal(mstats.mode(a2, axis=None), (3,4)) assert_equal(mstats.mode(ma2, axis=None), (0,3)) assert_equal(mstats.mode(a3, axis=None), (1,1)) assert_equal(mstats.mode(ma3, axis=None), (2,1)) 
assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]])) assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]])) assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]])) assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]])) assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]])) assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]])) a1_res = mstats.mode(a1, axis=None) # test for namedtuple attributes attributes = ('mode', 'count') check_named_results(a1_res, attributes, ma=True) def test_mode_modifies_input(self): # regression test for gh-6428: mode(..., axis=None) may not modify # the input array im = np.zeros((100, 100)) im[:50, :] += 1 im[:, :50] += 1 cp = im.copy() a = mstats.mode(im, None) assert_equal(im, cp) class TestPercentile(object): def setup_method(self): self.a1 = [3,4,5,10,-3,-5,6] self.a2 = [3,-6,-2,8,7,4,2,1] self.a3 = [3.,4,5,10,-3,-5,-6,7.0] def test_percentile(self): x = np.arange(8) * 0.5 assert_equal(mstats.scoreatpercentile(x, 0), 0.) assert_equal(mstats.scoreatpercentile(x, 100), 3.5) assert_equal(mstats.scoreatpercentile(x, 50), 1.75) def test_2D(self): x = ma.array([[1, 1, 1], [1, 1, 1], [4, 4, 3], [1, 1, 1], [1, 1, 1]]) assert_equal(mstats.scoreatpercentile(x,50), [1,1,1]) class TestVariability(object): """ Comparison numbers are found using R v.1.5.1 note that length(testcase) = 4 """ testcase = ma.fix_invalid([1,2,3,4,np.nan]) def test_sem(self): # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3) y = mstats.sem(self.testcase) assert_almost_equal(y, 0.6454972244) n = self.testcase.count() assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)), mstats.sem(self.testcase, ddof=2)) def test_zmap(self): # This is not in R, so tested by using: # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4) y = mstats.zmap(self.testcase, self.testcase) desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999]) assert_array_almost_equal(desired_unmaskedvals, y.data[y.mask == False], decimal=12) def test_zscore(self): # This is not in R, so tested by using: # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4) y = mstats.zscore(self.testcase) desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996, 0.44721359549996, 1.3416407864999, np.nan]) assert_almost_equal(desired, y, decimal=12) class TestMisc(object): def test_obrientransform(self): args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2, [6]+[7]*2+[8]*4+[9]*9+[10]*16] result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538], [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]] assert_almost_equal(np.round(mstats.obrientransform(*args).T,4), result,4) def test_kstwosamp(self): x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] x = ma.fix_invalid(x).T (winter,spring,summer,fall) = x.T assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring),4), (0.1818,0.9892)) assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'g'),4), (0.1469,0.7734)) assert_almost_equal(np.round(mstats.ks_twosamp(winter,spring,'l'),4), (0.1818,0.6744)) def test_friedmanchisq(self): # No missing values args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0], [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0], [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0]) result = mstats.friedmanchisquare(*args) assert_almost_equal(result[0], 10.4737, 4) 
assert_almost_equal(result[1], 0.005317, 6) # Missing values x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1], [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3], [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan], [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]] x = ma.fix_invalid(x) result = mstats.friedmanchisquare(*x) assert_almost_equal(result[0], 2.0156, 4) assert_almost_equal(result[1], 0.5692, 4) # test for namedtuple attributes attributes = ('statistic', 'pvalue') check_named_results(result, attributes, ma=True) def test_regress_simple(): # Regress a line with sinusoidal noise. Test for #1273. x = np.linspace(0, 100, 100) y = 0.2 * np.linspace(0, 100, 100) + 10 y += np.sin(np.linspace(0, 20, 100)) slope, intercept, r_value, p_value, sterr = mstats.linregress(x, y) assert_almost_equal(slope, 0.19644990055858422) assert_almost_equal(intercept, 10.211269918932341) # test for namedtuple attributes res = mstats.linregress(x, y) attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr') check_named_results(res, attributes, ma=True) def test_theilslopes(): # Test for basic slope and intercept. slope, intercept, lower, upper = mstats.theilslopes([0,1,1]) assert_almost_equal(slope, 0.5) assert_almost_equal(intercept, 0.5) # Test for correct masking. y = np.ma.array([0,1,100,1], mask=[False, False, True, False]) slope, intercept, lower, upper = mstats.theilslopes(y) assert_almost_equal(slope, 1./3) assert_almost_equal(intercept, 2./3) # Test of confidence intervals from example in Sen (1968). x = [1, 2, 3, 4, 10, 12, 18] y = [9, 15, 19, 20, 45, 55, 78] slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07) assert_almost_equal(slope, 4) assert_almost_equal(upper, 4.38, decimal=2) assert_almost_equal(lower, 3.71, decimal=2) def test_plotting_positions(): # Regression test for #1256 pos = mstats.plotting_positions(np.arange(3), 0, 0) assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75])) class TestNormalitytests(): def test_vs_nonmasked(self): x = np.array((-2,-1,0,1,2,3)*4)**2 assert_array_almost_equal(mstats.normaltest(x), stats.normaltest(x)) assert_array_almost_equal(mstats.skewtest(x), stats.skewtest(x)) assert_array_almost_equal(mstats.kurtosistest(x), stats.kurtosistest(x)) funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest] mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest] x = [1, 2, 3, 4] for func, mfunc in zip(funcs, mfuncs): assert_raises(ValueError, func, x) assert_raises(ValueError, mfunc, x) def test_axis_None(self): # Test axis=None (equal to axis=0 for 1-D input) x = np.array((-2,-1,0,1,2,3)*4)**2 assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x)) assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x)) assert_allclose(mstats.kurtosistest(x, axis=None), mstats.kurtosistest(x)) def test_maskedarray_input(self): # Add some masked values, test result doesn't change x = np.array((-2,-1,0,1,2,3)*4)**2 xm = np.ma.array(np.r_[np.inf, x, 10], mask=np.r_[True, [False] * x.size, True]) assert_allclose(mstats.normaltest(xm), stats.normaltest(x)) assert_allclose(mstats.skewtest(xm), stats.skewtest(x)) assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x)) def test_nd_input(self): x = np.array((-2,-1,0,1,2,3)*4)**2 x_2d = np.vstack([x] * 2).T for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]: res_1d = func(x) res_2d = func(x_2d) assert_allclose(res_2d[0], [res_1d[0]] * 2) assert_allclose(res_2d[1], [res_1d[1]] * 2) def test_normaltest_result_attributes(self): x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 
res = mstats.normaltest(x) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) def test_kurtosistest_result_attributes(self): x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 res = mstats.kurtosistest(x) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) class TestFOneway(): def test_result_attributes(self): a = np.array([655, 788], dtype=np.uint16) b = np.array([789, 772], dtype=np.uint16) res = mstats.f_oneway(a, b) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) class TestMannwhitneyu(): def test_result_attributes(self): x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]) y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.]) res = mstats.mannwhitneyu(x, y) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) class TestKruskal(): def test_result_attributes(self): x = [1, 3, 5, 7, 9] y = [2, 4, 6, 8, 10] res = mstats.kruskal(x, y) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) #TODO: for all ttest functions, add tests with masked array inputs class TestTtest_rel(): def test_vs_nonmasked(self): np.random.seed(1234567) outcome = np.random.randn(20, 4) + [0, 0, 1, 2] # 1-D inputs res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1]) res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1]) assert_allclose(res1, res2) # 2-D inputs res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None) res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None) assert_allclose(res1, res2) res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0) res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0) assert_allclose(res1, res2) # Check default is axis=0 res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:]) assert_allclose(res2, res3) def test_fully_masked(self): np.random.seed(1234567) outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]]) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid 
value encountered in absolute") for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]: t, p = mstats.ttest_rel(*pair) assert_array_equal(t, (np.nan, np.nan)) assert_array_equal(p, (np.nan, np.nan)) def test_result_attributes(self): np.random.seed(1234567) outcome = np.random.randn(20, 4) + [0, 0, 1, 2] res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1]) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) def test_invalid_input_size(self): assert_raises(ValueError, mstats.ttest_rel, np.arange(10), np.arange(11)) x = np.arange(24) assert_raises(ValueError, mstats.ttest_rel, x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1) assert_raises(ValueError, mstats.ttest_rel, x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2) def test_empty(self): res1 = mstats.ttest_rel([], []) assert_(np.all(np.isnan(res1))) def test_zero_division(self): t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1]) assert_equal((np.abs(t), p), (np.inf, 0)) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in absolute") t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0]) assert_array_equal(t, np.array([np.nan, np.nan])) assert_array_equal(p, np.array([np.nan, np.nan])) class TestTtest_ind(): def test_vs_nonmasked(self): np.random.seed(1234567) outcome = np.random.randn(20, 4) + [0, 0, 1, 2] # 1-D inputs res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1]) res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1]) assert_allclose(res1, res2) # 2-D inputs res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None) res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None) assert_allclose(res1, res2) res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0) res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0) assert_allclose(res1, res2) # Check default is axis=0 res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:]) assert_allclose(res2, res3) # Check equal_var res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True) res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True) assert_allclose(res4, res5) res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False) res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False) assert_allclose(res4, res5) def test_fully_masked(self): np.random.seed(1234567) outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]]) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in absolute") for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]: t, p = mstats.ttest_ind(*pair) assert_array_equal(t, (np.nan, np.nan)) assert_array_equal(p, (np.nan, np.nan)) def test_result_attributes(self): np.random.seed(1234567) outcome = np.random.randn(20, 4) + [0, 0, 1, 2] res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1]) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) def test_empty(self): res1 = mstats.ttest_ind([], []) assert_(np.all(np.isnan(res1))) def test_zero_division(self): t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1]) assert_equal((np.abs(t), p), (np.inf, 0)) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in absolute") t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0]) assert_array_equal(t, (np.nan, np.nan)) assert_array_equal(p, (np.nan, np.nan)) t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False) assert_equal((np.abs(t), p), (np.inf, 0)) assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0], 
equal_var=False), (np.nan, np.nan)) class TestTtest_1samp(): def test_vs_nonmasked(self): np.random.seed(1234567) outcome = np.random.randn(20, 4) + [0, 0, 1, 2] # 1-D inputs res1 = stats.ttest_1samp(outcome[:, 0], 1) res2 = mstats.ttest_1samp(outcome[:, 0], 1) assert_allclose(res1, res2) # 2-D inputs res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None) res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None) assert_allclose(res1, res2) res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0) res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0) assert_allclose(res1, res2) # Check default is axis=0 res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:]) assert_allclose(res2, res3) def test_fully_masked(self): np.random.seed(1234567) outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1]) expected = (np.nan, np.nan) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in absolute") for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]: t, p = mstats.ttest_1samp(*pair) assert_array_equal(p, expected) assert_array_equal(t, expected) def test_result_attributes(self): np.random.seed(1234567) outcome = np.random.randn(20, 4) + [0, 0, 1, 2] res = mstats.ttest_1samp(outcome[:, 0], 1) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) def test_empty(self): res1 = mstats.ttest_1samp([], 1) assert_(np.all(np.isnan(res1))) def test_zero_division(self): t, p = mstats.ttest_1samp([0, 0, 0], 1) assert_equal((np.abs(t), p), (np.inf, 0)) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered in absolute") t, p = mstats.ttest_1samp([0, 0, 0], 0) assert_(np.isnan(t)) assert_array_equal(p, (np.nan, np.nan)) class TestCompareWithStats(object): """ Class to compare mstats results with stats results. It is in general assumed that scipy.stats is at a more mature stage than stats.mstats. If a routine in mstats results in similar results like in scipy.stats, this is considered also as a proper validation of scipy.mstats routine. Different sample sizes are used for testing, as some problems between stats and mstats are dependent on sample size. Author: Alexander Loew NOTE that some tests fail. This might be caused by a) actual differences or bugs between stats and mstats b) numerical inaccuracies c) different definitions of routine interfaces These failures need to be checked. Current workaround is to have disabled these tests, but issuing reports on scipy-dev """ def get_n(self): """ Returns list of sample sizes to be used for comparison. 
""" return [1000, 100, 10, 5] def generate_xy_sample(self, n): # This routine generates numpy arrays and corresponding masked arrays # with the same data, but additional masked values np.random.seed(1234567) x = np.random.randn(n) y = x + np.random.randn(n) xm = np.ones(len(x) + 5) * 1e16 ym = np.ones(len(y) + 5) * 1e16 xm[0:len(x)] = x ym[0:len(y)] = y mask = xm > 9e15 xm = np.ma.array(xm, mask=mask) ym = np.ma.array(ym, mask=mask) return x, y, xm, ym def generate_xy_sample2D(self, n, nx): x = np.ones((n, nx)) * np.nan y = np.ones((n, nx)) * np.nan xm = np.ones((n+5, nx)) * np.nan ym = np.ones((n+5, nx)) * np.nan for i in range(nx): x[:,i], y[:,i], dx, dy = self.generate_xy_sample(n) xm[0:n, :] = x[0:n] ym[0:n, :] = y[0:n] xm = np.ma.array(xm, mask=np.isnan(xm)) ym = np.ma.array(ym, mask=np.isnan(ym)) return x, y, xm, ym def test_linregress(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) res1 = stats.linregress(x, y) res2 = stats.mstats.linregress(xm, ym) assert_allclose(np.asarray(res1), np.asarray(res2)) def test_pearsonr(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r, p = stats.pearsonr(x, y) rm, pm = stats.mstats.pearsonr(xm, ym) assert_almost_equal(r, rm, decimal=14) assert_almost_equal(p, pm, decimal=14) def test_spearmanr(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r, p = stats.spearmanr(x, y) rm, pm = stats.mstats.spearmanr(xm, ym) assert_almost_equal(r, rm, 14) assert_almost_equal(p, pm, 14) def test_gmean(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.gmean(abs(x)) rm = stats.mstats.gmean(abs(xm)) assert_allclose(r, rm, rtol=1e-13) r = stats.gmean(abs(y)) rm = stats.mstats.gmean(abs(ym)) assert_allclose(r, rm, rtol=1e-13) def test_hmean(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.hmean(abs(x)) rm = stats.mstats.hmean(abs(xm)) assert_almost_equal(r, rm, 10) r = stats.hmean(abs(y)) rm = stats.mstats.hmean(abs(ym)) assert_almost_equal(r, rm, 10) def test_skew(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.skew(x) rm = stats.mstats.skew(xm) assert_almost_equal(r, rm, 10) r = stats.skew(y) rm = stats.mstats.skew(ym) assert_almost_equal(r, rm, 10) def test_moment(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.moment(x) rm = stats.mstats.moment(xm) assert_almost_equal(r, rm, 10) r = stats.moment(y) rm = stats.mstats.moment(ym) assert_almost_equal(r, rm, 10) def test_zscore(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) #reference solution zx = (x - x.mean()) / x.std() zy = (y - y.mean()) / y.std() #validate stats assert_allclose(stats.zscore(x), zx, rtol=1e-10) assert_allclose(stats.zscore(y), zy, rtol=1e-10) #compare stats and mstats assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]), rtol=1e-10) assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]), rtol=1e-10) def test_kurtosis(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.kurtosis(x) rm = stats.mstats.kurtosis(xm) assert_almost_equal(r, rm, 10) r = stats.kurtosis(y) rm = stats.mstats.kurtosis(ym) assert_almost_equal(r, rm, 10) def test_sem(self): # example from stats.sem doc a = np.arange(20).reshape(5,4) am = np.ma.array(a) r = stats.sem(a,ddof=1) rm = stats.mstats.sem(am, ddof=1) assert_allclose(r, 2.82842712, atol=1e-5) assert_allclose(rm, 2.82842712, atol=1e-5) for n in self.get_n(): x, y, xm, ym = 
self.generate_xy_sample(n) assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0), stats.sem(x, axis=None, ddof=0), decimal=13) assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0), stats.sem(y, axis=None, ddof=0), decimal=13) assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1), stats.sem(x, axis=None, ddof=1), decimal=13) assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1), stats.sem(y, axis=None, ddof=1), decimal=13) def test_describe(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.describe(x, ddof=1) rm = stats.mstats.describe(xm, ddof=1) for ii in range(6): assert_almost_equal(np.asarray(r[ii]), np.asarray(rm[ii]), decimal=12) def test_describe_result_attributes(self): actual = mstats.describe(np.arange(5)) attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness', 'kurtosis') check_named_results(actual, attributes, ma=True) def test_rankdata(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.rankdata(x) rm = stats.mstats.rankdata(x) assert_allclose(r, rm) def test_tmean(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14) assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14) def test_tmax(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) assert_almost_equal(stats.tmax(x,2.), stats.mstats.tmax(xm,2.), 10) assert_almost_equal(stats.tmax(y,2.), stats.mstats.tmax(ym,2.), 10) assert_almost_equal(stats.tmax(x, upperlimit=3.), stats.mstats.tmax(xm, upperlimit=3.), 10) assert_almost_equal(stats.tmax(y, upperlimit=3.), stats.mstats.tmax(ym, upperlimit=3.), 10) def test_tmin(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) assert_equal(stats.tmin(x),stats.mstats.tmin(xm)) assert_equal(stats.tmin(y),stats.mstats.tmin(ym)) assert_almost_equal(stats.tmin(x,lowerlimit=-1.), stats.mstats.tmin(xm,lowerlimit=-1.), 10) assert_almost_equal(stats.tmin(y,lowerlimit=-1.), stats.mstats.tmin(ym,lowerlimit=-1.), 10) def test_zmap(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) z = stats.zmap(x,y) zm = stats.mstats.zmap(xm,ym) assert_allclose(z, zm[0:len(z)], atol=1e-10) def test_variation(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) assert_almost_equal(stats.variation(x), stats.mstats.variation(xm), decimal=12) assert_almost_equal(stats.variation(y), stats.mstats.variation(ym), decimal=12) def test_tvar(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm), decimal=12) assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym), decimal=12) def test_trimboth(self): a = np.arange(20) b = stats.trimboth(a, 0.1) bm = stats.mstats.trimboth(a, 0.1) assert_allclose(np.sort(b), bm.data[~bm.mask]) def test_tsem(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) assert_almost_equal(stats.tsem(x),stats.mstats.tsem(xm), decimal=14) assert_almost_equal(stats.tsem(y),stats.mstats.tsem(ym), decimal=14) assert_almost_equal(stats.tsem(x,limits=(-2.,2.)), stats.mstats.tsem(xm,limits=(-2.,2.)), decimal=14) def test_skewtest(self): # this test is for 1D data for n in self.get_n(): if n > 8: x, y, xm, ym = self.generate_xy_sample(n) r = stats.skewtest(x) rm = stats.mstats.skewtest(xm) assert_allclose(r[0], rm[0], rtol=1e-15) # TODO this test is not performed as it is a known issue that # mstats returns a slightly different p-value what is a bit # strange is 
that other tests like test_maskedarray_input don't # fail! #~ assert_almost_equal(r[1], rm[1]) def test_skewtest_result_attributes(self): x = np.array((-2, -1, 0, 1, 2, 3)*4)**2 res = mstats.skewtest(x) attributes = ('statistic', 'pvalue') check_named_results(res, attributes, ma=True) def test_skewtest_2D_notmasked(self): # a normal ndarray is passed to the masked function x = np.random.random((20, 2)) * 20. r = stats.skewtest(x) rm = stats.mstats.skewtest(x) assert_allclose(np.asarray(r), np.asarray(rm)) def test_skewtest_2D_WithMask(self): nx = 2 for n in self.get_n(): if n > 8: x, y, xm, ym = self.generate_xy_sample2D(n, nx) r = stats.skewtest(x) rm = stats.mstats.skewtest(xm) assert_equal(r[0][0],rm[0][0]) assert_equal(r[0][1],rm[0][1]) def test_normaltest(self): np.seterr(over='raise') with suppress_warnings() as sup: sup.filter(UserWarning, "kurtosistest only valid for n>=20") for n in self.get_n(): if n > 8: x, y, xm, ym = self.generate_xy_sample(n) r = stats.normaltest(x) rm = stats.mstats.normaltest(xm) assert_allclose(np.asarray(r), np.asarray(rm)) def test_find_repeats(self): x = np.asarray([1,1,2,2,3,3,3,4,4,4,4]).astype('float') tmp = np.asarray([1,1,2,2,3,3,3,4,4,4,4,5,5,5,5]).astype('float') mask = (tmp == 5.) xm = np.ma.array(tmp, mask=mask) x_orig, xm_orig = x.copy(), xm.copy() r = stats.find_repeats(x) rm = stats.mstats.find_repeats(xm) assert_equal(r, rm) assert_equal(x, x_orig) assert_equal(xm, xm_orig) # This crazy behavior is expected by count_tied_groups, but is not # in the docstring... _, counts = stats.mstats.find_repeats([]) assert_equal(counts, np.array(0, dtype=np.intp)) def test_kendalltau(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.kendalltau(x, y) rm = stats.mstats.kendalltau(xm, ym) assert_almost_equal(r[0], rm[0], decimal=10) assert_almost_equal(r[1], rm[1], decimal=7) def test_obrientransform(self): for n in self.get_n(): x, y, xm, ym = self.generate_xy_sample(n) r = stats.obrientransform(x) rm = stats.mstats.obrientransform(xm) assert_almost_equal(r.T, rm[0:len(x)])
52,906
40.398279
93
py
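The TestCompareWithStats class above validates scipy.stats.mstats against scipy.stats by padding each sample with large sentinel values and masking them out, then checking that both implementations agree on the unmasked data. A minimal sketch of that comparison pattern, using only calls that appear in the tests (np.ma.array, stats.pearsonr, mstats.pearsonr); the sample size of 100 is an arbitrary choice for illustration.

import numpy as np
from scipy import stats
import scipy.stats.mstats as mstats

np.random.seed(1234567)
x = np.random.randn(100)
y = x + np.random.randn(100)

# Pad with out-of-range sentinels and mask them, as generate_xy_sample does above.
xm = np.ma.array(np.r_[x, [1e16] * 5], mask=np.r_[[False] * 100, [True] * 5])
ym = np.ma.array(np.r_[y, [1e16] * 5], mask=np.r_[[False] * 100, [True] * 5])

r, p = stats.pearsonr(x, y)
rm, pm = mstats.pearsonr(xm, ym)
print(np.allclose([r, p], [rm, pm]))  # True: masked entries are ignored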
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/function_docs.py
""" Extended docstrings for functions.py """ pi = r""" `\pi`, roughly equal to 3.141592654, represents the area of the unit circle, the half-period of trigonometric functions, and many other things in mathematics. Mpmath can evaluate `\pi` to arbitrary precision:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +pi 3.1415926535897932384626433832795028841971693993751 This shows digits 99991-100000 of `\pi`:: >>> mp.dps = 100000 >>> str(pi)[-10:] '5549362464' **Possible issues** :data:`pi` always rounds to the nearest floating-point number when used. This means that exact mathematical identities involving `\pi` will generally not be preserved in floating-point arithmetic. In particular, multiples of :data:`pi` (except for the trivial case ``0*pi``) are *not* the exact roots of :func:`~mpmath.sin`, but differ roughly by the current epsilon:: >>> mp.dps = 15 >>> sin(pi) 1.22464679914735e-16 One solution is to use the :func:`~mpmath.sinpi` function instead:: >>> sinpi(1) 0.0 See the documentation of trigonometric functions for additional details. """ degree = r""" Represents one degree of angle, `1^{\circ} = \pi/180`, or about 0.01745329. This constant may be evaluated to arbitrary precision:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +degree 0.017453292519943295769236907684886127134428718885417 The :data:`degree` object is convenient for conversion to radians:: >>> sin(30 * degree) 0.5 >>> asin(0.5) / degree 30.0 """ e = r""" The transcendental number `e` = 2.718281828... is the base of the natural logarithm (:func:`~mpmath.ln`) and of the exponential function (:func:`~mpmath.exp`). Mpmath can be evaluate `e` to arbitrary precision:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +e 2.7182818284590452353602874713526624977572470937 This shows digits 99991-100000 of `e`:: >>> mp.dps = 100000 >>> str(e)[-10:] '2100427165' **Possible issues** :data:`e` always rounds to the nearest floating-point number when used, and mathematical identities involving `e` may not hold in floating-point arithmetic. For example, ``ln(e)`` might not evaluate exactly to 1. In particular, don't use ``e**x`` to compute the exponential function. Use ``exp(x)`` instead; this is both faster and more accurate. """ phi = r""" Represents the golden ratio `\phi = (1+\sqrt 5)/2`, approximately equal to 1.6180339887. To high precision, its value is:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +phi 1.6180339887498948482045868343656381177203091798058 Formulas for the golden ratio include the following:: >>> (1+sqrt(5))/2 1.6180339887498948482045868343656381177203091798058 >>> findroot(lambda x: x**2-x-1, 1) 1.6180339887498948482045868343656381177203091798058 >>> limit(lambda n: fib(n+1)/fib(n), inf) 1.6180339887498948482045868343656381177203091798058 """ euler = r""" Euler's constant or the Euler-Mascheroni constant `\gamma` = 0.57721566... is a number of central importance to number theory and special functions. It is defined as the limit .. math :: \gamma = \lim_{n\to\infty} H_n - \log n where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic number (see :func:`~mpmath.harmonic`). 
Evaluation of `\gamma` is supported at arbitrary precision:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +euler 0.57721566490153286060651209008240243104215933593992 We can also compute `\gamma` directly from the definition, although this is less efficient:: >>> limit(lambda n: harmonic(n)-log(n), inf) 0.57721566490153286060651209008240243104215933593992 This shows digits 9991-10000 of `\gamma`:: >>> mp.dps = 10000 >>> str(euler)[-10:] '4679858165' Integrals, series, and representations for `\gamma` in terms of special functions include the following (there are many others):: >>> mp.dps = 25 >>> -quad(lambda x: exp(-x)*log(x), [0,inf]) 0.5772156649015328606065121 >>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1]) 0.5772156649015328606065121 >>> nsum(lambda k: 1/k-log(1+1/k), [1,inf]) 0.5772156649015328606065121 >>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf]) 0.5772156649015328606065121 >>> -diff(gamma, 1) 0.5772156649015328606065121 >>> limit(lambda x: 1/x-gamma(x), 0) 0.5772156649015328606065121 >>> limit(lambda x: zeta(x)-1/(x-1), 1) 0.5772156649015328606065121 >>> (log(2*pi*nprod(lambda n: ... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2 0.5772156649015328606065121 For generalizations of the identities `\gamma = -\Gamma'(1)` and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see :func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively. """ catalan = r""" Catalan's constant `K` = 0.91596559... is given by the infinite series .. math :: K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}. Mpmath can evaluate it to arbitrary precision:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +catalan 0.91596559417721901505460351493238411077414937428167 One can also compute `K` directly from the definition, although this is significantly less efficient:: >>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf]) 0.91596559417721901505460351493238411077414937428167 This shows digits 9991-10000 of `K`:: >>> mp.dps = 10000 >>> str(catalan)[-10:] '9537871503' Catalan's constant has numerous integral representations:: >>> mp.dps = 50 >>> quad(lambda x: -log(x)/(1+x**2), [0, 1]) 0.91596559417721901505460351493238411077414937428167 >>> quad(lambda x: atan(x)/x, [0, 1]) 0.91596559417721901505460351493238411077414937428167 >>> quad(lambda x: ellipk(x**2)/2, [0, 1]) 0.91596559417721901505460351493238411077414937428167 >>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1]) 0.91596559417721901505460351493238411077414937428167 As well as series representations:: >>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n: ... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8 0.91596559417721901505460351493238411077414937428167 >>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf]) 0.91596559417721901505460351493238411077414937428167 """ khinchin = r""" Khinchin's constant `K` = 2.68542... is a number that appears in the theory of continued fractions. Mpmath can evaluate it to arbitrary precision:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +khinchin 2.6854520010653064453097148354817956938203822939945 An integral representation is:: >>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1]) >>> 2*exp(1/log(2)*I) 2.6854520010653064453097148354817956938203822939945 The computation of ``khinchin`` is based on an efficient implementation of the following series:: >>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k) ... 
for k in range(1,2*int(n))) >>> exp(nsum(f, [1,inf])/log(2)) 2.6854520010653064453097148354817956938203822939945 """ glaisher = r""" Glaisher's constant `A`, also known as the Glaisher-Kinkelin constant, is a number approximately equal to 1.282427129 that sometimes appears in formulas related to gamma and zeta functions. It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`). The constant is defined as `A = \exp(1/12-\zeta'(-1))` where `\zeta'(s)` denotes the derivative of the Riemann zeta function (see :func:`~mpmath.zeta`). Mpmath can evaluate Glaisher's constant to arbitrary precision: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +glaisher 1.282427129100622636875342568869791727767688927325 We can verify that the value computed by :data:`glaisher` is correct using mpmath's facilities for numerical differentiation and arbitrary evaluation of the zeta function: >>> exp(mpf(1)/12 - diff(zeta, -1)) 1.282427129100622636875342568869791727767688927325 Here is an example of an integral that can be evaluated in terms of Glaisher's constant: >>> mp.dps = 15 >>> quad(lambda x: log(gamma(x)), [1, 1.5]) -0.0428537406502909 >>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2 -0.042853740650291 Mpmath computes Glaisher's constant by applying Euler-Maclaurin summation to a slowly convergent series. The implementation is reasonably efficient up to about 10,000 digits. See the source code for additional details. References: http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html """ apery = r""" Represents Apery's constant, which is the irrational number approximately equal to 1.2020569 given by .. math :: \zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}. The calculation is based on an efficient hypergeometric series. To 50 decimal places, the value is given by:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +apery 1.2020569031595942853997381615114499907649862923405 Other ways to evaluate Apery's constant using mpmath include:: >>> zeta(3) 1.2020569031595942853997381615114499907649862923405 >>> -psi(2,1)/2 1.2020569031595942853997381615114499907649862923405 >>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7 1.2020569031595942853997381615114499907649862923405 >>> f = lambda k: 2/k**3/(exp(2*pi*k)-1) >>> 7*pi**3/180 - nsum(f, [1,inf]) 1.2020569031595942853997381615114499907649862923405 This shows digits 9991-10000 of Apery's constant:: >>> mp.dps = 10000 >>> str(apery)[-10:] '3189504235' """ mertens = r""" Represents the Mertens or Meissel-Mertens constant, which is the prime number analog of Euler's constant: .. math :: B_1 = \lim_{N\to\infty} \left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right) Here `p_k` denotes the `k`-th prime number. Other names for this constant include the Hadamard-de la Vallee-Poussin constant or the prime reciprocal constant. The following gives the Mertens constant to 50 digits:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +mertens 0.2614972128476427837554268386086958590515666482612 References: http://mathworld.wolfram.com/MertensConstant.html """ twinprime = r""" Represents the twin prime constant, which is the factor `C_2` featuring in the Hardy-Littlewood conjecture for the growth of the twin prime counting function, .. math :: \pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}. It is given by the product over primes .. 
math :: C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016 Computing `C_2` to 50 digits:: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> +twinprime 0.66016181584686957392781211001455577843262336028473 References: http://mathworld.wolfram.com/TwinPrimesConstant.html """ ln = r""" Computes the natural logarithm of `x`, `\ln x`. See :func:`~mpmath.log` for additional documentation.""" sqrt = r""" ``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`. For positive real numbers, the principal root is simply the positive square root. For arbitrary complex numbers, the principal square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`. The function thus has a branch cut along the negative half real axis. For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to performing ``x**0.5``. **Examples** Basic examples and limits:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> sqrt(10) 3.16227766016838 >>> sqrt(100) 10.0 >>> sqrt(-4) (0.0 + 2.0j) >>> sqrt(1+1j) (1.09868411346781 + 0.455089860562227j) >>> sqrt(inf) +inf Square root evaluation is fast at huge precision:: >>> mp.dps = 50000 >>> a = sqrt(3) >>> str(a)[-10:] '9329332814' :func:`mpmath.iv.sqrt` supports interval arguments:: >>> iv.dps = 15; iv.pretty = True >>> iv.sqrt([16,100]) [4.0, 10.0] >>> iv.sqrt(2) [1.4142135623730949234, 1.4142135623730951455] >>> iv.sqrt(2) ** 2 [1.9999999999999995559, 2.0000000000000004441] """ cbrt = r""" ``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This function is faster and more accurate than raising to a floating-point fraction:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> 125**(mpf(1)/3) mpf('4.9999999999999991') >>> cbrt(125) mpf('5.0') Every nonzero complex number has three cube roots. This function returns the cube root defined by `\exp(\log(x)/3)` where the principal branch of the natural logarithm is used. Note that this does not give a real cube root for negative real numbers:: >>> mp.pretty = True >>> cbrt(-1) (0.5 + 0.866025403784439j) """ exp = r""" Computes the exponential function, .. math :: \exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}. For complex numbers, the exponential function also satisfies .. math :: \exp(x+yi) = e^x (\cos y + i \sin y). 
**Basic examples** Some values of the exponential function:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> exp(0) 1.0 >>> exp(1) 2.718281828459045235360287 >>> exp(-1) 0.3678794411714423215955238 >>> exp(inf) +inf >>> exp(-inf) 0.0 Arguments can be arbitrarily large:: >>> exp(10000) 8.806818225662921587261496e+4342 >>> exp(-10000) 1.135483865314736098540939e-4343 Evaluation is supported for interval arguments via :func:`mpmath.iv.exp`:: >>> iv.dps = 25; iv.pretty = True >>> iv.exp([-inf,0]) [0.0, 1.0] >>> iv.exp([0,1]) [1.0, 2.71828182845904523536028749558] The exponential function can be evaluated efficiently to arbitrary precision:: >>> mp.dps = 10000 >>> exp(pi) #doctest: +ELLIPSIS 23.140692632779269005729...8984304016040616 **Functional properties** Numerical verification of Euler's identity for the complex exponential function:: >>> mp.dps = 15 >>> exp(j*pi)+1 (0.0 + 1.22464679914735e-16j) >>> chop(exp(j*pi)+1) 0.0 This recovers the coefficients (reciprocal factorials) in the Maclaurin series expansion of exp:: >>> nprint(taylor(exp, 0, 5)) [1.0, 1.0, 0.5, 0.166667, 0.0416667, 0.00833333] The exponential function is its own derivative and antiderivative:: >>> exp(pi) 23.1406926327793 >>> diff(exp, pi) 23.1406926327793 >>> quad(exp, [-inf, pi]) 23.1406926327793 The exponential function can be evaluated using various methods, including direct summation of the series, limits, and solving the defining differential equation:: >>> nsum(lambda k: pi**k/fac(k), [0,inf]) 23.1406926327793 >>> limit(lambda k: (1+pi/k)**k, inf) 23.1406926327793 >>> odefun(lambda t, x: x, 0, 1)(pi) 23.1406926327793 """ cosh = r""" Computes the hyperbolic cosine of `x`, `\cosh(x) = (e^x + e^{-x})/2`. Values and limits include:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> cosh(0) 1.0 >>> cosh(1) 1.543080634815243778477906 >>> cosh(-inf), cosh(+inf) (+inf, +inf) The hyperbolic cosine is an even, convex function with a global minimum at `x = 0`, having a Maclaurin series that starts:: >>> nprint(chop(taylor(cosh, 0, 5))) [1.0, 0.0, 0.5, 0.0, 0.0416667, 0.0] Generalized to complex numbers, the hyperbolic cosine is equivalent to a cosine with the argument rotated in the imaginary direction, or `\cosh x = \cos ix`:: >>> cosh(2+3j) (-3.724545504915322565473971 + 0.5118225699873846088344638j) >>> cos(3-2j) (-3.724545504915322565473971 + 0.5118225699873846088344638j) """ sinh = r""" Computes the hyperbolic sine of `x`, `\sinh(x) = (e^x - e^{-x})/2`. Values and limits include:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> sinh(0) 0.0 >>> sinh(1) 1.175201193643801456882382 >>> sinh(-inf), sinh(+inf) (-inf, +inf) The hyperbolic sine is an odd function, with a Maclaurin series that starts:: >>> nprint(chop(taylor(sinh, 0, 5))) [0.0, 1.0, 0.0, 0.166667, 0.0, 0.00833333] Generalized to complex numbers, the hyperbolic sine is essentially a sine with a rotation `i` applied to the argument; more precisely, `\sinh x = -i \sin ix`:: >>> sinh(2+3j) (-3.590564589985779952012565 + 0.5309210862485198052670401j) >>> j*sin(3-2j) (-3.590564589985779952012565 + 0.5309210862485198052670401j) """ tanh = r""" Computes the hyperbolic tangent of `x`, `\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> tanh(0) 0.0 >>> tanh(1) 0.7615941559557648881194583 >>> tanh(-inf), tanh(inf) (-1.0, 1.0) The hyperbolic tangent is an odd, sigmoidal function, similar to the inverse tangent and error function. 
Its Maclaurin series is:: >>> nprint(chop(taylor(tanh, 0, 5))) [0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333] Generalized to complex numbers, the hyperbolic tangent is essentially a tangent with a rotation `i` applied to the argument; more precisely, `\tanh x = -i \tan ix`:: >>> tanh(2+3j) (0.9653858790221331242784803 - 0.009884375038322493720314034j) >>> j*tan(3-2j) (0.9653858790221331242784803 - 0.009884375038322493720314034j) """ cos = r""" Computes the cosine of `x`, `\cos(x)`. >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> cos(pi/3) 0.5 >>> cos(100000001) -0.9802850113244713353133243 >>> cos(2+3j) (-4.189625690968807230132555 - 9.109227893755336597979197j) >>> cos(inf) nan >>> nprint(chop(taylor(cos, 0, 6))) [1.0, 0.0, -0.5, 0.0, 0.0416667, 0.0, -0.00138889] Intervals are supported via :func:`mpmath.iv.cos`:: >>> iv.dps = 25; iv.pretty = True >>> iv.cos([0,1]) [0.540302305868139717400936602301, 1.0] >>> iv.cos([0,2]) [-0.41614683654714238699756823214, 1.0] """ sin = r""" Computes the sine of `x`, `\sin(x)`. >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> sin(pi/3) 0.8660254037844386467637232 >>> sin(100000001) 0.1975887055794968911438743 >>> sin(2+3j) (9.1544991469114295734673 - 4.168906959966564350754813j) >>> sin(inf) nan >>> nprint(chop(taylor(sin, 0, 6))) [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333, 0.0] Intervals are supported via :func:`mpmath.iv.sin`:: >>> iv.dps = 25; iv.pretty = True >>> iv.sin([0,1]) [0.0, 0.841470984807896506652502331201] >>> iv.sin([0,2]) [0.0, 1.0] """ tan = r""" Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`. The tangent function is singular at `x = (n+1/2)\pi`, but ``tan(x)`` always returns a finite result since `(n+1/2)\pi` cannot be represented exactly using floating-point arithmetic. >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> tan(pi/3) 1.732050807568877293527446 >>> tan(100000001) -0.2015625081449864533091058 >>> tan(2+3j) (-0.003764025641504248292751221 + 1.003238627353609801446359j) >>> tan(inf) nan >>> nprint(chop(taylor(tan, 0, 6))) [0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0] Intervals are supported via :func:`mpmath.iv.tan`:: >>> iv.dps = 25; iv.pretty = True >>> iv.tan([0,1]) [0.0, 1.55740772465490223050697482944] >>> iv.tan([0,2]) # Interval includes a singularity [-inf, +inf] """ sec = r""" Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`. The secant function is singular at `x = (n+1/2)\pi`, but ``sec(x)`` always returns a finite result since `(n+1/2)\pi` cannot be represented exactly using floating-point arithmetic. >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> sec(pi/3) 2.0 >>> sec(10000001) -1.184723164360392819100265 >>> sec(2+3j) (-0.04167496441114427004834991 + 0.0906111371962375965296612j) >>> sec(inf) nan >>> nprint(chop(taylor(sec, 0, 6))) [1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 0.0847222] Intervals are supported via :func:`mpmath.iv.sec`:: >>> iv.dps = 25; iv.pretty = True >>> iv.sec([0,1]) [1.0, 1.85081571768092561791175326276] >>> iv.sec([0,2]) # Interval includes a singularity [-inf, +inf] """ csc = r""" Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`. This cosecant function is singular at `x = n \pi`, but with the exception of the point `x = 0`, ``csc(x)`` returns a finite result since `n \pi` cannot be represented exactly using floating-point arithmetic. 
>>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> csc(pi/3) 1.154700538379251529018298 >>> csc(10000001) -1.864910497503629858938891 >>> csc(2+3j) (0.09047320975320743980579048 + 0.04120098628857412646300981j) >>> csc(inf) nan Intervals are supported via :func:`mpmath.iv.csc`:: >>> iv.dps = 25; iv.pretty = True >>> iv.csc([0,1]) # Interval includes a singularity [1.18839510577812121626159943988, +inf] >>> iv.csc([0,2]) [1.0, +inf] """ cot = r""" Computes the cotangent of `x`, `\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`. This cotangent function is singular at `x = n \pi`, but with the exception of the point `x = 0`, ``cot(x)`` returns a finite result since `n \pi` cannot be represented exactly using floating-point arithmetic. >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> cot(pi/3) 0.5773502691896257645091488 >>> cot(10000001) 1.574131876209625656003562 >>> cot(2+3j) (-0.003739710376336956660117409 - 0.9967577965693583104609688j) >>> cot(inf) nan Intervals are supported via :func:`mpmath.iv.cot`:: >>> iv.dps = 25; iv.pretty = True >>> iv.cot([0,1]) # Interval includes a singularity [0.642092615934330703006419974862, +inf] >>> iv.cot([1,2]) [-inf, +inf] """ acos = r""" Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`. Since `-1 \le \cos(x) \le 1` for real `x`, the inverse cosine is real-valued only for `-1 \le x \le 1`. On this interval, :func:`~mpmath.acos` is defined to be a monotonically decreasing function assuming values between `+\pi` and `0`. Basic values are:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> acos(-1) 3.141592653589793238462643 >>> acos(0) 1.570796326794896619231322 >>> acos(1) 0.0 >>> nprint(chop(taylor(acos, 0, 6))) [1.5708, -1.0, 0.0, -0.166667, 0.0, -0.075, 0.0] :func:`~mpmath.acos` is defined so as to be a proper inverse function of `\cos(\theta)` for `0 \le \theta < \pi`. We have `\cos(\cos^{-1}(x)) = x` for all `x`, but `\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`:: >>> for x in [1, 10, -1, 2+3j, 10+3j]: ... print("%s %s" % (cos(acos(x)), acos(cos(x)))) ... 1.0 1.0 (10.0 + 0.0j) 2.566370614359172953850574 -1.0 1.0 (2.0 + 3.0j) (2.0 + 3.0j) (10.0 + 3.0j) (2.566370614359172953850574 - 3.0j) The inverse cosine has two branch points: `x = \pm 1`. :func:`~mpmath.acos` places the branch cuts along the line segments `(-\infty, -1)` and `(+1, +\infty)`. In general, .. math :: \cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right) where the principal-branch log and square root are implied. """ asin = r""" Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`. Since `-1 \le \sin(x) \le 1` for real `x`, the inverse sine is real-valued only for `-1 \le x \le 1`. On this interval, it is defined to be a monotonically increasing function assuming values between `-\pi/2` and `\pi/2`. Basic values are:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> asin(-1) -1.570796326794896619231322 >>> asin(0) 0.0 >>> asin(1) 1.570796326794896619231322 >>> nprint(chop(taylor(asin, 0, 6))) [0.0, 1.0, 0.0, 0.166667, 0.0, 0.075, 0.0] :func:`~mpmath.asin` is defined so as to be a proper inverse function of `\sin(\theta)` for `-\pi/2 < \theta < \pi/2`. We have `\sin(\sin^{-1}(x)) = x` for all `x`, but `\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`:: >>> for x in [1, 10, -1, 1+3j, -2+3j]: ... print("%s %s" % (chop(sin(asin(x))), asin(sin(x)))) ... 
1.0 1.0 10.0 -0.5752220392306202846120698 -1.0 -1.0 (1.0 + 3.0j) (1.0 + 3.0j) (-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j) The inverse sine has two branch points: `x = \pm 1`. :func:`~mpmath.asin` places the branch cuts along the line segments `(-\infty, -1)` and `(+1, +\infty)`. In general, .. math :: \sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right) where the principal-branch log and square root are implied. """ atan = r""" Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`. This is a real-valued function for all real `x`, with range `(-\pi/2, \pi/2)`. Basic values are:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> atan(-inf) -1.570796326794896619231322 >>> atan(-1) -0.7853981633974483096156609 >>> atan(0) 0.0 >>> atan(1) 0.7853981633974483096156609 >>> atan(inf) 1.570796326794896619231322 >>> nprint(chop(taylor(atan, 0, 6))) [0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0] The inverse tangent is often used to compute angles. However, the atan2 function is often better for this as it preserves sign (see :func:`~mpmath.atan2`). :func:`~mpmath.atan` is defined so as to be a proper inverse function of `\tan(\theta)` for `-\pi/2 < \theta < \pi/2`. We have `\tan(\tan^{-1}(x)) = x` for all `x`, but `\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`:: >>> mp.dps = 25 >>> for x in [1, 10, -1, 1+3j, -2+3j]: ... print("%s %s" % (tan(atan(x)), atan(tan(x)))) ... 1.0 1.0 10.0 0.5752220392306202846120698 -1.0 -1.0 (1.0 + 3.0j) (1.000000000000000000000001 + 3.0j) (-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j) The inverse tangent has two branch points: `x = \pm i`. :func:`~mpmath.atan` places the branch cuts along the line segments `(-i \infty, -i)` and `(+i, +i \infty)`. In general, .. math :: \tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right) where the principal-branch log is implied. """ acot = r"""Computes the inverse cotangent of `x`, `\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`.""" asec = r"""Computes the inverse secant of `x`, `\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`.""" acsc = r"""Computes the inverse cosecant of `x`, `\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`.""" coth = r"""Computes the hyperbolic cotangent of `x`, `\mathrm{coth}(x) = \frac{\cosh(x)}{\sinh(x)}`. """ sech = r"""Computes the hyperbolic secant of `x`, `\mathrm{sech}(x) = \frac{1}{\cosh(x)}`. """ csch = r"""Computes the hyperbolic cosecant of `x`, `\mathrm{csch}(x) = \frac{1}{\sinh(x)}`. """ acosh = r"""Computes the inverse hyperbolic cosine of `x`, `\mathrm{cosh}^{-1}(x) = \log(x+\sqrt{x+1}\sqrt{x-1})`. """ asinh = r"""Computes the inverse hyperbolic sine of `x`, `\mathrm{sinh}^{-1}(x) = \log(x+\sqrt{1+x^2})`. """ atanh = r"""Computes the inverse hyperbolic tangent of `x`, `\mathrm{tanh}^{-1}(x) = \frac{1}{2}\left(\log(1+x)-\log(1-x)\right)`. 
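A minimal numerical sketch of this formula and of the inverse relationship with :func:`~mpmath.tanh` (the test value is arbitrary)::

    >>> from mpmath import *
    >>> mp.dps = 25
    >>> x = mpf('0.3')
    >>> assert almosteq(atanh(x), (log(1+x) - log(1-x))/2)
    >>> assert almosteq(tanh(atanh(x)), x)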
""" acoth = r"""Computes the inverse hyperbolic cotangent of `x`, `\mathrm{coth}^{-1}(x) = \tanh^{-1}(1/x)`.""" asech = r"""Computes the inverse hyperbolic secant of `x`, `\mathrm{sech}^{-1}(x) = \cosh^{-1}(1/x)`.""" acsch = r"""Computes the inverse hyperbolic cosecant of `x`, `\mathrm{csch}^{-1}(x) = \sinh^{-1}(1/x)`.""" sinpi = r""" Computes `\sin(\pi x)`, more accurately than the expression ``sin(pi*x)``:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> sinpi(10**10), sin(pi*(10**10)) (0.0, -2.23936276195592e-6) >>> sinpi(10**10+0.5), sin(pi*(10**10+0.5)) (1.0, 0.999999999998721) """ cospi = r""" Computes `\cos(\pi x)`, more accurately than the expression ``cos(pi*x)``:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> cospi(10**10), cos(pi*(10**10)) (1.0, 0.999999999997493) >>> cospi(10**10+0.5), cos(pi*(10**10+0.5)) (0.0, 1.59960492420134e-6) """ sinc = r""" ``sinc(x)`` computes the unnormalized sinc function, defined as .. math :: \mathrm{sinc}(x) = \begin{cases} \sin(x)/x, & \mbox{if } x \ne 0 \\ 1, & \mbox{if } x = 0. \end{cases} See :func:`~mpmath.sincpi` for the normalized sinc function. Simple values and limits include:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> sinc(0) 1.0 >>> sinc(1) 0.841470984807897 >>> sinc(inf) 0.0 The integral of the sinc function is the sine integral Si:: >>> quad(sinc, [0, 1]) 0.946083070367183 >>> si(1) 0.946083070367183 """ sincpi = r""" ``sincpi(x)`` computes the normalized sinc function, defined as .. math :: \mathrm{sinc}_{\pi}(x) = \begin{cases} \sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\ 1, & \mbox{if } x = 0. \end{cases} Equivalently, we have `\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`. The normalization entails that the function integrates to unity over the entire real line:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> quadosc(sincpi, [-inf, inf], period=2.0) 1.0 Like, :func:`~mpmath.sinpi`, :func:`~mpmath.sincpi` is evaluated accurately at its roots:: >>> sincpi(10) 0.0 """ expj = r""" Convenience function for computing `e^{ix}`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> expj(0) (1.0 + 0.0j) >>> expj(-1) (0.5403023058681397174009366 - 0.8414709848078965066525023j) >>> expj(j) (0.3678794411714423215955238 + 0.0j) >>> expj(1+j) (0.1987661103464129406288032 + 0.3095598756531121984439128j) """ expjpi = r""" Convenience function for computing `e^{i \pi x}`. Evaluation is accurate near zeros (see also :func:`~mpmath.cospi`, :func:`~mpmath.sinpi`):: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> expjpi(0) (1.0 + 0.0j) >>> expjpi(1) (-1.0 + 0.0j) >>> expjpi(0.5) (0.0 + 1.0j) >>> expjpi(-1) (-1.0 + 0.0j) >>> expjpi(j) (0.04321391826377224977441774 + 0.0j) >>> expjpi(1+j) (-0.04321391826377224977441774 + 0.0j) """ floor = r""" Computes the floor of `x`, `\lfloor x \rfloor`, defined as the largest integer less than or equal to `x`:: >>> from mpmath import * >>> mp.pretty = False >>> floor(3.5) mpf('3.0') .. note :: :func:`~mpmath.floor`, :func:`~mpmath.ceil` and :func:`~mpmath.nint` return a floating-point number, not a Python ``int``. If `\lfloor x \rfloor` is too large to be represented exactly at the present working precision, the result will be rounded, not necessarily in the direction implied by the mathematical definition of the function. 
To avoid rounding, use *prec=0*:: >>> mp.dps = 15 >>> print(int(floor(10**30+1))) 1000000000000000019884624838656 >>> print(int(floor(10**30+1, prec=0))) 1000000000000000000000000000001 The floor function is defined for complex numbers and acts on the real and imaginary parts separately:: >>> floor(3.25+4.75j) mpc(real='3.0', imag='4.0') """ ceil = r""" Computes the ceiling of `x`, `\lceil x \rceil`, defined as the smallest integer greater than or equal to `x`:: >>> from mpmath import * >>> mp.pretty = False >>> ceil(3.5) mpf('4.0') The ceiling function is defined for complex numbers and acts on the real and imaginary parts separately:: >>> ceil(3.25+4.75j) mpc(real='4.0', imag='5.0') See notes about rounding for :func:`~mpmath.floor`. """ nint = r""" Evaluates the nearest integer function, `\mathrm{nint}(x)`. This gives the nearest integer to `x`; on a tie, it gives the nearest even integer:: >>> from mpmath import * >>> mp.pretty = False >>> nint(3.2) mpf('3.0') >>> nint(3.8) mpf('4.0') >>> nint(3.5) mpf('4.0') >>> nint(4.5) mpf('4.0') The nearest integer function is defined for complex numbers and acts on the real and imaginary parts separately:: >>> nint(3.25+4.75j) mpc(real='3.0', imag='5.0') See notes about rounding for :func:`~mpmath.floor`. """ frac = r""" Gives the fractional part of `x`, defined as `\mathrm{frac}(x) = x - \lfloor x \rfloor` (see :func:`~mpmath.floor`). In effect, this computes `x` modulo 1, or `x+n` where `n \in \mathbb{Z}` is such that `x+n \in [0,1)`:: >>> from mpmath import * >>> mp.pretty = False >>> frac(1.25) mpf('0.25') >>> frac(3) mpf('0.0') >>> frac(-1.25) mpf('0.75') For a complex number, the fractional part function applies to the real and imaginary parts separately:: >>> frac(2.25+3.75j) mpc(real='0.25', imag='0.75') Plotted, the fractional part function gives a sawtooth wave. The Fourier series coefficients have a simple form:: >>> mp.dps = 15 >>> nprint(fourier(lambda x: frac(x)-0.5, [0,1], 4)) ([0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -0.31831, -0.159155, -0.106103, -0.0795775]) >>> nprint([-1/(pi*k) for k in range(1,5)]) [-0.31831, -0.159155, -0.106103, -0.0795775] .. note:: The fractional part is sometimes defined as a symmetric function, i.e. returning `-\mathrm{frac}(-x)` if `x < 0`. This convention is used, for instance, by Mathematica's ``FractionalPart``. """ sign = r""" Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|` (with the special case `\mathrm{sign}(0) = 0`):: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> sign(10) mpf('1.0') >>> sign(-10) mpf('-1.0') >>> sign(0) mpf('0.0') Note that the sign function is also defined for complex numbers, for which it gives the projection onto the unit circle:: >>> mp.dps = 15; mp.pretty = True >>> sign(1+j) (0.707106781186547 + 0.707106781186547j) """ arg = r""" Computes the complex argument (phase) of `x`, defined as the signed angle between the positive real axis and `x` in the complex plane:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> arg(3) 0.0 >>> arg(3+3j) 0.785398163397448 >>> arg(3j) 1.5707963267949 >>> arg(-3) 3.14159265358979 >>> arg(-3j) -1.5707963267949 The angle is defined to satisfy `-\pi < \arg(x) \le \pi` and with the sign convention that a nonnegative imaginary part results in a nonnegative argument. The value returned by :func:`~mpmath.arg` is an ``mpf`` instance. """ fabs = r""" Returns the absolute value of `x`, `|x|`. 
Unlike :func:`abs`, :func:`~mpmath.fabs` converts non-mpmath numbers (such as ``int``) into mpmath numbers:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fabs(3) mpf('3.0') >>> fabs(-3) mpf('3.0') >>> fabs(3+4j) mpf('5.0') """ re = r""" Returns the real part of `x`, `\Re(x)`. :func:`~mpmath.re` converts a non-mpmath number to an mpmath number:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> re(3) mpf('3.0') >>> re(-1+4j) mpf('-1.0') """ im = r""" Returns the imaginary part of `x`, `\Im(x)`. :func:`~mpmath.im` converts a non-mpmath number to an mpmath number:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> im(3) mpf('0.0') >>> im(-1+4j) mpf('4.0') """ conj = r""" Returns the complex conjugate of `x`, `\overline{x}`. Unlike ``x.conjugate()``, :func:`~mpmath.conj` converts `x` to an mpmath number:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> conj(3) mpf('3.0') >>> conj(-1+4j) mpc(real='-1.0', imag='-4.0') """ polar = r""" Returns the polar representation of the complex number `z` as a pair `(r, \phi)` such that `z = r e^{i \phi}`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> polar(-2) (2.0, 3.14159265358979) >>> polar(3-4j) (5.0, -0.927295218001612) """ rect = r""" Returns the complex number represented by polar coordinates `(r, \phi)`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> chop(rect(2, pi)) -2.0 >>> rect(sqrt(2), -pi/4) (1.0 - 1.0j) """ expm1 = r""" Computes `e^x - 1`, accurately for small `x`. Unlike the expression ``exp(x) - 1``, ``expm1(x)`` does not suffer from potentially catastrophic cancellation:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> exp(1e-10)-1; print(expm1(1e-10)) 1.00000008274037e-10 1.00000000005e-10 >>> exp(1e-20)-1; print(expm1(1e-20)) 0.0 1.0e-20 >>> 1/(exp(1e-20)-1) Traceback (most recent call last): ... ZeroDivisionError >>> 1/expm1(1e-20) 1.0e+20 Evaluation works for extremely tiny values:: >>> expm1(0) 0.0 >>> expm1('1e-10000000') 1.0e-10000000 """ powm1 = r""" Computes `x^y - 1`, accurately when `x^y` is very close to 1. This avoids potentially catastrophic cancellation:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> power(0.99999995, 1e-10) - 1 0.0 >>> powm1(0.99999995, 1e-10) -5.00000012791934e-18 Powers exactly equal to 1, and only those powers, yield 0 exactly:: >>> powm1(-j, 4) (0.0 + 0.0j) >>> powm1(3, 0) 0.0 >>> powm1(fadd(-1, 1e-100, exact=True), 4) -4.0e-100 Evaluation works for extremely tiny `y`:: >>> powm1(2, '1e-100000') 6.93147180559945e-100001 >>> powm1(j, '1e-1000') (-1.23370055013617e-2000 + 1.5707963267949e-1000j) """ root = r""" ``root(z, n, k=0)`` computes an `n`-th root of `z`, i.e. returns a number `r` that (up to possible approximation error) satisfies `r^n = z`. (``nthroot`` is available as an alias for ``root``.) Every complex number `z \ne 0` has `n` distinct `n`-th roots, which are equidistant points on a circle with radius `|z|^{1/n}`, centered around the origin. A specific root may be selected using the optional index `k`. The roots are indexed counterclockwise, starting with `k = 0` for the root closest to the positive real half-axis. The `k = 0` root is the so-called principal `n`-th root, often denoted by `\sqrt[n]{z}` or `z^{1/n}`, and also given by `\exp(\log(z) / n)`. If `z` is a positive real number, the principal root is just the unique positive `n`-th root of `z`.
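These defining properties are easy to check numerically; the following is a minimal sketch (the test values and the explicit tolerance in the second check are arbitrary)::

    >>> from mpmath import *
    >>> mp.dps = 25
    >>> z, n = mpc(-2, 5), 7
    >>> assert almosteq(root(z, n), exp(log(z)/n))
    >>> assert almosteq(root(z, n)**n, z, rel_eps=mpf('1e-20'))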
Under some circumstances, non-principal real roots exist: for positive real `z`, `n` even, there is a negative root given by `k = n/2`; for negative real `z`, `n` odd, there is a negative root given by `k = (n-1)/2`. To obtain all roots with a simple expression, use ``[root(z,n,k) for k in range(n)]``. An important special case, ``root(1, n, k)`` returns the `k`-th `n`-th root of unity, `\zeta_k = e^{2 \pi i k / n}`. Alternatively, :func:`~mpmath.unitroots` provides a slightly more convenient way to obtain the roots of unity, including the option to compute only the primitive roots of unity. Both `k` and `n` should be integers; `k` outside of ``range(n)`` will be reduced modulo `n`. If `n` is negative, `x^{-1/n} = 1/x^{1/n}` (or the equivalent reciprocal for a non-principal root with `k \ne 0`) is computed. :func:`~mpmath.root` is implemented to use Newton's method for small `n`. At high precision, this makes `x^{1/n}` not much more expensive than the regular exponentiation, `x^n`. For very large `n`, :func:`~mpmath.nthroot` falls back to use the exponential function. **Examples** :func:`~mpmath.nthroot`/:func:`~mpmath.root` is faster and more accurate than raising to a floating-point fraction:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> 16807 ** (mpf(1)/5) mpf('7.0000000000000009') >>> root(16807, 5) mpf('7.0') >>> nthroot(16807, 5) # Alias mpf('7.0') A high-precision root:: >>> mp.dps = 50; mp.pretty = True >>> nthroot(10, 5) 1.584893192461113485202101373391507013269442133825 >>> nthroot(10, 5) ** 5 10.0 Computing principal and non-principal square and cube roots:: >>> mp.dps = 15 >>> root(10, 2) 3.16227766016838 >>> root(10, 2, 1) -3.16227766016838 >>> root(-10, 3) (1.07721734501594 + 1.86579517236206j) >>> root(-10, 3, 1) -2.15443469003188 >>> root(-10, 3, 2) (1.07721734501594 - 1.86579517236206j) All the 7th roots of a complex number:: >>> for r in [root(3+4j, 7, k) for k in range(7)]: ... print("%s %s" % (r, r**7)) ... (1.24747270589553 + 0.166227124177353j) (3.0 + 4.0j) (0.647824911301003 + 1.07895435170559j) (3.0 + 4.0j) (-0.439648254723098 + 1.17920694574172j) (3.0 + 4.0j) (-1.19605731775069 + 0.391492658196305j) (3.0 + 4.0j) (-1.05181082538903 - 0.691023585965793j) (3.0 + 4.0j) (-0.115529328478668 - 1.25318497558335j) (3.0 + 4.0j) (0.907748109144957 - 0.871672518271819j) (3.0 + 4.0j) Cube roots of unity:: >>> for k in range(3): print(root(1, 3, k)) ... 1.0 (-0.5 + 0.866025403784439j) (-0.5 - 0.866025403784439j) Some exact high order roots:: >>> root(75**210, 105) 5625.0 >>> root(1, 128, 96) (0.0 - 1.0j) >>> root(4**128, 128, 96) (0.0 - 4.0j) """ unitroots = r""" ``unitroots(n)`` returns `\zeta_0, \zeta_1, \ldots, \zeta_{n-1}`, all the distinct `n`-th roots of unity, as a list. If the option *primitive=True* is passed, only the primitive roots are returned. Every `n`-th root of unity satisfies `(\zeta_k)^n = 1`. There are `n` distinct roots for each `n` (`\zeta_k` and `\zeta_j` are the same when `k = j \pmod n`), which form a regular polygon with vertices on the unit circle. They are ordered counterclockwise with increasing `k`, starting with `\zeta_0 = 1`. 
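The explicit formula `\zeta_k = e^{2 \pi i k / n}` can be compared directly against the list returned by ``unitroots`` (a minimal sketch; the value of `n` is arbitrary)::

    >>> from mpmath import *
    >>> mp.dps = 25
    >>> n = 7
    >>> zetas = unitroots(n)
    >>> for k in range(n):
    ...     assert almosteq(zetas[k], expjpi(mpf(2)*k/n))
    ...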
**Examples** The roots of unity up to `n = 4`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> nprint(unitroots(1)) [1.0] >>> nprint(unitroots(2)) [1.0, -1.0] >>> nprint(unitroots(3)) [1.0, (-0.5 + 0.866025j), (-0.5 - 0.866025j)] >>> nprint(unitroots(4)) [1.0, (0.0 + 1.0j), -1.0, (0.0 - 1.0j)] Roots of unity form a geometric series that sums to 0:: >>> mp.dps = 50 >>> chop(fsum(unitroots(25))) 0.0 Primitive roots up to `n = 4`:: >>> mp.dps = 15 >>> nprint(unitroots(1, primitive=True)) [1.0] >>> nprint(unitroots(2, primitive=True)) [-1.0] >>> nprint(unitroots(3, primitive=True)) [(-0.5 + 0.866025j), (-0.5 - 0.866025j)] >>> nprint(unitroots(4, primitive=True)) [(0.0 + 1.0j), (0.0 - 1.0j)] There are only four primitive 12th roots:: >>> nprint(unitroots(12, primitive=True)) [(0.866025 + 0.5j), (-0.866025 + 0.5j), (-0.866025 - 0.5j), (0.866025 - 0.5j)] The `n`-th roots of unity form a group, the cyclic group of order `n`. Any primitive root `r` is a generator for this group, meaning that `r^0, r^1, \ldots, r^{n-1}` gives the whole set of unit roots (in some permuted order):: >>> for r in unitroots(6): print(r) ... 1.0 (0.5 + 0.866025403784439j) (-0.5 + 0.866025403784439j) -1.0 (-0.5 - 0.866025403784439j) (0.5 - 0.866025403784439j) >>> r = unitroots(6, primitive=True)[1] >>> for k in range(6): print(chop(r**k)) ... 1.0 (0.5 - 0.866025403784439j) (-0.5 - 0.866025403784439j) -1.0 (-0.5 + 0.866025403784438j) (0.5 + 0.866025403784438j) The number of primitive roots equals the Euler totient function `\phi(n)`:: >>> [len(unitroots(n, primitive=True)) for n in range(1,20)] [1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18] """ log = r""" Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is unspecified, :func:`~mpmath.log` computes the natural (base `e`) logarithm and is equivalent to :func:`~mpmath.ln`. In general, the base `b` logarithm is defined in terms of the natural logarithm as `\log_b(x) = \ln(x)/\ln(b)`. By convention, we take `\log(0) = -\infty`. The natural logarithm is real if `x > 0` and complex if `x < 0` or if `x` is complex. The principal branch of the complex logarithm is used, meaning that `\Im(\ln(x)) = -\pi < \arg(x) \le \pi`. **Examples** Some basic values and limits:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> log(1) 0.0 >>> log(2) 0.693147180559945 >>> log(1000,10) 3.0 >>> log(4, 16) 0.5 >>> log(j) (0.0 + 1.5707963267949j) >>> log(-1) (0.0 + 3.14159265358979j) >>> log(0) -inf >>> log(inf) +inf The natural logarithm is the antiderivative of `1/x`:: >>> quad(lambda x: 1/x, [1, 5]) 1.6094379124341 >>> log(5) 1.6094379124341 >>> diff(log, 10) 0.1 The Taylor series expansion of the natural logarithm around `x = 1` has coefficients `(-1)^{n+1}/n`:: >>> nprint(taylor(log, 1, 7)) [0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857] :func:`~mpmath.log` supports arbitrary precision evaluation:: >>> mp.dps = 50 >>> log(pi) 1.1447298858494001741434273513530587116472948129153 >>> log(pi, pi**3) 0.33333333333333333333333333333333333333333333333333 >>> mp.dps = 25 >>> log(3+4j) (1.609437912434100374600759 + 0.9272952180016122324285125j) """ log10 = r""" Computes the base-10 logarithm of `x`, `\log_{10}(x)`. ``log10(x)`` is equivalent to ``log(x, 10)``. """ fmod = r""" Converts `x` and `y` to mpmath numbers and returns `x \mod y`. For mpmath numbers, this is equivalent to ``x % y``. 
>>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> fmod(100, pi) 2.61062773871641 You can use :func:`~mpmath.fmod` to compute fractional parts of numbers:: >>> fmod(10.25, 1) 0.25 """ radians = r""" Converts the degree angle `x` to radians:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> radians(60) 1.0471975511966 """ degrees = r""" Converts the radian angle `x` to a degree angle:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> degrees(pi/3) 60.0 """ atan2 = r""" Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`, giving the signed angle between the positive `x`-axis and the point `(x, y)` in the 2D plane. This function is defined for real `x` and `y` only. The two-argument arctangent essentially computes `\mathrm{atan}(y/x)`, but accounts for the signs of both `x` and `y` to give the angle for the correct quadrant. The following examples illustrate the difference:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> atan2(1,1), atan(1/1.) (0.785398163397448, 0.785398163397448) >>> atan2(1,-1), atan(1/-1.) (2.35619449019234, -0.785398163397448) >>> atan2(-1,1), atan(-1/1.) (-0.785398163397448, -0.785398163397448) >>> atan2(-1,-1), atan(-1/-1.) (-2.35619449019234, 0.785398163397448) The angle convention is the same as that used for the complex argument; see :func:`~mpmath.arg`. """ fibonacci = r""" ``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)` with the initial values `F(0) = 0`, `F(1) = 1`. :func:`~mpmath.fibonacci` extends this definition to arbitrary real and complex arguments using the formula .. math :: F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5} where `\phi` is the golden ratio. :func:`~mpmath.fibonacci` also uses this continuous formula to compute `F(n)` for extremely large `n`, where calculating the exact integer would be wasteful. For convenience, :func:`~mpmath.fib` is available as an alias for :func:`~mpmath.fibonacci`. **Basic examples** Some small Fibonacci numbers are:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for i in range(10): ... print(fibonacci(i)) ... 0.0 1.0 1.0 2.0 3.0 5.0 8.0 13.0 21.0 34.0 >>> fibonacci(50) 12586269025.0 The recurrence for `F(n)` extends backwards to negative `n`:: >>> for i in range(10): ... print(fibonacci(-i)) ... 0.0 1.0 -1.0 2.0 -3.0 5.0 -8.0 13.0 -21.0 34.0 Large Fibonacci numbers will be computed approximately unless the precision is set high enough:: >>> fib(200) 2.8057117299251e+41 >>> mp.dps = 45 >>> fib(200) 280571172992510140037611932413038677189525.0 :func:`~mpmath.fibonacci` can compute approximate Fibonacci numbers of stupendous size:: >>> mp.dps = 15 >>> fibonacci(10**25) 3.49052338550226e+2089876402499787337692720 **Real and complex arguments** The extended Fibonacci function is an analytic function. The property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`:: >>> mp.dps = 15 >>> fib(pi) 2.1170270579161 >>> fib(pi-1) + fib(pi-2) 2.1170270579161 >>> fib(3+4j) (-5248.51130728372 - 14195.962288353j) >>> fib(2+4j) + fib(1+4j) (-5248.51130728372 - 14195.962288353j) The Fibonacci function has infinitely many roots on the negative half-real axis. 
The first root is at 0, the second is close to -0.18, and then there are infinitely many roots that asymptotically approach `-n+1/2`:: >>> findroot(fib, -0.2) -0.183802359692956 >>> findroot(fib, -2) -1.57077646820395 >>> findroot(fib, -17) -16.4999999596115 >>> findroot(fib, -24) -23.5000000000479 **Mathematical relationships** For large `n`, `F(n+1)/F(n)` approaches the golden ratio:: >>> mp.dps = 50 >>> fibonacci(101)/fibonacci(100) 1.6180339887498948482045868343656381177203127439638 >>> +phi 1.6180339887498948482045868343656381177203091798058 The sum of reciprocal Fibonacci numbers converges to an irrational number for which no closed form expression is known:: >>> mp.dps = 15 >>> nsum(lambda n: 1/fib(n), [1, inf]) 3.35988566624318 Amazingly, however, the sum of odd-index reciprocal Fibonacci numbers can be expressed in terms of a Jacobi theta function:: >>> nsum(lambda n: 1/fib(2*n+1), [0, inf]) 1.82451515740692 >>> sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4 1.82451515740692 Some related sums can be done in closed form:: >>> nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf]) 1.11803398874989 >>> phi - 0.5 1.11803398874989 >>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,int(k+1))) >>> nsum(f, [1, inf]) 0.618033988749895 >>> phi-1 0.618033988749895 **References** 1. http://mathworld.wolfram.com/FibonacciNumber.html """ altzeta = r""" Gives the Dirichlet eta function, `\eta(s)`, also known as the alternating zeta function. This function is defined in analogy with the Riemann zeta function as providing the sum of the alternating series .. math :: \eta(s) = \sum_{k=0}^{\infty} \frac{(-1)^k}{k^s} = 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots The eta function, unlike the Riemann zeta function, is an entire function, having a finite value for all complex `s`. The special case `\eta(1) = \log(2)` gives the value of the alternating harmonic series. The alternating zeta function may expressed using the Riemann zeta function as `\eta(s) = (1 - 2^{1-s}) \zeta(s)`. It can also be expressed in terms of the Hurwitz zeta function, for example using :func:`~mpmath.dirichlet` (see documentation for that function). **Examples** Some special values are:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> altzeta(1) 0.693147180559945 >>> altzeta(0) 0.5 >>> altzeta(-1) 0.25 >>> altzeta(-2) 0.0 An example of a sum that can be computed more accurately and efficiently via :func:`~mpmath.altzeta` than via numerical summation:: >>> sum(-(-1)**n / mpf(n)**2.5 for n in range(1, 100)) 0.867204951503984 >>> altzeta(2.5) 0.867199889012184 At positive even integers, the Dirichlet eta function evaluates to a rational multiple of a power of `\pi`:: >>> altzeta(2) 0.822467033424113 >>> pi**2/12 0.822467033424113 Like the Riemann zeta function, `\eta(s)`, approaches 1 as `s` approaches positive infinity, although it does so from below rather than from above:: >>> altzeta(30) 0.999999999068682 >>> altzeta(inf) 1.0 >>> mp.pretty = False >>> altzeta(1000, rounding='d') mpf('0.99999999999999989') >>> altzeta(1000, rounding='u') mpf('1.0') **References** 1. http://mathworld.wolfram.com/DirichletEtaFunction.html 2. http://en.wikipedia.org/wiki/Dirichlet_eta_function """ factorial = r""" Computes the factorial, `x!`. For integers `n \ge 0`, we have `n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial is defined for real or complex `x` by `x! = \Gamma(x+1)`. **Examples** Basic values and limits:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for k in range(6): ... 
print("%s %s" % (k, fac(k))) ... 0 1.0 1 1.0 2 2.0 3 6.0 4 24.0 5 120.0 >>> fac(inf) +inf >>> fac(0.5), sqrt(pi)/2 (0.886226925452758, 0.886226925452758) For large positive `x`, `x!` can be approximated by Stirling's formula:: >>> x = 10**10 >>> fac(x) 2.32579620567308e+95657055186 >>> sqrt(2*pi*x)*(x/e)**x 2.32579597597705e+95657055186 :func:`~mpmath.fac` supports evaluation for astronomically large values:: >>> fac(10**30) 6.22311232304258e+29565705518096748172348871081098 Reciprocal factorials appear in the Taylor series of the exponential function (among many other contexts):: >>> nsum(lambda k: 1/fac(k), [0, inf]), exp(1) (2.71828182845905, 2.71828182845905) >>> nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi) (23.1406926327793, 23.1406926327793) """ gamma = r""" Computes the gamma function, `\Gamma(x)`. The gamma function is a shifted version of the ordinary factorial, satisfying `\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it is defined by .. math :: \Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0` by analytic continuation. **Examples** Basic values and limits:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for k in range(1, 6): ... print("%s %s" % (k, gamma(k))) ... 1 1.0 2 1.0 3 2.0 4 6.0 5 24.0 >>> gamma(inf) +inf >>> gamma(0) Traceback (most recent call last): ... ValueError: gamma function pole The gamma function of a half-integer is a rational multiple of `\sqrt{\pi}`:: >>> gamma(0.5), sqrt(pi) (1.77245385090552, 1.77245385090552) >>> gamma(1.5), sqrt(pi)/2 (0.886226925452758, 0.886226925452758) We can check the integral definition:: >>> gamma(3.5) 3.32335097044784 >>> quad(lambda t: t**2.5*exp(-t), [0,inf]) 3.32335097044784 :func:`~mpmath.gamma` supports arbitrary-precision evaluation and complex arguments:: >>> mp.dps = 50 >>> gamma(sqrt(3)) 0.91510229697308632046045539308226554038315280564184 >>> mp.dps = 25 >>> gamma(2j) (0.009902440080927490985955066 - 0.07595200133501806872408048j) Arguments can also be large. Note that the gamma function grows very quickly:: >>> mp.dps = 15 >>> gamma(10**20) 1.9328495143101e+1956570551809674817225 """ psi = r""" Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`. Special cases are known as the *digamma function* (`\psi^{(0)}(z)`), the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma functions are defined as the logarithmic derivatives of the gamma function: .. math :: \psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z) In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the present implementation of :func:`~mpmath.psi`, the order `m` must be a nonnegative integer, while the argument `z` may be an arbitrary complex number (with exception for the polygamma function's poles at `z = 0, -1, -2, \ldots`). 
**Examples** For various rational arguments, the polygamma function reduces to a combination of standard mathematical constants:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> psi(0, 1), -euler (-0.5772156649015328606065121, -0.5772156649015328606065121) >>> psi(1, '1/4'), pi**2+8*catalan (17.19732915450711073927132, 17.19732915450711073927132) >>> psi(2, '1/2'), -14*apery (-16.82879664423431999559633, -16.82879664423431999559633) The polygamma functions are derivatives of each other:: >>> diff(lambda x: psi(3, x), pi), psi(4, pi) (-0.1105749312578862734526952, -0.1105749312578862734526952) >>> quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2) (-0.375, -0.375) The digamma function diverges logarithmically as `z \to \infty`, while higher orders tend to zero:: >>> psi(0,inf), psi(1,inf), psi(2,inf) (+inf, 0.0, 0.0) Evaluation for a complex argument:: >>> psi(2, -1-2j) (0.03902435405364952654838445 + 0.1574325240413029954685366j) Evaluation is supported for large orders `m` and/or large arguments `z`:: >>> psi(3, 10**100) 2.0e-300 >>> psi(250, 10**30+10**20*j) (-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j) **Application to infinite series** Any infinite series where the summand is a rational function of the index `k` can be evaluated in closed form in terms of polygamma functions of the roots and poles of the summand:: >>> a = sqrt(2) >>> b = sqrt(3) >>> nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf]) 0.4049668927517857061917531 >>> (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2 0.4049668927517857061917531 This follows from the series representation (`m > 0`) .. math :: \psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty} \frac{1}{(z+k)^{m+1}}. Since the roots of a polynomial may be complex, it is sometimes necessary to use the complex polygamma function to evaluate an entirely real-valued sum:: >>> nsum(lambda k: 1/(k**2-2*k+3), [0, inf]) 1.694361433907061256154665 >>> nprint(polyroots([1,-2,3])) [(1.0 - 1.41421j), (1.0 + 1.41421j)] >>> r1 = 1-sqrt(2)*j >>> r2 = r1.conjugate() >>> (psi(0,-r2)-psi(0,-r1))/(r1-r2) (1.694361433907061256154665 + 0.0j) """ digamma = r""" Shortcut for ``psi(0,z)``. """ harmonic = r""" If `n` is an integer, ``harmonic(n)`` gives a floating-point approximation of the `n`-th harmonic number `H(n)`, defined as .. math :: H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n} The first few harmonic numbers are:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(8): ... print("%s %s" % (n, harmonic(n))) ... 0 0.0 1 1.0 2 1.5 3 1.83333333333333 4 2.08333333333333 5 2.28333333333333 6 2.45 7 2.59285714285714 The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges:: >>> harmonic(inf) +inf :func:`~mpmath.harmonic` is evaluated using the digamma function rather than by summing the harmonic series term by term. It can therefore be computed quickly for arbitrarily large `n`, and even for nonintegral arguments:: >>> harmonic(10**100) 230.835724964306 >>> harmonic(0.5) 0.613705638880109 >>> harmonic(3+4j) (2.24757548223494 + 0.850502209186044j) :func:`~mpmath.harmonic` supports arbitrary precision evaluation:: >>> mp.dps = 50 >>> harmonic(11) 3.0198773448773448773448773448773448773448773448773 >>> harmonic(pi) 1.8727388590273302654363491032336134987519132374152 The harmonic series diverges, but at a glacial pace. 
It is possible to calculate the exact number of terms required before the sum exceeds a given amount, say 100:: >>> mp.dps = 50 >>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10) >>> v 15092688622113788323693563264538101449859496.864101 >>> v = int(ceil(v)) >>> print(v) 15092688622113788323693563264538101449859497 >>> harmonic(v-1) 99.999999999999999999999999999999999999999999942747 >>> harmonic(v) 100.000000000000000000000000000000000000000000009 """ bernoulli = r""" Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`. The Bernoulli numbers are rational numbers, but this function returns a floating-point approximation. To obtain an exact fraction, use :func:`~mpmath.bernfrac` instead. **Examples** Numerical values of the first few Bernoulli numbers:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(15): ... print("%s %s" % (n, bernoulli(n))) ... 0 1.0 1 -0.5 2 0.166666666666667 3 0.0 4 -0.0333333333333333 5 0.0 6 0.0238095238095238 7 0.0 8 -0.0333333333333333 9 0.0 10 0.0757575757575758 11 0.0 12 -0.253113553113553 13 0.0 14 1.16666666666667 Bernoulli numbers can be approximated with arbitrary precision:: >>> mp.dps = 50 >>> bernoulli(100) -2.8382249570693706959264156336481764738284680928013e+78 Arbitrarily large `n` are supported:: >>> mp.dps = 15 >>> bernoulli(10**20 + 2) 3.09136296657021e+1876752564973863312327 The Bernoulli numbers are related to the Riemann zeta function at integer arguments:: >>> -bernoulli(8) * (2*pi)**8 / (2*fac(8)) 1.00407735619794 >>> zeta(8) 1.00407735619794 **Algorithm** For small `n` (`n < 3000`) :func:`~mpmath.bernoulli` uses a recurrence formula due to Ramanujan. All results in this range are cached, so sequential computation of small Bernoulli numbers is guaranteed to be fast. For larger `n`, `B_n` is evaluated in terms of the Riemann zeta function. """ stieltjes = r""" For a nonnegative integer `n`, ``stieltjes(n)`` computes the `n`-th Stieltjes constant `\gamma_n`, defined as the `n`-th coefficient in the Laurent series expansion of the Riemann zeta function around the pole at `s = 1`. That is, we have: .. math :: \zeta(s) = \frac{1}{s-1} \sum_{n=0}^{\infty} \frac{(-1)^n}{n!} \gamma_n (s-1)^n More generally, ``stieltjes(n, a)`` gives the corresponding coefficient `\gamma_n(a)` for the Hurwitz zeta function `\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`). **Examples** The zeroth Stieltjes constant is just Euler's constant `\gamma`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> stieltjes(0) 0.577215664901533 Some more values are:: >>> stieltjes(1) -0.0728158454836767 >>> stieltjes(10) 0.000205332814909065 >>> stieltjes(30) 0.00355772885557316 >>> stieltjes(1000) -1.57095384420474e+486 >>> stieltjes(2000) 2.680424678918e+1109 >>> stieltjes(1, 2.5) -0.23747539175716 An alternative way to compute `\gamma_1`:: >>> diff(extradps(15)(lambda x: 1/(x-1) - zeta(x)), 1) -0.0728158454836767 :func:`~mpmath.stieltjes` supports arbitrary precision evaluation:: >>> mp.dps = 50 >>> stieltjes(2) -0.0096903631928723184845303860352125293590658061013408 **Algorithm** :func:`~mpmath.stieltjes` numerically evaluates the integral in the following representation due to Ainsworth, Howell and Coffey [1], [2]: .. math :: \gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} + \frac{2}{a} \Re \int_0^{\infty} \frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx. For some reference values with `a = 1`, see e.g. [4]. **References** 1. O. R. Ainsworth & L. W. 
Howell, "An integral representation of the generalized Euler-Mascheroni constants", NASA Technical Paper 2456 (1985), http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf 2. M. W. Coffey, "The Stieltjes constants, their relation to the `\eta_j` coefficients, and representation of the Hurwitz zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343 3. http://mathworld.wolfram.com/StieltjesConstants.html 4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt """ gammaprod = r""" Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the product / quotient of gamma functions: .. math :: \frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)} {\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)} Unlike direct calls to :func:`~mpmath.gamma`, :func:`~mpmath.gammaprod` considers the entire product as a limit and evaluates this limit properly if any of the numerator or denominator arguments are nonpositive integers such that poles of the gamma function are encountered. That is, :func:`~mpmath.gammaprod` evaluates .. math :: \lim_{\epsilon \to 0} \frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots \Gamma(a_p+\epsilon)} {\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots \Gamma(b_q+\epsilon)} In particular: * If there are equally many poles in the numerator and the denominator, the limit is a rational number times the remaining, regular part of the product. * If there are more poles in the numerator, :func:`~mpmath.gammaprod` returns ``+inf``. * If there are more poles in the denominator, :func:`~mpmath.gammaprod` returns 0. **Examples** The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`:: >>> from mpmath import * >>> mp.dps = 15 >>> gammaprod([], [0]) 0.0 A limit:: >>> gammaprod([-4], [-3]) -0.25 >>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1) -0.25 >>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1) -0.25 """ beta = r""" Computes the beta function, `B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`. The beta function is also commonly defined by the integral representation .. math :: B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt **Examples** For integer and half-integer arguments where all three gamma functions are finite, the beta function becomes either rational number or a rational multiple of `\pi`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> beta(5, 2) 0.0333333333333333 >>> beta(1.5, 2) 0.266666666666667 >>> 16*beta(2.5, 1.5) 3.14159265358979 Where appropriate, :func:`~mpmath.beta` evaluates limits. A pole of the beta function is taken to result in ``+inf``:: >>> beta(-0.5, 0.5) 0.0 >>> beta(-3, 3) -0.333333333333333 >>> beta(-2, 3) +inf >>> beta(inf, 1) 0.0 >>> beta(inf, 0) nan :func:`~mpmath.beta` supports complex numbers and arbitrary precision evaluation:: >>> beta(1, 2+j) (0.4 - 0.2j) >>> mp.dps = 25 >>> beta(j,0.5) (1.079424249270925780135675 - 1.410032405664160838288752j) >>> mp.dps = 50 >>> beta(pi, e) 0.037890298781212201348153837138927165984170287886464 Various integrals can be computed by means of the beta function:: >>> mp.dps = 15 >>> quad(lambda t: t**2.5*(1-t)**2, [0, 1]) 0.0230880230880231 >>> beta(3.5, 3) 0.0230880230880231 >>> quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2]) 0.319504062596158 >>> beta(2.5, 0.75)/2 0.319504062596158 """ betainc = r""" ``betainc(a, b, x1=0, x2=1, regularized=False)`` gives the generalized incomplete beta function, .. math :: I_{x_1}^{x_2}(a,b) = \int_{x_1}^{x_2} t^{a-1} (1-t)^{b-1} dt. 
When `x_1 = 0, x_2 = 1`, this reduces to the ordinary (complete) beta function `B(a,b)`; see :func:`~mpmath.beta`. With the keyword argument ``regularized=True``, :func:`~mpmath.betainc` computes the regularized incomplete beta function `I_{x_1}^{x_2}(a,b) / B(a,b)`. This is the cumulative distribution of the beta distribution with parameters `a`, `b`. .. note : Implementations of the incomplete beta function in some other software uses a different argument order. For example, Mathematica uses the reversed argument order ``Beta[x1,x2,a,b]``. For the equivalent of SciPy's three-argument incomplete beta integral (implicitly with `x1 = 0`), use ``betainc(a,b,0,x2,regularized=True)``. **Examples** Verifying that :func:`~mpmath.betainc` computes the integral in the definition:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> x,y,a,b = 3, 4, 0, 6 >>> betainc(x, y, a, b) -4010.4 >>> quad(lambda t: t**(x-1) * (1-t)**(y-1), [a, b]) -4010.4 The arguments may be arbitrary complex numbers:: >>> betainc(0.75, 1-4j, 0, 2+3j) (0.2241657956955709603655887 + 0.3619619242700451992411724j) With regularization:: >>> betainc(1, 2, 0, 0.25, regularized=True) 0.4375 >>> betainc(pi, e, 0, 1, regularized=True) # Complete 1.0 The beta integral satisfies some simple argument transformation symmetries:: >>> mp.dps = 15 >>> betainc(2,3,4,5), -betainc(2,3,5,4), betainc(3,2,1-5,1-4) (56.0833333333333, 56.0833333333333, 56.0833333333333) The beta integral can often be evaluated analytically. For integer and rational arguments, the incomplete beta function typically reduces to a simple algebraic-logarithmic expression:: >>> mp.dps = 25 >>> identify(chop(betainc(0, 0, 3, 4))) '-(log((9/8)))' >>> identify(betainc(2, 3, 4, 5)) '(673/12)' >>> identify(betainc(1.5, 1, 1, 2)) '((-12+sqrt(1152))/18)' """ binomial = r""" Computes the binomial coefficient .. math :: {n \choose k} = \frac{n!}{k!(n-k)!}. The binomial coefficient gives the number of ways that `k` items can be chosen from a set of `n` items. More generally, the binomial coefficient is a well-defined function of arbitrary real or complex `n` and `k`, via the gamma function. **Examples** Generate Pascal's triangle:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(5): ... nprint([binomial(n,k) for k in range(n+1)]) ... [1.0] [1.0, 1.0] [1.0, 2.0, 1.0] [1.0, 3.0, 3.0, 1.0] [1.0, 4.0, 6.0, 4.0, 1.0] There is 1 way to select 0 items from the empty set, and 0 ways to select 1 item from the empty set:: >>> binomial(0, 0) 1.0 >>> binomial(0, 1) 0.0 :func:`~mpmath.binomial` supports large arguments:: >>> binomial(10**20, 10**20-5) 8.33333333333333e+97 >>> binomial(10**20, 10**10) 2.60784095465201e+104342944813 Nonintegral binomial coefficients find use in series expansions:: >>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4)) [1.0, 0.25, -0.09375, 0.0546875, -0.0375977] >>> nprint([binomial(0.25, k) for k in range(5)]) [1.0, 0.25, -0.09375, 0.0546875, -0.0375977] An integral representation:: >>> n, k = 5, 3 >>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n >>> chop(quad(f, [-pi,pi])/(2*pi)) 10.0 >>> binomial(n,k) 10.0 """ rf = r""" Computes the rising factorial or Pochhammer symbol, .. math :: x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)} where the rightmost expression is valid for nonintegral `n`. **Examples** For integral `n`, the rising factorial is a polynomial:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(5): ... nprint(taylor(lambda x: rf(x,n), 0, n)) ... 
[1.0] [0.0, 1.0] [0.0, 1.0, 1.0] [0.0, 2.0, 3.0, 1.0] [0.0, 6.0, 11.0, 6.0, 1.0] Evaluation is supported for arbitrary arguments:: >>> rf(2+3j, 5.5) (-7202.03920483347 - 3777.58810701527j) """ ff = r""" Computes the falling factorial, .. math :: (x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)} where the rightmost expression is valid for nonintegral `n`. **Examples** For integral `n`, the falling factorial is a polynomial:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(5): ... nprint(taylor(lambda x: ff(x,n), 0, n)) ... [1.0] [0.0, 1.0] [0.0, -1.0, 1.0] [0.0, 2.0, -3.0, 1.0] [0.0, -6.0, 11.0, -6.0, 1.0] Evaluation is supported for arbitrary arguments:: >>> ff(2+3j, 5.5) (-720.41085888203 + 316.101124983878j) """ fac2 = r""" Computes the double factorial `x!!`, defined for integers `x > 0` by .. math :: x!! = \begin{cases} 1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\ 2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even} \end{cases} and more generally by [1] .. math :: x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4} \Gamma\left(\frac{x}{2}+1\right). **Examples** The integer sequence of double factorials begins:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> nprint([fac2(n) for n in range(10)]) [1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0] For large `x`, double factorials follow a Stirling-like asymptotic approximation:: >>> x = mpf(10000) >>> fac2(x) 5.97272691416282e+17830 >>> sqrt(pi)*x**((x+1)/2)*exp(-x/2) 5.97262736954392e+17830 The recurrence formula `x!! = x (x-2)!!` can be reversed to define the double factorial of negative odd integers (but not negative even integers):: >>> fac2(-1), fac2(-3), fac2(-5), fac2(-7) (1.0, -1.0, 0.333333333333333, -0.0666666666666667) >>> fac2(-2) Traceback (most recent call last): ... ValueError: gamma function pole With the exception of the poles at negative even integers, :func:`~mpmath.fac2` supports evaluation for arbitrary complex arguments. The recurrence formula is valid generally:: >>> fac2(pi+2j) (-1.3697207890154e-12 + 3.93665300979176e-12j) >>> (pi+2j)*fac2(pi-2+2j) (-1.3697207890154e-12 + 3.93665300979176e-12j) Double factorials should not be confused with nested factorials, which are immensely larger:: >>> fac(fac(20)) 5.13805976125208e+43675043585825292774 >>> fac2(20) 3715891200.0 Double factorials appear, among other things, in series expansions of Gaussian functions and the error function. Infinite series include:: >>> nsum(lambda k: 1/fac2(k), [0, inf]) 3.05940740534258 >>> sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2)) 3.05940740534258 >>> nsum(lambda k: 2**k/fac2(2*k-1), [1, inf]) 4.06015693855741 >>> e * erf(1) * sqrt(pi) 4.06015693855741 A beautiful Ramanujan sum:: >>> nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf]) 0.90917279454693 >>> (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2 0.90917279454693 **References** 1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/ 2. http://mathworld.wolfram.com/DoubleFactorial.html """ hyper = r""" Evaluates the generalized hypergeometric function .. math :: \,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) = \sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n} {(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!} where `(x)_n` denotes the rising factorial (see :func:`~mpmath.rf`). The parameters lists ``a_s`` and ``b_s`` may contain integers, real numbers, complex numbers, as well as exact fractions given in the form of tuples `(p, q)`. 
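As a minimal sketch (with arbitrary parameter values), the exact-fraction tuple form can be compared against ordinary floating-point parameters::

    >>> from mpmath import *
    >>> mp.dps = 25
    >>> z = mpf('0.3')
    >>> exact = hyper([(1,3), 2], [(3,2)], z)      # (1,3) denotes the fraction 1/3
    >>> floats = hyper([mpf(1)/3, 2], [mpf(3)/2], z)
    >>> assert almosteq(exact, floats)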
:func:`~mpmath.hyper` is optimized to handle integers and fractions more efficiently than arbitrary floating-point parameters (since rational parameters are by far the most common). **Examples** Verifying that :func:`~mpmath.hyper` gives the sum in the definition, by comparison with :func:`~mpmath.nsum`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a,b,c,d = 2,3,4,5 >>> x = 0.25 >>> hyper([a,b],[c,d],x) 1.078903941164934876086237 >>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n) >>> nsum(fn, [0, inf]) 1.078903941164934876086237 The parameters can be any combination of integers, fractions, floats and complex numbers:: >>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3) >>> x = 0.2j >>> hyper([a,b],[c,d,e],x) (0.9923571616434024810831887 - 0.005753848733883879742993122j) >>> b, e = -0.5, mpf(2)/3 >>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n) >>> nsum(fn, [0, inf]) (0.9923571616434024810831887 - 0.005753848733883879742993122j) The `\,_0F_0` and `\,_1F_0` series are just elementary functions:: >>> a, z = sqrt(2), +pi >>> hyper([],[],z) 23.14069263277926900572909 >>> exp(z) 23.14069263277926900572909 >>> hyper([a],[],z) (-0.09069132879922920160334114 + 0.3283224323946162083579656j) >>> (1-z)**(-a) (-0.09069132879922920160334114 + 0.3283224323946162083579656j) If any `a_k` coefficient is a nonpositive integer, the series terminates into a finite polynomial:: >>> hyper([1,1,1,-3],[2,5],1) 0.7904761904761904761904762 >>> identify(_) '(83/105)' If any `b_k` is a nonpositive integer, the function is undefined (unless the series terminates before the division by zero occurs):: >>> hyper([1,1,1,-3],[-2,5],1) Traceback (most recent call last): ... ZeroDivisionError: pole in hypergeometric series >>> hyper([1,1,1,-1],[-2,5],1) 1.1 Except for polynomial cases, the radius of convergence `R` of the hypergeometric series is either `R = \infty` (if `p \le q`), `R = 1` (if `p = q+1`), or `R = 0` (if `p > q+1`). The analytic continuations of the functions with `p = q+1`, i.e. `\,_2F_1`, `\,_3F_2`, `\,_4F_3`, etc, are all implemented and therefore these functions can be evaluated for `|z| \ge 1`. The shortcuts :func:`~mpmath.hyp2f1`, :func:`~mpmath.hyp3f2` are available to handle the most common cases (see their documentation), but functions of higher degree are also supported via :func:`~mpmath.hyper`:: >>> hyper([1,2,3,4], [5,6,7], 1) # 4F3 at finite-valued branch point 1.141783505526870731311423 >>> hyper([4,5,6,7], [1,2,3], 1) # 4F3 at pole +inf >>> hyper([1,2,3,4,5], [6,7,8,9], 10) # 5F4 (1.543998916527972259717257 - 0.5876309929580408028816365j) >>> hyper([1,2,3,4,5,6], [7,8,9,10,11], 1j) # 6F5 (0.9996565821853579063502466 + 0.0129721075905630604445669j) Near `z = 1` with noninteger parameters:: >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','41/8'], 1) 2.219433352235586121250027 >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], 1) +inf >>> eps1 = extradps(6)(lambda: 1 - mpf('1e-6'))() >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], eps1) 2923978034.412973409330956 Please note that, as currently implemented, evaluation of `\,_pF_{p-1}` with `p \ge 3` may be slow or inaccurate when `|z-1|` is small, for some parameter values. Evaluation may be aborted if convergence appears to be too slow. The optional ``maxterms`` (limiting the number of series terms) and ``maxprec`` (limiting the internal precision) keyword arguments can be used to control evaluation:: >>> hyper([1,2,3], [4,5,6], 10000) Traceback (most recent call last): ... 
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms. >>> hyper([1,2,3], [4,5,6], 10000, maxterms=10**6) 7.622806053177969474396918e+4310 Additional options include ``force_series`` (which forces direct use of a hypergeometric series even if another evaluation method might work better) and ``asymp_tol`` which controls the target tolerance for using asymptotic series. When `p > q+1`, ``hyper`` computes the (iterated) Borel sum of the divergent series. For `\,_2F_0` the Borel sum has an analytic solution and can be computed efficiently (see :func:`~mpmath.hyp2f0`). For higher degrees, the functions is evaluated first by attempting to sum it directly as an asymptotic series (this only works for tiny `|z|`), and then by evaluating the Borel regularized sum using numerical integration. Except for special parameter combinations, this can be extremely slow. >>> hyper([1,1], [], 0.5) # regularization of 2F0 (1.340965419580146562086448 + 0.8503366631752726568782447j) >>> hyper([1,1,1,1], [1], 0.5) # regularization of 4F1 (1.108287213689475145830699 + 0.5327107430640678181200491j) With the following magnitude of argument, the asymptotic series for `\,_3F_1` gives only a few digits. Using Borel summation, ``hyper`` can produce a value with full accuracy:: >>> mp.dps = 15 >>> hyper([2,0.5,4], [5.25], '0.08', force_series=True) Traceback (most recent call last): ... NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms. >>> hyper([2,0.5,4], [5.25], '0.08', asymp_tol=1e-4) 1.0725535790737 >>> hyper([2,0.5,4], [5.25], '0.08') (1.07269542893559 + 5.54668863216891e-5j) >>> hyper([2,0.5,4], [5.25], '-0.08', asymp_tol=1e-4) 0.946344925484879 >>> hyper([2,0.5,4], [5.25], '-0.08') 0.946312503737771 >>> mp.dps = 25 >>> hyper([2,0.5,4], [5.25], '-0.08') 0.9463125037377662296700858 Note that with the positive `z` value, there is a complex part in the correct result, which falls below the tolerance of the asymptotic series. By default, a parameter that appears in both ``a_s`` and ``b_s`` will be removed unless it is a nonpositive integer. This generally speeds up evaluation by producing a hypergeometric function of lower order. This optimization can be disabled by passing ``eliminate=False``. >>> hyper([1,2,3], [4,5,3], 10000) 1.268943190440206905892212e+4321 >>> hyper([1,2,3], [4,5,3], 10000, eliminate=False) Traceback (most recent call last): ... NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms. >>> hyper([1,2,3], [4,5,3], 10000, eliminate=False, maxterms=10**6) 1.268943190440206905892212e+4321 If a nonpositive integer `-n` appears in both ``a_s`` and ``b_s``, this parameter cannot be unambiguously removed since it creates a term 0 / 0. In this case the hypergeometric series is understood to terminate before the division by zero occurs. This convention is consistent with Mathematica. An alternative convention of eliminating the parameters can be toggled with ``eliminate_all=True``: >>> hyper([2,-1], [-1], 3) 7.0 >>> hyper([2,-1], [-1], 3, eliminate_all=True) 0.25 >>> hyper([2], [], 3) 0.25 """ hypercomb = r""" Computes a weighted combination of hypergeometric functions .. math :: \sum_{r=1}^N \left[ \prod_{k=1}^{l_r} {w_{r,k}}^{c_{r,k}} \frac{\prod_{k=1}^{m_r} \Gamma(\alpha_{r,k})}{\prod_{k=1}^{n_r} \Gamma(\beta_{r,k})} \,_{p_r}F_{q_r}(a_{r,1},\ldots,a_{r,p}; b_{r,1}, \ldots, b_{r,q}; z_r)\right]. 
Typically the parameters are linear combinations of a small set of base parameters; :func:`~mpmath.hypercomb` permits computing a correct value in the case that some of the `\alpha`, `\beta`, `b` turn out to be nonpositive integers, or if division by zero occurs for some `w^c`, assuming that there are opposing singularities that cancel out. The limit is computed by evaluating the function with the base parameters perturbed, at a higher working precision. The first argument should be a function that takes the perturbable base parameters ``params`` as input and returns `N` tuples ``(w, c, alpha, beta, a, b, z)``, where the coefficients ``w``, ``c``, gamma factors ``alpha``, ``beta``, and hypergeometric coefficients ``a``, ``b`` each should be lists of numbers, and ``z`` should be a single number. **Examples** The following evaluates .. math :: (a-1) \frac{\Gamma(a-3)}{\Gamma(a-4)} \,_1F_1(a,a-1,z) = e^z(a-4)(a+z-1) with `a=1, z=3`. There is a zero factor, two gamma function poles, and the 1F1 function is singular; all singularities cancel out to give a finite value:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> hypercomb(lambda a: [([a-1],[1],[a-3],[a-4],[a],[a-1],3)], [1]) -180.769832308689 >>> -9*exp(3) -180.769832308689 """ hyp0f1 = r""" Gives the hypergeometric function `\,_0F_1`, sometimes known as the confluent limit function, defined as .. math :: \,_0F_1(a,z) = \sum_{k=0}^{\infty} \frac{1}{(a)_k} \frac{z^k}{k!}. This function satisfies the differential equation `z f''(z) + a f'(z) = f(z)`, and is related to the Bessel function of the first kind (see :func:`~mpmath.besselj`). ``hyp0f1(a,z)`` is equivalent to ``hyper([],[a],z)``; see documentation for :func:`~mpmath.hyper` for more information. **Examples** Evaluation for arbitrary arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hyp0f1(2, 0.25) 1.130318207984970054415392 >>> hyp0f1((1,2), 1234567) 6.27287187546220705604627e+964 >>> hyp0f1(3+4j, 1000000j) (3.905169561300910030267132e+606 + 3.807708544441684513934213e+606j) Evaluation is supported for arbitrarily large values of `z`, using asymptotic expansions:: >>> hyp0f1(1, 10**50) 2.131705322874965310390701e+8685889638065036553022565 >>> hyp0f1(1, -10**50) 1.115945364792025420300208e-13 Verifying the differential equation:: >>> a = 2.5 >>> f = lambda z: hyp0f1(a,z) >>> for z in [0, 10, 3+4j]: ... chop(z*diff(f,z,2) + a*diff(f,z) - f(z)) ... 0.0 0.0 0.0 """ hyp1f1 = r""" Gives the confluent hypergeometric function of the first kind, .. math :: \,_1F_1(a,b,z) = \sum_{k=0}^{\infty} \frac{(a)_k}{(b)_k} \frac{z^k}{k!}, also known as Kummer's function and sometimes denoted by `M(a,b,z)`. This function gives one solution to the confluent (Kummer's) differential equation .. math :: z f''(z) + (b-z) f'(z) - af(z) = 0. A second solution is given by the `U` function; see :func:`~mpmath.hyperu`. Solutions are also given in an alternate form by the Whittaker functions (:func:`~mpmath.whitm`, :func:`~mpmath.whitw`). ``hyp1f1(a,b,z)`` is equivalent to ``hyper([a],[b],z)``; see documentation for :func:`~mpmath.hyper` for more information. 
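As a minimal sketch of this equivalence (with arbitrarily chosen test values), the two spellings request the same Kummer function value::

    from mpmath import *
    mp.dps = 25
    a, b, z = 1.5, 2.25, 3+4j
    v1 = hyp1f1(a, b, z)
    v2 = hyper([a], [b], z)
    # v1 and v2 should agree to the working precision
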
**Examples** Evaluation for real and complex values of the argument `z`, with fixed parameters `a = 2, b = -1/3`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hyp1f1(2, (-1,3), 3.25) -2815.956856924817275640248
>>> hyp1f1(2, (-1,3), -3.25) -1.145036502407444445553107
>>> hyp1f1(2, (-1,3), 1000) -8.021799872770764149793693e+441
>>> hyp1f1(2, (-1,3), -1000) 0.000003131987633006813594535331
>>> hyp1f1(2, (-1,3), 100+100j) (-3.189190365227034385898282e+48 - 1.106169926814270418999315e+49j)
Parameters may be complex:: >>> hyp1f1(2+3j, -1+j, 10j) (261.8977905181045142673351 + 160.8930312845682213562172j)
Arbitrarily large values of `z` are supported:: >>> hyp1f1(3, 4, 10**20) 3.890569218254486878220752e+43429448190325182745
>>> hyp1f1(3, 4, -10**20) 6.0e-60
>>> hyp1f1(3, 4, 10**20*j) (-1.935753855797342532571597e-20 - 2.291911213325184901239155e-20j)
Verifying the differential equation:: >>> a, b = 1.5, 2 >>> f = lambda z: hyp1f1(a,b,z) >>> for z in [0, -10, 3, 3+4j]: ...     chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z)) ... 0.0 0.0 0.0 0.0
An integral representation:: >>> a, b = 1.5, 3 >>> z = 1.5 >>> hyp1f1(a,b,z) 2.269381460919952778587441
>>> g = lambda t: exp(z*t)*t**(a-1)*(1-t)**(b-a-1) >>> gammaprod([b],[a,b-a])*quad(g, [0,1]) 2.269381460919952778587441
""" hyp1f2 = r""" Gives the hypergeometric function `\,_1F_2(a_1;b_1,b_2; z)`. The call ``hyp1f2(a1,b1,b2,z)`` is equivalent to ``hyper([a1],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a, b, c = 1.5, (-1,3), 2.25
>>> hyp1f2(a, b, c, 10**20) -1.159388148811981535941434e+8685889639
>>> hyp1f2(a, b, c, -10**20) -12.60262607892655945795907
>>> hyp1f2(a, b, c, 10**20*j) (4.237220401382240876065501e+6141851464 - 2.950930337531768015892987e+6141851464j)
>>> hyp1f2(2+3j, -2j, 0.5j, 10-20j) (135881.9905586966432662004 - 86681.95885418079535738828j)
""" hyp2f2 = r""" Gives the hypergeometric function `\,_2F_2(a_1,a_2;b_1,b_2; z)`. The call ``hyp2f2(a1,a2,b1,b2,z)`` is equivalent to ``hyper([a1,a2],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a, b, c, d = 1.5, (-1,3), 2.25, 4
>>> hyp2f2(a, b, c, d, 10**20) -5.275758229007902299823821e+43429448190325182663
>>> hyp2f2(a, b, c, d, -10**20) 2561445.079983207701073448
>>> hyp2f2(a, b, c, d, 10**20*j) (2218276.509664121194836667 - 1280722.539991603850462856j)
>>> hyp2f2(2+3j, -2j, 0.5j, 4j, 10-20j) (80500.68321405666957342788 - 20346.82752982813540993502j)
""" hyp2f3 = r""" Gives the hypergeometric function `\,_2F_3(a_1,a_2;b_1,b_2,b_3; z)`. The call ``hyp2f3(a1,a2,b1,b2,b3,z)`` is equivalent to ``hyper([a1,a2],[b1,b2,b3],z)``.
Evaluation works for arbitrarily large arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a1,a2,b1,b2,b3 = 1.5, (-1,3), 2.25, 4, (1,5) >>> hyp2f3(a1,a2,b1,b2,b3,10**20) -4.169178177065714963568963e+8685889590 >>> hyp2f3(a1,a2,b1,b2,b3,-10**20) 7064472.587757755088178629 >>> hyp2f3(a1,a2,b1,b2,b3,10**20*j) (-5.163368465314934589818543e+6141851415 + 1.783578125755972803440364e+6141851416j) >>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10-20j) (-2280.938956687033150740228 + 13620.97336609573659199632j) >>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10000000-20000000j) (4.849835186175096516193e+3504 - 3.365981529122220091353633e+3504j) """ hyp2f1 = r""" Gives the Gauss hypergeometric function `\,_2F_1` (often simply referred to as *the* hypergeometric function), defined for `|z| < 1` as .. math :: \,_2F_1(a,b,c,z) = \sum_{k=0}^{\infty} \frac{(a)_k (b)_k}{(c)_k} \frac{z^k}{k!}. and for `|z| \ge 1` by analytic continuation, with a branch cut on `(1, \infty)` when necessary. Special cases of this function include many of the orthogonal polynomials as well as the incomplete beta function and other functions. Properties of the Gauss hypergeometric function are documented comprehensively in many references, for example Abramowitz & Stegun, section 15. The implementation supports the analytic continuation as well as evaluation close to the unit circle where `|z| \approx 1`. The syntax ``hyp2f1(a,b,c,z)`` is equivalent to ``hyper([a,b],[c],z)``. **Examples** Evaluation with `z` inside, outside and on the unit circle, for fixed parameters:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hyp2f1(2, (1,2), 4, 0.75) 1.303703703703703703703704 >>> hyp2f1(2, (1,2), 4, -1.75) 0.7431290566046919177853916 >>> hyp2f1(2, (1,2), 4, 1.75) (1.418075801749271137026239 - 1.114976146679907015775102j) >>> hyp2f1(2, (1,2), 4, 1) 1.6 >>> hyp2f1(2, (1,2), 4, -1) 0.8235498012182875315037882 >>> hyp2f1(2, (1,2), 4, j) (0.9144026291433065674259078 + 0.2050415770437884900574923j) >>> hyp2f1(2, (1,2), 4, 2+j) (0.9274013540258103029011549 + 0.7455257875808100868984496j) >>> hyp2f1(2, (1,2), 4, 0.25j) (0.9931169055799728251931672 + 0.06154836525312066938147793j) Evaluation with complex parameter values:: >>> hyp2f1(1+j, 0.75, 10j, 1+5j) (0.8834833319713479923389638 + 0.7053886880648105068343509j) Evaluation with `z = 1`:: >>> hyp2f1(-2.5, 3.5, 1.5, 1) 0.0 >>> hyp2f1(-2.5, 3, 4, 1) 0.06926406926406926406926407 >>> hyp2f1(2, 3, 4, 1) +inf Evaluation for huge arguments:: >>> hyp2f1((-1,3), 1.75, 4, '1e100') (7.883714220959876246415651e+32 + 1.365499358305579597618785e+33j) >>> hyp2f1((-1,3), 1.75, 4, '1e1000000') (7.883714220959876246415651e+333332 + 1.365499358305579597618785e+333333j) >>> hyp2f1((-1,3), 1.75, 4, '1e1000000j') (1.365499358305579597618785e+333333 - 7.883714220959876246415651e+333332j) An integral representation:: >>> a,b,c,z = -0.5, 1, 2.5, 0.25 >>> g = lambda t: t**(b-1) * (1-t)**(c-b-1) * (1-t*z)**(-a) >>> gammaprod([c],[b,c-b]) * quad(g, [0,1]) 0.9480458814362824478852618 >>> hyp2f1(a,b,c,z) 0.9480458814362824478852618 Verifying the hypergeometric differential equation:: >>> f = lambda z: hyp2f1(a,b,c,z) >>> chop(z*(1-z)*diff(f,z,2) + (c-(a+b+1)*z)*diff(f,z) - a*b*f(z)) 0.0 """ hyp3f2 = r""" Gives the generalized hypergeometric function `\,_3F_2`, defined for `|z| < 1` as .. math :: \,_3F_2(a_1,a_2,a_3,b_1,b_2,z) = \sum_{k=0}^{\infty} \frac{(a_1)_k (a_2)_k (a_3)_k}{(b_1)_k (b_2)_k} \frac{z^k}{k!}. and for `|z| \ge 1` by analytic continuation. 
The analytic structure of this function is similar to that of `\,_2F_1`, generally with a singularity at `z = 1` and a branch cut on `(1, \infty)`. Evaluation is supported inside, on, and outside the circle of convergence `|z| = 1`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hyp3f2(1,2,3,4,5,0.25) 1.083533123380934241548707 >>> hyp3f2(1,2+2j,3,4,5,-10+10j) (0.1574651066006004632914361 - 0.03194209021885226400892963j) >>> hyp3f2(1,2,3,4,5,-10) 0.3071141169208772603266489 >>> hyp3f2(1,2,3,4,5,10) (-0.4857045320523947050581423 - 0.5988311440454888436888028j) >>> hyp3f2(0.25,1,1,2,1.5,1) 1.157370995096772047567631 >>> (8-pi-2*ln2)/3 1.157370995096772047567631 >>> hyp3f2(1+j,0.5j,2,1,-2j,-1) (1.74518490615029486475959 + 0.1454701525056682297614029j) >>> hyp3f2(1+j,0.5j,2,1,-2j,sqrt(j)) (0.9829816481834277511138055 - 0.4059040020276937085081127j) >>> hyp3f2(-3,2,1,-5,4,1) 1.41 >>> hyp3f2(-3,2,1,-5,4,2) 2.12 Evaluation very close to the unit circle:: >>> hyp3f2(1,2,3,4,5,'1.0001') (1.564877796743282766872279 - 3.76821518787438186031973e-11j) >>> hyp3f2(1,2,3,4,5,'1+0.0001j') (1.564747153061671573212831 + 0.0001305757570366084557648482j) >>> hyp3f2(1,2,3,4,5,'0.9999') 1.564616644881686134983664 >>> hyp3f2(1,2,3,4,5,'-0.9999') 0.7823896253461678060196207 .. note :: Evaluation for `|z-1|` small can currently be inaccurate or slow for some parameter combinations. For various parameter combinations, `\,_3F_2` admits representation in terms of hypergeometric functions of lower degree, or in terms of simpler functions:: >>> for a, b, z in [(1,2,-1), (2,0.5,1)]: ... hyp2f1(a,b,a+b+0.5,z)**2 ... hyp3f2(2*a,a+b,2*b,a+b+0.5,2*a+2*b,z) ... 0.4246104461966439006086308 0.4246104461966439006086308 7.111111111111111111111111 7.111111111111111111111111 >>> z = 2+3j >>> hyp3f2(0.5,1,1.5,2,2,z) (0.7621440939243342419729144 + 0.4249117735058037649915723j) >>> 4*(pi-2*ellipe(z))/(pi*z) (0.7621440939243342419729144 + 0.4249117735058037649915723j) """ hyperu = r""" Gives the Tricomi confluent hypergeometric function `U`, also known as the Kummer or confluent hypergeometric function of the second kind. This function gives a second linearly independent solution to the confluent hypergeometric differential equation (the first is provided by `\,_1F_1` -- see :func:`~mpmath.hyp1f1`). **Examples** Evaluation for arbitrary complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hyperu(2,3,4) 0.0625 >>> hyperu(0.25, 5, 1000) 0.1779949416140579573763523 >>> hyperu(0.25, 5, -1000) (0.1256256609322773150118907 - 0.1256256609322773150118907j) The `U` function may be singular at `z = 0`:: >>> hyperu(1.5, 2, 0) +inf >>> hyperu(1.5, -2, 0) 0.1719434921288400112603671 Verifying the differential equation:: >>> a, b = 1.5, 2 >>> f = lambda z: hyperu(a,b,z) >>> for z in [-10, 3, 3+4j]: ... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z)) ... 0.0 0.0 0.0 An integral representation:: >>> a,b,z = 2, 3.5, 4.25 >>> hyperu(a,b,z) 0.06674960718150520648014567 >>> quad(lambda t: exp(-z*t)*t**(a-1)*(1+t)**(b-a-1),[0,inf]) / gamma(a) 0.06674960718150520648014567 [1] http://people.math.sfu.ca/~cbm/aands/page_504.htm """ hyp2f0 = r""" Gives the hypergeometric function `\,_2F_0`, defined formally by the series .. math :: \,_2F_0(a,b;;z) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{z^n}{n!}. This series usually does not converge. For small enough `z`, it can be viewed as an asymptotic series that may be summed directly with an appropriate truncation. 
When this is not the case, :func:`~mpmath.hyp2f0` gives a regularized sum, or equivalently, it uses a representation in terms of the hypergeometric U function [1]. The series also converges when either `a` or `b` is a nonpositive integer, as it then terminates into a polynomial after `-a` or `-b` terms. **Examples** Evaluation is supported for arbitrary complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hyp2f0((2,3), 1.25, -100) 0.07095851870980052763312791 >>> hyp2f0((2,3), 1.25, 100) (-0.03254379032170590665041131 + 0.07269254613282301012735797j) >>> hyp2f0(-0.75, 1-j, 4j) (-0.3579987031082732264862155 - 3.052951783922142735255881j) Even with real arguments, the regularized value of 2F0 is often complex-valued, but the imaginary part decreases exponentially as `z \to 0`. In the following example, the first call uses complex evaluation while the second has a small enough `z` to evaluate using the direct series and thus the returned value is strictly real (this should be taken to indicate that the imaginary part is less than ``eps``):: >>> mp.dps = 15 >>> hyp2f0(1.5, 0.5, 0.05) (1.04166637647907 + 8.34584913683906e-8j) >>> hyp2f0(1.5, 0.5, 0.0005) 1.00037535207621 The imaginary part can be retrieved by increasing the working precision:: >>> mp.dps = 80 >>> nprint(hyp2f0(1.5, 0.5, 0.009).imag) 1.23828e-46 In the polynomial case (the series terminating), 2F0 can evaluate exactly:: >>> mp.dps = 15 >>> hyp2f0(-6,-6,2) 291793.0 >>> identify(hyp2f0(-2,1,0.25)) '(5/8)' The coefficients of the polynomials can be recovered using Taylor expansion:: >>> nprint(taylor(lambda x: hyp2f0(-3,0.5,x), 0, 10)) [1.0, -1.5, 2.25, -1.875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] >>> nprint(taylor(lambda x: hyp2f0(-4,0.5,x), 0, 10)) [1.0, -2.0, 4.5, -7.5, 6.5625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] [1] http://people.math.sfu.ca/~cbm/aands/page_504.htm """ gammainc = r""" ``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete gamma function with integration limits `[a, b]`: .. math :: \Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt The generalized incomplete gamma function reduces to the following special cases when one or both endpoints are fixed: * `\Gamma(z,0,\infty)` is the standard ("complete") gamma function, `\Gamma(z)` (available directly as the mpmath function :func:`~mpmath.gamma`) * `\Gamma(z,a,\infty)` is the "upper" incomplete gamma function, `\Gamma(z,a)` * `\Gamma(z,0,b)` is the "lower" incomplete gamma function, `\gamma(z,b)`. Of course, we have `\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)` for all `z` and `x`. Note however that some authors reverse the order of the arguments when defining the lower and upper incomplete gamma function, so one should be careful to get the correct definition. If also given the keyword argument ``regularized=True``, :func:`~mpmath.gammainc` computes the "regularized" incomplete gamma function .. math :: P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}. 
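As a minimal sketch of the keyword (with arbitrarily chosen arguments), the regularized value is simply the plain value divided by `\Gamma(z)`::

    from mpmath import *
    mp.dps = 25
    z, a, b = 2.5, 1, 4
    r1 = gammainc(z, a, b, regularized=True)
    r2 = gammainc(z, a, b) / gamma(z)
    # r1 and r2 should agree to the working precision
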
**Examples** We can compare with numerical quadrature to verify that :func:`~mpmath.gammainc` computes the integral in the definition:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> gammainc(2+3j, 4, 10) (0.00977212668627705160602312 - 0.0770637306312989892451977j) >>> quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10]) (0.00977212668627705160602312 - 0.0770637306312989892451977j) Argument symmetries follow directly from the integral definition:: >>> gammainc(3, 4, 5) + gammainc(3, 5, 4) 0.0 >>> gammainc(3,0,2) + gammainc(3,2,4); gammainc(3,0,4) 1.523793388892911312363331 1.523793388892911312363331 >>> findroot(lambda z: gammainc(2,z,3), 1) 3.0 Evaluation for arbitrarily large arguments:: >>> gammainc(10, 100) 4.083660630910611272288592e-26 >>> gammainc(10, 10000000000000000) 5.290402449901174752972486e-4342944819032375 >>> gammainc(3+4j, 1000000+1000000j) (-1.257913707524362408877881e-434284 + 2.556691003883483531962095e-434284j) Evaluation of a generalized incomplete gamma function automatically chooses the representation that gives a more accurate result, depending on which parameter is larger:: >>> gammainc(10000000, 3) - gammainc(10000000, 2) # Bad 0.0 >>> gammainc(10000000, 2, 3) # Good 1.755146243738946045873491e+4771204 >>> gammainc(2, 0, 100000001) - gammainc(2, 0, 100000000) # Bad 0.0 >>> gammainc(2, 100000000, 100000001) # Good 4.078258353474186729184421e-43429441 The incomplete gamma functions satisfy simple recurrence relations:: >>> mp.dps = 25 >>> z, a = mpf(3.5), mpf(2) >>> gammainc(z+1, a); z*gammainc(z,a) + a**z*exp(-a) 10.60130296933533459267329 10.60130296933533459267329 >>> gammainc(z+1,0,a); z*gammainc(z,0,a) - a**z*exp(-a) 1.030425427232114336470932 1.030425427232114336470932 Evaluation at integers and poles:: >>> gammainc(-3, -4, -5) (-0.2214577048967798566234192 + 0.0j) >>> gammainc(-3, 0, 5) +inf If `z` is an integer, the recurrence reduces the incomplete gamma function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and `Q` are polynomials:: >>> gammainc(1, 2); exp(-2) 0.1353352832366126918939995 0.1353352832366126918939995 >>> mp.dps = 50 >>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)']) '(326*exp(-1) + (-872)*exp(-2))' The incomplete gamma functions reduce to functions such as the exponential integral Ei and the error function for special arguments:: >>> mp.dps = 25 >>> gammainc(0, 4); -ei(-4) 0.00377935240984890647887486 0.00377935240984890647887486 >>> gammainc(0.5, 0, 2); sqrt(pi)*erf(sqrt(2)) 1.691806732945198336509541 1.691806732945198336509541 """ erf = r""" Computes the error function, `\mathrm{erf}(x)`. The error function is the normalized antiderivative of the Gaussian function `\exp(-t^2)`. More precisely, .. 
math:: \mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt **Basic examples** Simple values and limits include:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> erf(0) 0.0 >>> erf(1) 0.842700792949715 >>> erf(-1) -0.842700792949715 >>> erf(inf) 1.0 >>> erf(-inf) -1.0 For large real `x`, `\mathrm{erf}(x)` approaches 1 very rapidly:: >>> erf(3) 0.999977909503001 >>> erf(5) 0.999999999998463 The error function is an odd function:: >>> nprint(chop(taylor(erf, 0, 5))) [0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838] :func:`~mpmath.erf` implements arbitrary-precision evaluation and supports complex numbers:: >>> mp.dps = 50 >>> erf(0.5) 0.52049987781304653768274665389196452873645157575796 >>> mp.dps = 25 >>> erf(1+j) (1.316151281697947644880271 + 0.1904534692378346862841089j) Evaluation is supported for large arguments:: >>> mp.dps = 25 >>> erf('1e1000') 1.0 >>> erf('-1e1000') -1.0 >>> erf('1e-1000') 1.128379167095512573896159e-1000 >>> erf('1e7j') (0.0 + 8.593897639029319267398803e+43429448190317j) >>> erf('1e7+1e7j') (0.9999999858172446172631323 + 3.728805278735270407053139e-8j) **Related functions** See also :func:`~mpmath.erfc`, which is more accurate for large `x`, and :func:`~mpmath.erfi` which gives the antiderivative of `\exp(t^2)`. The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc` are also related to the error function. """ erfc = r""" Computes the complementary error function, `\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`. This function avoids cancellation that occurs when naively computing the complementary error function as ``1-erf(x)``:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> 1 - erf(10) 0.0 >>> erfc(10) 2.08848758376254e-45 :func:`~mpmath.erfc` works accurately even for ludicrously large arguments:: >>> erfc(10**10) 4.3504398860243e-43429448190325182776 Complex arguments are supported:: >>> erfc(500+50j) (1.19739830969552e-107492 + 1.46072418957528e-107491j) """ erfi = r""" Computes the imaginary error function, `\mathrm{erfi}(x)`. The imaginary error function is defined in analogy with the error function, but with a positive sign in the integrand: .. math :: \mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt Whereas the error function rapidly converges to 1 as `x` grows, the imaginary error function rapidly diverges to infinity. The functions are related as `\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex numbers `x`. **Examples** Basic values and limits:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> erfi(0) 0.0 >>> erfi(1) 1.65042575879754 >>> erfi(-1) -1.65042575879754 >>> erfi(inf) +inf >>> erfi(-inf) -inf Note the symmetry between erf and erfi:: >>> erfi(3j) (0.0 + 0.999977909503001j) >>> erf(3) 0.999977909503001 >>> erf(1+2j) (-0.536643565778565 - 5.04914370344703j) >>> erfi(2+1j) (-5.04914370344703 - 0.536643565778565j) Large arguments are supported:: >>> erfi(1000) 1.71130938718796e+434291 >>> erfi(10**10) 7.3167287567024e+43429448190325182754 >>> erfi(-10**10) -7.3167287567024e+43429448190325182754 >>> erfi(1000-500j) (2.49895233563961e+325717 + 2.6846779342253e+325717j) >>> erfi(100000j) (0.0 + 1.0j) >>> erfi(-100000j) (0.0 - 1.0j) """ erfinv = r""" Computes the inverse error function, satisfying .. math :: \mathrm{erf}(\mathrm{erfinv}(x)) = \mathrm{erfinv}(\mathrm{erf}(x)) = x. This function is defined only for `-1 \le x \le 1`. 
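As a rough cross-check of the definition (a sketch only; the test value and the starting point `0` for the root-finder are arbitrary choices), the inverse can also be recovered by root-finding on :func:`~mpmath.erf`::

    from mpmath import *
    mp.dps = 25
    x = mpf('0.3')
    r1 = erfinv(x)
    r2 = findroot(lambda t: erf(t) - x, 0)
    # both give the unique t with erf(t) = x
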
**Examples** Special values include:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> erfinv(0) 0.0 >>> erfinv(1) +inf >>> erfinv(-1) -inf The domain is limited to the standard interval:: >>> erfinv(2) Traceback (most recent call last): ... ValueError: erfinv(x) is defined only for -1 <= x <= 1 It is simple to check that :func:`~mpmath.erfinv` computes inverse values of :func:`~mpmath.erf` as promised:: >>> erf(erfinv(0.75)) 0.75 >>> erf(erfinv(-0.995)) -0.995 :func:`~mpmath.erfinv` supports arbitrary-precision evaluation:: >>> mp.dps = 50 >>> x = erf(2) >>> x 0.99532226501895273416206925636725292861089179704006 >>> erfinv(x) 2.0 A definite integral involving the inverse error function:: >>> mp.dps = 15 >>> quad(erfinv, [0, 1]) 0.564189583547756 >>> 1/sqrt(pi) 0.564189583547756 The inverse error function can be used to generate random numbers with a Gaussian distribution (although this is a relatively inefficient algorithm):: >>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP [-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012] """ npdf = r""" ``npdf(x, mu=0, sigma=1)`` evaluates the probability density function of a normal distribution with mean value `\mu` and variance `\sigma^2`. Elementary properties of the probability distribution can be verified using numerical integration:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> quad(npdf, [-inf, inf]) 1.0 >>> quad(lambda x: npdf(x, 3), [3, inf]) 0.5 >>> quad(lambda x: npdf(x, 3, 2), [3, inf]) 0.5 See also :func:`~mpmath.ncdf`, which gives the cumulative distribution. """ ncdf = r""" ``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution function of a normal distribution with mean value `\mu` and variance `\sigma^2`. See also :func:`~mpmath.npdf`, which gives the probability density. Elementary properties include:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> ncdf(pi, mu=pi) 0.5 >>> ncdf(-inf) 0.0 >>> ncdf(+inf) 1.0 The cumulative distribution is the integral of the density function having identical mu and sigma:: >>> mp.dps = 15 >>> diff(ncdf, 2) 0.053990966513188 >>> npdf(2) 0.053990966513188 >>> diff(lambda x: ncdf(x, 1, 0.5), 0) 0.107981933026376 >>> npdf(0, 1, 0.5) 0.107981933026376 """ expint = r""" :func:`~mpmath.expint(n,z)` gives the generalized exponential integral or En-function, .. math :: \mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt, where `n` and `z` may both be complex numbers. The case with `n = 1` is also given by :func:`~mpmath.e1`. **Examples** Evaluation at real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> expint(1, 6.25) 0.0002704758872637179088496194 >>> expint(-3, 2+3j) (0.00299658467335472929656159 + 0.06100816202125885450319632j) >>> expint(2+3j, 4-5j) (0.001803529474663565056945248 - 0.002235061547756185403349091j) At negative integer values of `n`, `E_n(z)` reduces to a rational-exponential function:: >>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\ ... exp(z)/z**(n+2) >>> n = 3 >>> z = 1/pi >>> expint(-n,z) 584.2604820613019908668219 >>> f(n,z) 584.2604820613019908668219 >>> n = 5 >>> expint(-n,z) 115366.5762594725451811138 >>> f(n,z) 115366.5762594725451811138 """ e1 = r""" Computes the exponential integral `\mathrm{E}_1(z)`, given by .. math :: \mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt. This is equivalent to :func:`~mpmath.expint` with `n = 1`. 
**Examples** Two ways to evaluate this function:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> e1(6.25) 0.0002704758872637179088496194 >>> expint(1,6.25) 0.0002704758872637179088496194 The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`) with negated argument, except for an imaginary branch cut term:: >>> e1(2.5) 0.02491491787026973549562801 >>> -ei(-2.5) 0.02491491787026973549562801 >>> e1(-2.5) (-7.073765894578600711923552 - 3.141592653589793238462643j) >>> -ei(2.5) -7.073765894578600711923552 """ ei = r""" Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`. The exponential integral is defined as .. math :: \mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt. When the integration range includes `t = 0`, the exponential integral is interpreted as providing the Cauchy principal value. For real `x`, the Ei-function behaves roughly like `\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`. The Ei-function is related to the more general family of exponential integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`. **Basic examples** Some basic values and limits are:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> ei(0) -inf >>> ei(1) 1.89511781635594 >>> ei(inf) +inf >>> ei(-inf) 0.0 For `x < 0`, the defining integral can be evaluated numerically as a reference:: >>> ei(-4) -0.00377935240984891 >>> quad(lambda t: exp(t)/t, [-inf, -4]) -0.00377935240984891 :func:`~mpmath.ei` supports complex arguments and arbitrary precision evaluation:: >>> mp.dps = 50 >>> ei(pi) 10.928374389331410348638445906907535171566338835056 >>> mp.dps = 25 >>> ei(3+4j) (-4.154091651642689822535359 + 4.294418620024357476985535j) **Related functions** The exponential integral is closely related to the logarithmic integral. See :func:`~mpmath.li` for additional information. The exponential integral is related to the hyperbolic and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`, :func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary exponential function is related to the hyperbolic and trigonometric functions:: >>> mp.dps = 15 >>> ei(3) 9.93383257062542 >>> chi(3) + shi(3) 9.93383257062542 >>> chop(ci(3j) - j*si(3j) - pi*j/2) 9.93383257062542 Beware that logarithmic corrections, as in the last example above, are required to obtain the correct branch in general. For details, see [1]. The exponential integral is also a special case of the hypergeometric function `\,_2F_2`:: >>> z = 0.6 >>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler 0.769881289937359 >>> ei(z) 0.769881289937359 **References** 1. Relations between Ei and other functions: http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/ 2. Abramowitz & Stegun, section 5: http://people.math.sfu.ca/~cbm/aands/page_228.htm 3. Asymptotic expansion for Ei: http://mathworld.wolfram.com/En-Function.html """ li = r""" Computes the logarithmic integral or li-function `\mathrm{li}(x)`, defined by .. math :: \mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt The logarithmic integral has a singularity at `x = 1`. Alternatively, ``li(x, offset=True)`` computes the offset logarithmic integral (used in number theory) .. math :: \mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt. These two functions are related via the simple identity `\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`. The logarithmic integral should also not be confused with the polylogarithm (also denoted by Li), which is implemented as :func:`~mpmath.polylog`. 
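As a minimal sketch of the offset identity (the evaluation point `x = 10` is arbitrary)::

    from mpmath import *
    mp.dps = 25
    x = 10
    r1 = li(x, offset=True)
    r2 = li(x) - li(2)
    # r1 and r2 should agree, since Li(x) = li(x) - li(2)
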
**Examples** Some basic values and limits:: >>> from mpmath import * >>> mp.dps = 30; mp.pretty = True >>> li(0) 0.0 >>> li(1) -inf >>> li(1) -inf >>> li(2) 1.04516378011749278484458888919 >>> findroot(li, 2) 1.45136923488338105028396848589 >>> li(inf) +inf >>> li(2, offset=True) 0.0 >>> li(1, offset=True) -inf >>> li(0, offset=True) -1.04516378011749278484458888919 >>> li(10, offset=True) 5.12043572466980515267839286347 The logarithmic integral can be evaluated for arbitrary complex arguments:: >>> mp.dps = 20 >>> li(3+4j) (3.1343755504645775265 + 2.6769247817778742392j) The logarithmic integral is related to the exponential integral:: >>> ei(log(3)) 2.1635885946671919729 >>> li(3) 2.1635885946671919729 The logarithmic integral grows like `O(x/\log(x))`:: >>> mp.dps = 15 >>> x = 10**100 >>> x/log(x) 4.34294481903252e+97 >>> li(x) 4.3619719871407e+97 The prime number theorem states that the number of primes less than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently `\mathrm{li}(x)`). For example, it is known that there are exactly 1,925,320,391,606,803,968,923 prime numbers less than `10^{23}` [1]. The logarithmic integral provides a very accurate estimate:: >>> li(10**23, offset=True) 1.92532039161405e+21 A definite integral is:: >>> quad(li, [0, 1]) -0.693147180559945 >>> -ln(2) -0.693147180559945 **References** 1. http://mathworld.wolfram.com/PrimeCountingFunction.html 2. http://mathworld.wolfram.com/LogarithmicIntegral.html """ ci = r""" Computes the cosine integral, .. math :: \mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt = \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt **Examples** Some values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> ci(0) -inf >>> ci(1) 0.3374039229009681346626462 >>> ci(pi) 0.07366791204642548599010096 >>> ci(inf) 0.0 >>> ci(-inf) (0.0 + 3.141592653589793238462643j) >>> ci(2+3j) (1.408292501520849518759125 - 2.983617742029605093121118j) The cosine integral behaves roughly like the sinc function (see :func:`~mpmath.sinc`) for large real `x`:: >>> ci(10**10) -4.875060251748226537857298e-11 >>> sinc(10**10) -4.875060250875106915277943e-11 >>> chop(limit(ci, inf)) 0.0 It has infinitely many roots on the positive real axis:: >>> findroot(ci, 1) 0.6165054856207162337971104 >>> findroot(ci, 2) 3.384180422551186426397851 Evaluation is supported for `z` anywhere in the complex plane:: >>> ci(10**6*(1+j)) (4.449410587611035724984376e+434287 + 9.75744874290013526417059e+434287j) We can evaluate the defining integral as a reference:: >>> mp.dps = 15 >>> -quadosc(lambda t: cos(t)/t, [5, inf], omega=1) -0.190029749656644 >>> ci(5) -0.190029749656644 Some infinite series can be evaluated using the cosine integral:: >>> nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf]) -0.239811742000565 >>> ci(1) - euler -0.239811742000565 """ si = r""" Computes the sine integral, .. math :: \mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt. The sine integral is thus the antiderivative of the sinc function (see :func:`~mpmath.sinc`). 
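This antiderivative property can be sketched numerically (the evaluation point `x = 2` is arbitrary)::

    from mpmath import *
    mp.dps = 25
    x = 2
    d1 = diff(si, x)   # numerical derivative of the sine integral at x
    d2 = sinc(x)       # sin(x)/x
    # d1 and d2 should agree, since Si'(x) = sin(x)/x
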
**Examples** Some values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> si(0) 0.0 >>> si(1) 0.9460830703671830149413533 >>> si(-1) -0.9460830703671830149413533 >>> si(pi) 1.851937051982466170361053 >>> si(inf) 1.570796326794896619231322 >>> si(-inf) -1.570796326794896619231322 >>> si(2+3j) (4.547513889562289219853204 + 1.399196580646054789459839j) The sine integral approaches `\pi/2` for large real `x`:: >>> si(10**10) 1.570796326707584656968511 >>> pi/2 1.570796326794896619231322 Evaluation is supported for `z` anywhere in the complex plane:: >>> si(10**6*(1+j)) (-9.75744874290013526417059e+434287 + 4.449410587611035724984376e+434287j) We can evaluate the defining integral as a reference:: >>> mp.dps = 15 >>> quad(sinc, [0, 5]) 1.54993124494467 >>> si(5) 1.54993124494467 Some infinite series can be evaluated using the sine integral:: >>> nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf]) 0.946083070367183 >>> si(1) 0.946083070367183 """ chi = r""" Computes the hyperbolic cosine integral, defined in analogy with the cosine integral (see :func:`~mpmath.ci`) as .. math :: \mathrm{Chi}(x) = -\int_x^{\infty} \frac{\cosh t}{t}\,dt = \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt Some values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> chi(0) -inf >>> chi(1) 0.8378669409802082408946786 >>> chi(inf) +inf >>> findroot(chi, 0.5) 0.5238225713898644064509583 >>> chi(2+3j) (-0.1683628683277204662429321 + 2.625115880451325002151688j) Evaluation is supported for `z` anywhere in the complex plane:: >>> chi(10**6*(1+j)) (4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j) """ shi = r""" Computes the hyperbolic sine integral, defined in analogy with the sine integral (see :func:`~mpmath.si`) as .. math :: \mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt. Some values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> shi(0) 0.0 >>> shi(1) 1.057250875375728514571842 >>> shi(-1) -1.057250875375728514571842 >>> shi(inf) +inf >>> shi(2+3j) (-0.1931890762719198291678095 + 2.645432555362369624818525j) Evaluation is supported for `z` anywhere in the complex plane:: >>> shi(10**6*(1+j)) (4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j) """ fresnels = r""" Computes the Fresnel sine integral .. math :: S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt Note that some sources define this function without the normalization factor `\pi/2`. **Examples** Some basic values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> fresnels(0) 0.0 >>> fresnels(inf) 0.5 >>> fresnels(-inf) -0.5 >>> fresnels(1) 0.4382591473903547660767567 >>> fresnels(1+2j) (36.72546488399143842838788 + 15.58775110440458732748279j) Comparing with the definition:: >>> fresnels(3) 0.4963129989673750360976123 >>> quad(lambda t: sin(pi*t**2/2), [0,3]) 0.4963129989673750360976123 """ fresnelc = r""" Computes the Fresnel cosine integral .. math :: C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt Note that some sources define this function without the normalization factor `\pi/2`. 
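If the unnormalized convention `\int_0^x \cos(t^2)\,dt` is wanted instead, it can be recovered by rescaling the argument; a sketch (using the substitution `t = u\sqrt{\pi/2}`, with an arbitrary test point)::

    from mpmath import *
    mp.dps = 25
    x = 1.5
    r1 = quad(lambda t: cos(t**2), [0, x])
    r2 = sqrt(pi/2) * fresnelc(x*sqrt(2/pi))
    # r1 and r2 should agree: the rescaling converts between the two conventions
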
**Examples** Some basic values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> fresnelc(0) 0.0 >>> fresnelc(inf) 0.5 >>> fresnelc(-inf) -0.5 >>> fresnelc(1) 0.7798934003768228294742064 >>> fresnelc(1+2j) (16.08787137412548041729489 - 36.22568799288165021578758j) Comparing with the definition:: >>> fresnelc(3) 0.6057207892976856295561611 >>> quad(lambda t: cos(pi*t**2/2), [0,3]) 0.6057207892976856295561611 """ airyai = r""" Computes the Airy function `\operatorname{Ai}(z)`, which is the solution of the Airy differential equation `f''(z) - z f(z) = 0` with initial conditions .. math :: \operatorname{Ai}(0) = \frac{1}{3^{2/3}\Gamma\left(\frac{2}{3}\right)} \operatorname{Ai}'(0) = -\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}. Other common ways of defining the Ai-function include integrals such as .. math :: \operatorname{Ai}(x) = \frac{1}{\pi} \int_0^{\infty} \cos\left(\frac{1}{3}t^3+xt\right) dt \qquad x \in \mathbb{R} \operatorname{Ai}(z) = \frac{\sqrt{3}}{2\pi} \int_0^{\infty} \exp\left(-\frac{t^3}{3}-\frac{z^3}{3t^3}\right) dt. The Ai-function is an entire function with a turning point, behaving roughly like a slowly decaying sine wave for `z < 0` and like a rapidly decreasing exponential for `z > 0`. A second solution of the Airy differential equation is given by `\operatorname{Bi}(z)` (see :func:`~mpmath.airybi`). Optionally, with *derivative=alpha*, :func:`airyai` can compute the `\alpha`-th order fractional derivative with respect to `z`. For `\alpha = n = 1,2,3,\ldots` this gives the derivative `\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots` this gives the `n`-fold iterated integral .. math :: f_0(z) = \operatorname{Ai}(z) f_n(z) = \int_0^z f_{n-1}(t) dt. The Ai-function has infinitely many zeros, all located along the negative half of the real axis. They can be computed with :func:`~mpmath.airyaizero`. **Plots** .. literalinclude :: /plots/ai.py .. image :: /plots/ai.png .. literalinclude :: /plots/ai_c.py .. image :: /plots/ai_c.png **Basic examples** Limits and values include:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> airyai(0); 1/(power(3,'2/3')*gamma('2/3')) 0.3550280538878172392600632 0.3550280538878172392600632 >>> airyai(1) 0.1352924163128814155241474 >>> airyai(-1) 0.5355608832923521187995166 >>> airyai(inf); airyai(-inf) 0.0 0.0 Evaluation is supported for large magnitudes of the argument:: >>> airyai(-100) 0.1767533932395528780908311 >>> airyai(100) 2.634482152088184489550553e-291 >>> airyai(50+50j) (-5.31790195707456404099817e-68 - 1.163588003770709748720107e-67j) >>> airyai(-50+50j) (1.041242537363167632587245e+158 + 3.347525544923600321838281e+157j) Huge arguments are also fine:: >>> airyai(10**10) 1.162235978298741779953693e-289529654602171 >>> airyai(-10**10) 0.0001736206448152818510510181 >>> w = airyai(10**10*(1+j)) >>> w.real 5.711508683721355528322567e-186339621747698 >>> w.imag 1.867245506962312577848166e-186339621747697 The first root of the Ai-function is:: >>> findroot(airyai, -2) -2.338107410459767038489197 >>> airyaizero(1) -2.338107410459767038489197 **Properties and relations** Verifying the Airy differential equation:: >>> for z in [-3.4, 0, 2.5, 1+2j]: ... chop(airyai(z,2) - z*airyai(z)) ... 
0.0 0.0 0.0 0.0 The first few terms of the Taylor series expansion around `z = 0` (every third term is zero):: >>> nprint(taylor(airyai, 0, 5)) [0.355028, -0.258819, 0.0, 0.0591713, -0.0215683, 0.0] The Airy functions satisfy the Wronskian relation `\operatorname{Ai}(z) \operatorname{Bi}'(z) - \operatorname{Ai}'(z) \operatorname{Bi}(z) = 1/\pi`:: >>> z = -0.5 >>> airyai(z)*airybi(z,1) - airyai(z,1)*airybi(z) 0.3183098861837906715377675 >>> 1/pi 0.3183098861837906715377675 The Airy functions can be expressed in terms of Bessel functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have:: >>> z = -3 >>> airyai(z) -0.3788142936776580743472439 >>> y = 2*power(-z,'3/2')/3 >>> (sqrt(-z) * (besselj('1/3',y) + besselj('-1/3',y)))/3 -0.3788142936776580743472439 **Derivatives and integrals** Derivatives of the Ai-function (directly and using :func:`~mpmath.diff`):: >>> airyai(-3,1); diff(airyai,-3) 0.3145837692165988136507873 0.3145837692165988136507873 >>> airyai(-3,2); diff(airyai,-3,2) 1.136442881032974223041732 1.136442881032974223041732 >>> airyai(1000,1); diff(airyai,1000) -2.943133917910336090459748e-9156 -2.943133917910336090459748e-9156 Several derivatives at `z = 0`:: >>> airyai(0,0); airyai(0,1); airyai(0,2) 0.3550280538878172392600632 -0.2588194037928067984051836 0.0 >>> airyai(0,3); airyai(0,4); airyai(0,5) 0.3550280538878172392600632 -0.5176388075856135968103671 0.0 >>> airyai(0,15); airyai(0,16); airyai(0,17) 1292.30211615165475090663 -3188.655054727379756351861 0.0 The integral of the Ai-function:: >>> airyai(3,-1); quad(airyai, [0,3]) 0.3299203760070217725002701 0.3299203760070217725002701 >>> airyai(-10,-1); quad(airyai, [0,-10]) -0.765698403134212917425148 -0.765698403134212917425148 Integrals of high or fractional order:: >>> airyai(-2,0.5); differint(airyai,-2,0.5,0) (0.0 + 0.2453596101351438273844725j) (0.0 + 0.2453596101351438273844725j) >>> airyai(-2,-4); differint(airyai,-2,-4,0) 0.2939176441636809580339365 0.2939176441636809580339365 >>> airyai(0,-1); airyai(0,-2); airyai(0,-3) 0.0 0.0 0.0 Integrals of the Ai-function can be evaluated at limit points:: >>> airyai(-1000000,-1); airyai(-inf,-1) -0.6666843728311539978751512 -0.6666666666666666666666667 >>> airyai(10,-1); airyai(+inf,-1) 0.3333333332991690159427932 0.3333333333333333333333333 >>> airyai(+inf,-2); airyai(+inf,-3) +inf +inf >>> airyai(-1000000,-2); airyai(-inf,-2) 666666.4078472650651209742 +inf >>> airyai(-1000000,-3); airyai(-inf,-3) -333333074513.7520264995733 -inf **References** 1. [DLMF]_ Chapter 9: Airy and Related Functions 2. [WolframFunctions]_ section: Bessel-Type Functions """ airybi = r""" Computes the Airy function `\operatorname{Bi}(z)`, which is the solution of the Airy differential equation `f''(z) - z f(z) = 0` with initial conditions .. math :: \operatorname{Bi}(0) = \frac{1}{3^{1/6}\Gamma\left(\frac{2}{3}\right)} \operatorname{Bi}'(0) = \frac{3^{1/6}}{\Gamma\left(\frac{1}{3}\right)}. Like the Ai-function (see :func:`~mpmath.airyai`), the Bi-function is oscillatory for `z < 0`, but it grows rather than decreases for `z > 0`. Optionally, as for :func:`~mpmath.airyai`, derivatives, integrals and fractional derivatives can be computed with the *derivative* parameter. The Bi-function has infinitely many zeros along the negative half-axis, as well as complex zeros, which can all be computed with :func:`~mpmath.airybizero`. **Plots** .. literalinclude :: /plots/bi.py .. image :: /plots/bi.png .. literalinclude :: /plots/bi_c.py .. 
image :: /plots/bi_c.png **Basic examples** Limits and values include:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> airybi(0); 1/(power(3,'1/6')*gamma('2/3')) 0.6149266274460007351509224 0.6149266274460007351509224 >>> airybi(1) 1.207423594952871259436379 >>> airybi(-1) 0.10399738949694461188869 >>> airybi(inf); airybi(-inf) +inf 0.0 Evaluation is supported for large magnitudes of the argument:: >>> airybi(-100) 0.02427388768016013160566747 >>> airybi(100) 6.041223996670201399005265e+288 >>> airybi(50+50j) (-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j) >>> airybi(-50+50j) (-3.347525544923600321838281e+157 + 1.041242537363167632587245e+158j) Huge arguments:: >>> airybi(10**10) 1.369385787943539818688433e+289529654602165 >>> airybi(-10**10) 0.001775656141692932747610973 >>> w = airybi(10**10*(1+j)) >>> w.real -6.559955931096196875845858e+186339621747689 >>> w.imag -6.822462726981357180929024e+186339621747690 The first real root of the Bi-function is:: >>> findroot(airybi, -1); airybizero(1) -1.17371322270912792491998 -1.17371322270912792491998 **Properties and relations** Verifying the Airy differential equation:: >>> for z in [-3.4, 0, 2.5, 1+2j]: ... chop(airybi(z,2) - z*airybi(z)) ... 0.0 0.0 0.0 0.0 The first few terms of the Taylor series expansion around `z = 0` (every third term is zero):: >>> nprint(taylor(airybi, 0, 5)) [0.614927, 0.448288, 0.0, 0.102488, 0.0373574, 0.0] The Airy functions can be expressed in terms of Bessel functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have:: >>> z = -3 >>> airybi(z) -0.1982896263749265432206449 >>> p = 2*power(-z,'3/2')/3 >>> sqrt(-mpf(z)/3)*(besselj('-1/3',p) - besselj('1/3',p)) -0.1982896263749265432206449 **Derivatives and integrals** Derivatives of the Bi-function (directly and using :func:`~mpmath.diff`):: >>> airybi(-3,1); diff(airybi,-3) -0.675611222685258537668032 -0.675611222685258537668032 >>> airybi(-3,2); diff(airybi,-3,2) 0.5948688791247796296619346 0.5948688791247796296619346 >>> airybi(1000,1); diff(airybi,1000) 1.710055114624614989262335e+9156 1.710055114624614989262335e+9156 Several derivatives at `z = 0`:: >>> airybi(0,0); airybi(0,1); airybi(0,2) 0.6149266274460007351509224 0.4482883573538263579148237 0.0 >>> airybi(0,3); airybi(0,4); airybi(0,5) 0.6149266274460007351509224 0.8965767147076527158296474 0.0 >>> airybi(0,15); airybi(0,16); airybi(0,17) 2238.332923903442675949357 5522.912562599140729510628 0.0 The integral of the Bi-function:: >>> airybi(3,-1); quad(airybi, [0,3]) 10.06200303130620056316655 10.06200303130620056316655 >>> airybi(-10,-1); quad(airybi, [0,-10]) -0.01504042480614002045135483 -0.01504042480614002045135483 Integrals of high or fractional order:: >>> airybi(-2,0.5); differint(airybi, -2, 0.5, 0) (0.0 + 0.5019859055341699223453257j) (0.0 + 0.5019859055341699223453257j) >>> airybi(-2,-4); differint(airybi,-2,-4,0) 0.2809314599922447252139092 0.2809314599922447252139092 >>> airybi(0,-1); airybi(0,-2); airybi(0,-3) 0.0 0.0 0.0 Integrals of the Bi-function can be evaluated at limit points:: >>> airybi(-1000000,-1); airybi(-inf,-1) 0.000002191261128063434047966873 0.0 >>> airybi(10,-1); airybi(+inf,-1) 147809803.1074067161675853 +inf >>> airybi(+inf,-2); airybi(+inf,-3) +inf +inf >>> airybi(-1000000,-2); airybi(-inf,-2) 0.4482883750599908479851085 0.4482883573538263579148237 >>> gamma('2/3')*power(3,'2/3')/(2*pi) 0.4482883573538263579148237 >>> airybi(-100000,-3); airybi(-inf,-3) -44828.52827206932872493133 -inf >>> airybi(-100000,-4); airybi(-inf,-4) 
2241411040.437759489540248 +inf """ airyaizero = r""" Gives the `k`-th zero of the Airy Ai-function, i.e. the `k`-th number `a_k` ordered by magnitude for which `\operatorname{Ai}(a_k) = 0`. Optionally, with *derivative=1*, the corresponding zero `a'_k` of the derivative function, i.e. `\operatorname{Ai}'(a'_k) = 0`, is computed. **Examples** Some values of `a_k`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> airyaizero(1) -2.338107410459767038489197 >>> airyaizero(2) -4.087949444130970616636989 >>> airyaizero(3) -5.520559828095551059129856 >>> airyaizero(1000) -281.0315196125215528353364 Some values of `a'_k`:: >>> airyaizero(1,1) -1.018792971647471089017325 >>> airyaizero(2,1) -3.248197582179836537875424 >>> airyaizero(3,1) -4.820099211178735639400616 >>> airyaizero(1000,1) -280.9378080358935070607097 Verification:: >>> chop(airyai(airyaizero(1))) 0.0 >>> chop(airyai(airyaizero(1,1),1)) 0.0 """ airybizero = r""" With *complex=False*, gives the `k`-th real zero of the Airy Bi-function, i.e. the `k`-th number `b_k` ordered by magnitude for which `\operatorname{Bi}(b_k) = 0`. With *complex=True*, gives the `k`-th complex zero in the upper half plane `\beta_k`. Also the conjugate `\overline{\beta_k}` is a zero. Optionally, with *derivative=1*, the corresponding zero `b'_k` or `\beta'_k` of the derivative function, i.e. `\operatorname{Bi}'(b'_k) = 0` or `\operatorname{Bi}'(\beta'_k) = 0`, is computed. **Examples** Some values of `b_k`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> airybizero(1) -1.17371322270912792491998 >>> airybizero(2) -3.271093302836352715680228 >>> airybizero(3) -4.830737841662015932667709 >>> airybizero(1000) -280.9378112034152401578834 Some values of `b_k`:: >>> airybizero(1,1) -2.294439682614123246622459 >>> airybizero(2,1) -4.073155089071828215552369 >>> airybizero(3,1) -5.512395729663599496259593 >>> airybizero(1000,1) -281.0315164471118527161362 Some values of `\beta_k`:: >>> airybizero(1,complex=True) (0.9775448867316206859469927 + 2.141290706038744575749139j) >>> airybizero(2,complex=True) (1.896775013895336346627217 + 3.627291764358919410440499j) >>> airybizero(3,complex=True) (2.633157739354946595708019 + 4.855468179979844983174628j) >>> airybizero(1000,complex=True) (140.4978560578493018899793 + 243.3907724215792121244867j) Some values of `\beta'_k`:: >>> airybizero(1,1,complex=True) (0.2149470745374305676088329 + 1.100600143302797880647194j) >>> airybizero(2,1,complex=True) (1.458168309223507392028211 + 2.912249367458445419235083j) >>> airybizero(3,1,complex=True) (2.273760763013482299792362 + 4.254528549217097862167015j) >>> airybizero(1000,1,complex=True) (140.4509972835270559730423 + 243.3096175398562811896208j) Verification:: >>> chop(airybi(airybizero(1))) 0.0 >>> chop(airybi(airybizero(1,1),1)) 0.0 >>> u = airybizero(1,complex=True) >>> chop(airybi(u)) 0.0 >>> chop(airybi(conj(u))) 0.0 The complex zeros (in the upper and lower half-planes respectively) asymptotically approach the rays `z = R \exp(\pm i \pi /3)`:: >>> arg(airybizero(1,complex=True)) 1.142532510286334022305364 >>> arg(airybizero(1000,complex=True)) 1.047271114786212061583917 >>> arg(airybizero(1000000,complex=True)) 1.047197624741816183341355 >>> pi/3 1.047197551196597746154214 """ ellipk = r""" Evaluates the complete elliptic integral of the first kind, `K(m)`, defined by .. math :: K(m) = \int_0^{\pi/2} \frac{dt}{\sqrt{1-m \sin^2 t}} \, = \, \frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, \frac{1}{2}, 1, m\right). 
Note that the argument is the parameter `m = k^2`, not the modulus `k` which is sometimes used. **Plots** .. literalinclude :: /plots/ellipk.py .. image :: /plots/ellipk.png **Examples** Values and limits include:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> ellipk(0) 1.570796326794896619231322 >>> ellipk(inf) (0.0 + 0.0j) >>> ellipk(-inf) 0.0 >>> ellipk(1) +inf >>> ellipk(-1) 1.31102877714605990523242 >>> ellipk(2) (1.31102877714605990523242 - 1.31102877714605990523242j) Verifying the defining integral and hypergeometric representation:: >>> ellipk(0.5) 1.85407467730137191843385 >>> quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2]) 1.85407467730137191843385 >>> pi/2*hyp2f1(0.5,0.5,1,0.5) 1.85407467730137191843385 Evaluation is supported for arbitrary complex `m`:: >>> ellipk(3+4j) (0.9111955638049650086562171 + 0.6313342832413452438845091j) A definite integral:: >>> quad(ellipk, [0, 1]) 2.0 """ agm = r""" ``agm(a, b)`` computes the arithmetic-geometric mean of `a` and `b`, defined as the limit of the following iteration: .. math :: a_0 = a b_0 = b a_{n+1} = \frac{a_n+b_n}{2} b_{n+1} = \sqrt{a_n b_n} This function can be called with a single argument, computing `\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`. **Examples** It is a well-known theorem that the geometric mean of two distinct positive numbers is less than the arithmetic mean. It follows that the arithmetic-geometric mean lies between the two means:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> a = mpf(3) >>> b = mpf(4) >>> sqrt(a*b) 3.46410161513775 >>> agm(a,b) 3.48202767635957 >>> (a+b)/2 3.5 The arithmetic-geometric mean is scale-invariant:: >>> agm(10*e, 10*pi) 29.261085515723 >>> 10*agm(e, pi) 29.261085515723 As an order-of-magnitude estimate, `\mathrm{agm}(1,x) \approx x` for large `x`:: >>> agm(10**10) 643448704.760133 >>> agm(10**50) 1.34814309345871e+48 For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`:: >>> agm('0.01') 0.262166887202249 >>> -pi/2/log('0.0025') 0.262172347753122 The arithmetic-geometric mean can also be computed for complex numbers:: >>> agm(3, 2+j) (2.51055133276184 + 0.547394054060638j) The AGM iteration converges very quickly (each step doubles the number of correct digits), so :func:`~mpmath.agm` supports efficient high-precision evaluation:: >>> mp.dps = 10000 >>> a = agm(1,2) >>> str(a)[-10:] '1679581912' **Mathematical relations** The arithmetic-geometric mean may be used to evaluate the following two parametric definite integrals: .. math :: I_1 = \int_0^{\infty} \frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx I_2 = \int_0^{\pi/2} \frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx We have:: >>> mp.dps = 15 >>> a = 3 >>> b = 4 >>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5 >>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5 >>> quad(f1, [0, inf]) 0.451115405388492 >>> quad(f2, [0, pi/2]) 0.451115405388492 >>> pi/(2*agm(a,b)) 0.451115405388492 A formula for `\Gamma(1/4)`:: >>> gamma(0.25) 3.62560990822191 >>> sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2))) 3.62560990822191 **Possible issues** The branch cut chosen for complex `a` and `b` is somewhat arbitrary. """ gegenbauer = r""" Evaluates the Gegenbauer polynomial, or ultraspherical polynomial, .. math :: C_n^{(a)}(z) = {n+2a-1 \choose n} \,_2F_1\left(-n, n+2a; a+\frac{1}{2}; \frac{1}{2}(1-z)\right). When `n` is a nonnegative integer, this formula gives a polynomial in `z` of degree `n`, but all parameters are permitted to be complex numbers. 
With `a = 1/2`, the Gegenbauer polynomial reduces to a Legendre polynomial. **Examples** Evaluation for arbitrary arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> gegenbauer(3, 0.5, -10) -2485.0 >>> gegenbauer(1000, 10, 100) 3.012757178975667428359374e+2322 >>> gegenbauer(2+3j, -0.75, -1000j) (-5038991.358609026523401901 + 9414549.285447104177860806j) Evaluation at negative integer orders:: >>> gegenbauer(-4, 2, 1.75) -1.0 >>> gegenbauer(-4, 3, 1.75) 0.0 >>> gegenbauer(-4, 2j, 1.75) 0.0 >>> gegenbauer(-7, 0.5, 3) 8989.0 The Gegenbauer polynomials solve the differential equation:: >>> n, a = 4.5, 1+2j >>> f = lambda z: gegenbauer(n, a, z) >>> for z in [0, 0.75, -0.5j]: ... chop((1-z**2)*diff(f,z,2) - (2*a+1)*z*diff(f,z) + n*(n+2*a)*f(z)) ... 0.0 0.0 0.0 The Gegenbauer polynomials have generating function `(1-2zt+t^2)^{-a}`:: >>> a, z = 2.5, 1 >>> taylor(lambda t: (1-2*z*t+t**2)**(-a), 0, 3) [1.0, 5.0, 15.0, 35.0] >>> [gegenbauer(n,a,z) for n in range(4)] [1.0, 5.0, 15.0, 35.0] The Gegenbauer polynomials are orthogonal on `[-1, 1]` with respect to the weight `(1-z^2)^{a-\frac{1}{2}}`:: >>> a, n, m = 2.5, 4, 5 >>> Cn = lambda z: gegenbauer(n, a, z, zeroprec=1000) >>> Cm = lambda z: gegenbauer(m, a, z, zeroprec=1000) >>> chop(quad(lambda z: Cn(z)*Cm(z)*(1-z**2)**(a-0.5), [-1, 1])) 0.0 """ laguerre = r""" Gives the generalized (associated) Laguerre polynomial, defined by .. math :: L_n^a(z) = \frac{\Gamma(n+a+1)}{\Gamma(a+1) \Gamma(n+1)} \,_1F_1(-n, a+1, z). With `a = 0` and `n` a nonnegative integer, this reduces to an ordinary Laguerre polynomial, the sequence of which begins `L_0(z) = 1, L_1(z) = 1-z, L_2(z) = z^2/2-2z+1, \ldots`. The Laguerre polynomials are orthogonal with respect to the weight `z^a e^{-z}` on `[0, \infty)`. **Plots** .. literalinclude :: /plots/laguerre.py .. image :: /plots/laguerre.png **Examples** Evaluation for arbitrary arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> laguerre(5, 0, 0.25) 0.03726399739583333333333333 >>> laguerre(1+j, 0.5, 2+3j) (4.474921610704496808379097 - 11.02058050372068958069241j) >>> laguerre(2, 0, 10000) 49980001.0 >>> laguerre(2.5, 0, 10000) -9.327764910194842158583189e+4328 The first few Laguerre polynomials, normalized to have integer coefficients:: >>> for n in range(7): ... chop(taylor(lambda z: fac(n)*laguerre(n, 0, z), 0, n)) ... [1.0] [1.0, -1.0] [2.0, -4.0, 1.0] [6.0, -18.0, 9.0, -1.0] [24.0, -96.0, 72.0, -16.0, 1.0] [120.0, -600.0, 600.0, -200.0, 25.0, -1.0] [720.0, -4320.0, 5400.0, -2400.0, 450.0, -36.0, 1.0] Verifying orthogonality:: >>> Lm = lambda t: laguerre(m,a,t) >>> Ln = lambda t: laguerre(n,a,t) >>> a, n, m = 2.5, 2, 3 >>> chop(quad(lambda t: exp(-t)*t**a*Lm(t)*Ln(t), [0,inf])) 0.0 """ hermite = r""" Evaluates the Hermite polynomial `H_n(z)`, which may be defined using the recurrence .. math :: H_0(z) = 1 H_1(z) = 2z H_{n+1}(z) = 2z H_n(z) - 2n H_{n-1}(z). The Hermite polynomials are orthogonal on `(-\infty, \infty)` with respect to the weight `e^{-z^2}`. More generally, allowing arbitrary complex values of `n`, the Hermite function `H_n(z)` is defined as .. math :: H_n(z) = (2z)^n \,_2F_0\left(-\frac{n}{2}, \frac{1-n}{2}, -\frac{1}{z^2}\right) for `\Re{z} > 0`, or generally .. math :: H_n(z) = 2^n \sqrt{\pi} \left( \frac{1}{\Gamma\left(\frac{1-n}{2}\right)} \,_1F_1\left(-\frac{n}{2}, \frac{1}{2}, z^2\right) - \frac{2z}{\Gamma\left(-\frac{n}{2}\right)} \,_1F_1\left(\frac{1-n}{2}, \frac{3}{2}, z^2\right) \right). **Plots** .. literalinclude :: /plots/hermite.py ..
image :: /plots/hermite.png **Examples** Evaluation for arbitrary arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hermite(0, 10) 1.0 >>> hermite(1, 10); hermite(2, 10) 20.0 398.0 >>> hermite(10000, 2) 4.950440066552087387515653e+19334 >>> hermite(3, -10**8) -7999999999999998800000000.0 >>> hermite(-3, -10**8) 1.675159751729877682920301e+4342944819032534 >>> hermite(2+3j, -1+2j) (-0.07652130602993513389421901 - 0.1084662449961914580276007j) Coefficients of the first few Hermite polynomials are:: >>> for n in range(7): ... chop(taylor(lambda z: hermite(n, z), 0, n)) ... [1.0] [0.0, 2.0] [-2.0, 0.0, 4.0] [0.0, -12.0, 0.0, 8.0] [12.0, 0.0, -48.0, 0.0, 16.0] [0.0, 120.0, 0.0, -160.0, 0.0, 32.0] [-120.0, 0.0, 720.0, 0.0, -480.0, 0.0, 64.0] Values at `z = 0`:: >>> for n in range(-5, 9): ... hermite(n, 0) ... 0.02769459142039868792653387 0.08333333333333333333333333 0.2215567313631895034122709 0.5 0.8862269254527580136490837 1.0 0.0 -2.0 0.0 12.0 0.0 -120.0 0.0 1680.0 Hermite functions satisfy the differential equation:: >>> n = 4 >>> f = lambda z: hermite(n, z) >>> z = 1.5 >>> chop(diff(f,z,2) - 2*z*diff(f,z) + 2*n*f(z)) 0.0 Verifying orthogonality:: >>> chop(quad(lambda t: hermite(2,t)*hermite(4,t)*exp(-t**2), [-inf,inf])) 0.0 """ jacobi = r""" ``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial `P_n^{(a,b)}(x)`. The Jacobi polynomials are a special case of the hypergeometric function `\,_2F_1` given by: .. math :: P_n^{(a,b)}(x) = {n+a \choose n} \,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right). Note that this definition generalizes to nonintegral values of `n`. When `n` is an integer, the hypergeometric series terminates after a finite number of terms, giving a polynomial in `x`. **Evaluation of Jacobi polynomials** A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> jacobi(4, 0.5, 0.25, 1) 2.4609375 >>> binomial(4+0.5, 4) 2.4609375 A Jacobi polynomial of degree `n` is equal to its Taylor polynomial of degree `n`. The explicit coefficients of Jacobi polynomials can therefore be recovered easily using :func:`~mpmath.taylor`:: >>> for n in range(5): ... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n)) ... [1.0] [-0.5, 2.5] [-0.75, -1.5, 5.25] [0.5, -3.5, -3.5, 10.5] [0.625, 2.5, -11.25, -7.5, 20.625] For nonintegral `n`, the Jacobi "polynomial" is no longer a polynomial:: >>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4)) [0.309983, 1.84119, -1.26933, 1.26699, -1.34808] **Orthogonality** The Jacobi polynomials are orthogonal on the interval `[-1, 1]` with respect to the weight function `w(x) = (1-x)^a (1+x)^b`. That is, `w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to zero if `m \ne n` and to a nonzero number if `m = n`. The orthogonality is easy to verify using numerical quadrature:: >>> P = jacobi >>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x) >>> a = 2 >>> b = 3 >>> m, n = 3, 4 >>> chop(quad(f, [-1, 1]), 1) 0.0 >>> m, n = 4, 4 >>> quad(f, [-1, 1]) 1.9047619047619 **Differential equation** The Jacobi polynomials are solutions of the differential equation .. math :: (1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0. 
We can verify that :func:`~mpmath.jacobi` approximately satisfies this equation:: >>> from mpmath import * >>> mp.dps = 15 >>> a = 2.5 >>> b = 4 >>> n = 3 >>> y = lambda x: jacobi(n,a,b,x) >>> x = pi >>> A0 = n*(n+a+b+1)*y(x) >>> A1 = (b-a-(a+b+2)*x)*diff(y,x) >>> A2 = (1-x**2)*diff(y,x,2) >>> nprint(A2 + A1 + A0, 1) 4.0e-12 The difference of order `10^{-12}` is as close to zero as it could be at 15-digit working precision, since the terms are large:: >>> A0, A1, A2 (26560.2328981879, -21503.7641037294, -5056.46879445852) """ legendre = r""" ``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`. The Legendre polynomials are given by the formula .. math :: P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n. Alternatively, they can be computed recursively using .. math :: P_0(x) = 1 P_1(x) = x (n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x). A third definition is in terms of the hypergeometric function `\,_2F_1`, whereby they can be generalized to arbitrary `n`: .. math :: P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right) **Plots** .. literalinclude :: /plots/legendre.py .. image :: /plots/legendre.png **Basic evaluation** The Legendre polynomials assume fixed values at the points `x = -1` and `x = 1`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> nprint([legendre(n, 1) for n in range(6)]) [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] >>> nprint([legendre(n, -1) for n in range(6)]) [1.0, -1.0, 1.0, -1.0, 1.0, -1.0] The coefficients of Legendre polynomials can be recovered using degree-`n` Taylor expansion:: >>> for n in range(5): ... nprint(chop(taylor(lambda x: legendre(n, x), 0, n))) ... [1.0] [0.0, 1.0] [-0.5, 0.0, 1.5] [0.0, -1.5, 0.0, 2.5] [0.375, 0.0, -3.75, 0.0, 4.375] The roots of Legendre polynomials are located symmetrically on the interval `[-1, 1]`:: >>> for n in range(5): ... nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1])) ... [] [0.0] [-0.57735, 0.57735] [-0.774597, 0.0, 0.774597] [-0.861136, -0.339981, 0.339981, 0.861136] An example of an evaluation for arbitrary `n`:: >>> legendre(0.75, 2+4j) (1.94952805264875 + 2.1071073099422j) **Orthogonality** The Legendre polynomials are orthogonal on `[-1, 1]` with respect to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)` integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`:: >>> m, n = 3, 4 >>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1]) 0.0 >>> m, n = 4, 4 >>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1]) 0.222222222222222 **Differential equation** The Legendre polynomials satisfy the differential equation .. math :: ((1-x^2) y')' + n(n+1) y = 0. We can verify this numerically:: >>> n = 3.6 >>> x = 0.73 >>> P = legendre >>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x) >>> B = n*(n+1)*P(n,x) >>> nprint(A+B,1) 9.0e-16 """ legenp = r""" Calculates the (associated) Legendre function of the first kind of degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary Legendre function of the first kind, `P_n(z)`. The parameters may be complex numbers. In terms of the Gauss hypergeometric function, the (associated) Legendre function is defined as .. math :: P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}} \,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right). With *type=3* instead of *type=2*, the alternative definition .. math :: \hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}} \,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right). is used.
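For `m = 0` the prefactor in both definitions reduces to 1 and `P_n^0(z)` coincides with the ordinary Legendre polynomial, so the hypergeometric formula can be checked directly (illustrative sketch)::

    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> legenp(2, 0, 10)
    149.5
    >>> hyp2f1(-2, 3, 1, (1-10)/2)
    149.5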
These functions correspond respectively to ``LegendreP[n,m,2,z]`` and ``LegendreP[n,m,3,z]`` in Mathematica. The general solution of the (associated) Legendre differential equation .. math :: (1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0 is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants `C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the second kind as implemented by :func:`~mpmath.legenq`. **Examples** Evaluation for arbitrary parameters and arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> legenp(2, 0, 10); legendre(2, 10) 149.5 149.5 >>> legenp(-2, 0.5, 2.5) (1.972260393822275434196053 - 1.972260393822275434196053j) >>> legenp(2+3j, 1-j, -0.5+4j) (-3.335677248386698208736542 - 5.663270217461022307645625j) >>> chop(legenp(3, 2, -1.5, type=2)) 28.125 >>> chop(legenp(3, 2, -1.5, type=3)) -28.125 Verifying the associated Legendre differential equation:: >>> n, m = 2, -0.5 >>> C1, C2 = 1, -3 >>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z) >>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \ ... (n*(n+1)-m**2/(1-z**2))*f(z) >>> for z in [0, 2, -1.5, 0.5+2j]: ... chop(deq(mpmathify(z))) ... 0.0 0.0 0.0 0.0 """ legenq = r""" Calculates the (associated) Legendre function of the second kind of degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary Legendre function of the second kind, `Q_n(z)`. The parameters may be complex numbers. The Legendre functions of the second kind give a second set of solutions to the (associated) Legendre differential equation. (See :func:`~mpmath.legenp`.) Unlike the Legendre functions of the first kind, they are not polynomials of `z` for integer `n`, `m` but rational or logarithmic functions with poles at `z = \pm 1`. There are various ways to define Legendre functions of the second kind, giving rise to different complex structure. A version can be selected using the *type* keyword argument. The *type=2* and *type=3* functions are given respectively by .. math :: Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)} \left( \cos(\pi m) P_n^m(z) - \frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right) \hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m} \left( \hat{P}_n^m(z) - \frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right) where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions of the first kind. The formulas above should be understood as limits when `m` is an integer. These functions correspond to ``LegendreQ[n,m,2,z]`` (or ``LegendreQ[n,m,z]``) and ``LegendreQ[n,m,3,z]`` in Mathematica. The *type=3* function is essentially the same as the function defined in Abramowitz & Stegun (eq. 8.1.3) but with `(z+1)^{m/2}(z-1)^{m/2}` instead of `(z^2-1)^{m/2}`, giving slightly different branches. 
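As an illustrative sanity check of the default *type=2* function on the interval `(-1,1)`, it can be compared with the classical closed form `Q_2(z) = P_2(z)\,\mathrm{arctanh}(z) - 3z/2`::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> z = mpf(1)/2
    >>> chop(legenq(2, 0, z) - (legendre(2, z)*atanh(z) - 3*z/2))
    0.0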
**Examples** Evaluation for arbitrary parameters and arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> legenq(2, 0, 0.5) -0.8186632680417568557122028 >>> legenq(-1.5, -2, 2.5) (0.6655964618250228714288277 + 0.3937692045497259717762649j) >>> legenq(2-j, 3+4j, -6+5j) (-10001.95256487468541686564 - 6011.691337610097577791134j) Different versions of the function:: >>> legenq(2, 1, 0.5) 0.7298060598018049369381857 >>> legenq(2, 1, 1.5) (-7.902916572420817192300921 + 0.1998650072605976600724502j) >>> legenq(2, 1, 0.5, type=3) (2.040524284763495081918338 - 0.7298060598018049369381857j) >>> chop(legenq(2, 1, 1.5, type=3)) -0.1998650072605976600724502 """ chebyt = r""" ``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first kind `T_n(x)`, defined by the identity .. math :: T_n(\cos x) = \cos(n x). The Chebyshev polynomials of the first kind are a special case of the Jacobi polynomials, and by extension of the hypergeometric function `\,_2F_1`. They can thus also be evaluated for nonintegral `n`. **Plots** .. literalinclude :: /plots/chebyt.py .. image :: /plots/chebyt.png **Basic evaluation** The coefficients of the `n`-th polynomial can be recovered using using degree-`n` Taylor expansion:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(5): ... nprint(chop(taylor(lambda x: chebyt(n, x), 0, n))) ... [1.0] [0.0, 1.0] [-1.0, 0.0, 2.0] [0.0, -3.0, 0.0, 4.0] [1.0, 0.0, -8.0, 0.0, 8.0] **Orthogonality** The Chebyshev polynomials of the first kind are orthogonal on the interval `[-1, 1]` with respect to the weight function `w(x) = 1/\sqrt{1-x^2}`:: >>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2) >>> m, n = 3, 4 >>> nprint(quad(f, [-1, 1]),1) 0.0 >>> m, n = 4, 4 >>> quad(f, [-1, 1]) 1.57079632596448 """ chebyu = r""" ``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second kind `U_n(x)`, defined by the identity .. math :: U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}. The Chebyshev polynomials of the second kind are a special case of the Jacobi polynomials, and by extension of the hypergeometric function `\,_2F_1`. They can thus also be evaluated for nonintegral `n`. **Plots** .. literalinclude :: /plots/chebyu.py .. image :: /plots/chebyu.png **Basic evaluation** The coefficients of the `n`-th polynomial can be recovered using using degree-`n` Taylor expansion:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(5): ... nprint(chop(taylor(lambda x: chebyu(n, x), 0, n))) ... [1.0] [0.0, 2.0] [-1.0, 0.0, 4.0] [0.0, -4.0, 0.0, 8.0] [1.0, 0.0, -12.0, 0.0, 16.0] **Orthogonality** The Chebyshev polynomials of the second kind are orthogonal on the interval `[-1, 1]` with respect to the weight function `w(x) = \sqrt{1-x^2}`:: >>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2) >>> m, n = 3, 4 >>> quad(f, [-1, 1]) 0.0 >>> m, n = 4, 4 >>> quad(f, [-1, 1]) 1.5707963267949 """ besselj = r""" ``besselj(n, x, derivative=0)`` gives the Bessel function of the first kind `J_n(x)`. Bessel functions of the first kind are defined as solutions of the differential equation .. math :: x^2 y'' + x y' + (x^2 - n^2) y = 0 which appears, among other things, when solving the radial part of Laplace's equation in cylindrical coordinates. This equation has two solutions for given `n`, where the `J_n`-function is the solution that is nonsingular at `x = 0`. For positive integer `n`, `J_n(x)` behaves roughly like a sine (odd `n`) or cosine (even `n`) multiplied by a magnitude factor that decays slowly as `x \to \pm\infty`. 
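This behavior is captured by the leading asymptotic term `J_n(x) \approx \sqrt{2/(\pi x)} \cos(x - n\pi/2 - \pi/4)`, which can be illustrated numerically (only coarse agreement is expected, since higher-order terms are dropped)::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> x = mpf(1000)
    >>> chop(besselj(0, x) - sqrt(2/(pi*x))*cos(x - pi/4), 1e-4)
    0.0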
Generally, `J_n` is a special case of the hypergeometric function `\,_0F_1`: .. math :: J_n(x) = \frac{x^n}{2^n \Gamma(n+1)} \,_0F_1\left(n+1,-\frac{x^2}{4}\right) With *derivative* = `m \ne 0`, the `m`-th derivative .. math :: \frac{d^m}{dx^m} J_n(x) is computed. **Plots** .. literalinclude :: /plots/besselj.py .. image :: /plots/besselj.png .. literalinclude :: /plots/besselj_c.py .. image :: /plots/besselj_c.png **Examples** Evaluation is supported for arbitrary arguments, and at arbitrary precision:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> besselj(2, 1000) -0.024777229528606 >>> besselj(4, 0.75) 0.000801070086542314 >>> besselj(2, 1000j) (-2.48071721019185e+432 + 6.41567059811949e-437j) >>> mp.dps = 25 >>> besselj(0.75j, 3+4j) (-2.778118364828153309919653 - 1.5863603889018621585533j) >>> mp.dps = 50 >>> besselj(1, pi) 0.28461534317975275734531059968613140570981118184947 Arguments may be large:: >>> mp.dps = 25 >>> besselj(0, 10000) -0.007096160353388801477265164 >>> besselj(0, 10**10) 0.000002175591750246891726859055 >>> besselj(2, 10**100) 7.337048736538615712436929e-51 >>> besselj(2, 10**5*j) (-3.540725411970948860173735e+43426 + 4.4949812409615803110051e-43433j) The Bessel functions of the first kind satisfy simple symmetries around `x = 0`:: >>> mp.dps = 15 >>> nprint([besselj(n,0) for n in range(5)]) [1.0, 0.0, 0.0, 0.0, 0.0] >>> nprint([besselj(n,pi) for n in range(5)]) [-0.304242, 0.284615, 0.485434, 0.333458, 0.151425] >>> nprint([besselj(n,-pi) for n in range(5)]) [-0.304242, -0.284615, 0.485434, -0.333458, 0.151425] Roots of Bessel functions are often used:: >>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]]) [2.40483, 5.52008, 8.65373, 11.7915, 14.9309] >>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]]) [3.83171, 7.01559, 10.1735, 13.3237, 16.4706] The roots are not periodic, but the distance between successive roots asymptotically approaches `2 \pi`. Bessel functions of the first kind have the following normalization:: >>> quadosc(j0, [0, inf], period=2*pi) 1.0 >>> quadosc(j1, [0, inf], period=2*pi) 1.0 For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a trigonometric function:: >>> x = 10 >>> besselj(0.5, x), sqrt(2/(pi*x))*sin(x) (-0.13726373575505, -0.13726373575505) >>> besselj(-0.5, x), sqrt(2/(pi*x))*cos(x) (-0.211708866331398, -0.211708866331398) Derivatives of any order can be computed (negative orders correspond to integration):: >>> mp.dps = 25 >>> besselj(0, 7.5, 1) -0.1352484275797055051822405 >>> diff(lambda x: besselj(0,x), 7.5) -0.1352484275797055051822405 >>> besselj(0, 7.5, 10) -0.1377811164763244890135677 >>> diff(lambda x: besselj(0,x), 7.5, 10) -0.1377811164763244890135677 >>> besselj(0,7.5,-1) - besselj(0,3.5,-1) -0.1241343240399987693521378 >>> quad(j0, [3.5, 7.5]) -0.1241343240399987693521378 Differentiation with a noninteger order gives the fractional derivative in the sense of the Riemann-Liouville differintegral, as computed by :func:`~mpmath.differint`:: >>> mp.dps = 15 >>> besselj(1, 3.5, 0.75) -0.385977722939384 >>> differint(lambda x: besselj(1, x), 3.5, 0.75) -0.385977722939384 """ besseli = r""" ``besseli(n, x, derivative=0)`` gives the modified Bessel function of the first kind, .. math :: I_n(x) = i^{-n} J_n(ix). With *derivative* = `m \ne 0`, the `m`-th derivative .. math :: \frac{d^m}{dx^m} I_n(x) is computed. **Plots** .. literalinclude :: /plots/besseli.py .. image :: /plots/besseli.png .. literalinclude :: /plots/besseli_c.py .. 
image :: /plots/besseli_c.png **Examples** Some values of `I_n(x)`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> besseli(0,0) 1.0 >>> besseli(1,0) 0.0 >>> besseli(0,1) 1.266065877752008335598245 >>> besseli(3.5, 2+3j) (-0.2904369752642538144289025 - 0.4469098397654815837307006j) Arguments may be large:: >>> besseli(2, 1000) 2.480717210191852440616782e+432 >>> besseli(2, 10**10) 4.299602851624027900335391e+4342944813 >>> besseli(2, 6000+10000j) (-2.114650753239580827144204e+2603 + 4.385040221241629041351886e+2602j) For integers `n`, the following integral representation holds:: >>> mp.dps = 15 >>> n = 3 >>> x = 2.3 >>> quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi 0.349223221159309 >>> besseli(n,x) 0.349223221159309 Derivatives and antiderivatives of any order can be computed:: >>> mp.dps = 25 >>> besseli(2, 7.5, 1) 195.8229038931399062565883 >>> diff(lambda x: besseli(2,x), 7.5) 195.8229038931399062565883 >>> besseli(2, 7.5, 10) 153.3296508971734525525176 >>> diff(lambda x: besseli(2,x), 7.5, 10) 153.3296508971734525525176 >>> besseli(2,7.5,-1) - besseli(2,3.5,-1) 202.5043900051930141956876 >>> quad(lambda x: besseli(2,x), [3.5, 7.5]) 202.5043900051930141956876 """ bessely = r""" ``bessely(n, x, derivative=0)`` gives the Bessel function of the second kind, .. math :: Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}. For `n` an integer, this formula should be understood as a limit. With *derivative* = `m \ne 0`, the `m`-th derivative .. math :: \frac{d^m}{dx^m} Y_n(x) is computed. **Plots** .. literalinclude :: /plots/bessely.py .. image :: /plots/bessely.png .. literalinclude :: /plots/bessely_c.py .. image :: /plots/bessely_c.png **Examples** Some values of `Y_n(x)`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> bessely(0,0), bessely(1,0), bessely(2,0) (-inf, -inf, -inf) >>> bessely(1, pi) 0.3588729167767189594679827 >>> bessely(0.5, 3+4j) (9.242861436961450520325216 - 3.085042824915332562522402j) Arguments may be large:: >>> bessely(0, 10000) 0.00364780555898660588668872 >>> bessely(2.5, 10**50) -4.8952500412050989295774e-26 >>> bessely(2.5, -10**50) (0.0 + 4.8952500412050989295774e-26j) Derivatives and antiderivatives of any order can be computed:: >>> bessely(2, 3.5, 1) 0.3842618820422660066089231 >>> diff(lambda x: bessely(2, x), 3.5) 0.3842618820422660066089231 >>> bessely(0.5, 3.5, 1) -0.2066598304156764337900417 >>> diff(lambda x: bessely(0.5, x), 3.5) -0.2066598304156764337900417 >>> diff(lambda x: bessely(2, x), 0.5, 10) -208173867409.5547350101511 >>> bessely(2, 0.5, 10) -208173867409.5547350101511 >>> bessely(2, 100.5, 100) 0.02668487547301372334849043 >>> quad(lambda x: bessely(2,x), [1,3]) -1.377046859093181969213262 >>> bessely(2,3,-1) - bessely(2,1,-1) -1.377046859093181969213262 """ besselk = r""" ``besselk(n, x)`` gives the modified Bessel function of the second kind, .. math :: K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)} For `n` an integer, this formula should be understood as a limit. **Plots** .. literalinclude :: /plots/besselk.py .. image :: /plots/besselk.png .. literalinclude :: /plots/besselk_c.py .. 
image :: /plots/besselk_c.png **Examples** Evaluation is supported for arbitrary complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> besselk(0,1) 0.4210244382407083333356274 >>> besselk(0, -1) (0.4210244382407083333356274 - 3.97746326050642263725661j) >>> besselk(3.5, 2+3j) (-0.02090732889633760668464128 + 0.2464022641351420167819697j) >>> besselk(2+3j, 0.5) (0.9615816021726349402626083 + 0.1918250181801757416908224j) Arguments may be large:: >>> besselk(0, 100) 4.656628229175902018939005e-45 >>> besselk(1, 10**6) 4.131967049321725588398296e-434298 >>> besselk(1, 10**6*j) (0.001140348428252385844876706 - 0.0005200017201681152909000961j) >>> besselk(4.5, fmul(10**50, j, exact=True)) (1.561034538142413947789221e-26 + 1.243554598118700063281496e-25j) The point `x = 0` is a singularity (logarithmic if `n = 0`):: >>> besselk(0,0) +inf >>> besselk(1,0) +inf >>> for n in range(-4, 5): ... print(besselk(n, '1e-1000')) ... 4.8e+4001 8.0e+3000 2.0e+2000 1.0e+1000 2302.701024509704096466802 1.0e+1000 2.0e+2000 8.0e+3000 4.8e+4001 """ hankel1 = r""" ``hankel1(n,x)`` computes the Hankel function of the first kind, which is the complex combination of Bessel functions given by .. math :: H_n^{(1)}(x) = J_n(x) + i Y_n(x). **Plots** .. literalinclude :: /plots/hankel1.py .. image :: /plots/hankel1.png .. literalinclude :: /plots/hankel1_c.py .. image :: /plots/hankel1_c.png **Examples** The Hankel function is generally complex-valued:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hankel1(2, pi) (0.4854339326315091097054957 - 0.0999007139290278787734903j) >>> hankel1(3.5, pi) (0.2340002029630507922628888 - 0.6419643823412927142424049j) """ hankel2 = r""" ``hankel2(n,x)`` computes the Hankel function of the second kind, which is the complex combination of Bessel functions given by .. math :: H_n^{(2)}(x) = J_n(x) - i Y_n(x). **Plots** .. literalinclude :: /plots/hankel2.py .. image :: /plots/hankel2.png .. literalinclude :: /plots/hankel2_c.py .. image :: /plots/hankel2_c.png **Examples** The Hankel function is generally complex-valued:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> hankel2(2, pi) (0.4854339326315091097054957 + 0.0999007139290278787734903j) >>> hankel2(3.5, pi) (0.2340002029630507922628888 + 0.6419643823412927142424049j) """ lambertw = r""" The Lambert W function `W(z)` is defined as the inverse function of `w \exp(w)`. In other words, the value of `W(z)` is such that `z = W(z) \exp(W(z))` for any complex number `z`. The Lambert W function is a multivalued function with infinitely many branches `W_k(z)`, indexed by `k \in \mathbb{Z}`. Each branch gives a different solution `w` of the equation `z = w \exp(w)`. All branches are supported by :func:`~mpmath.lambertw`: * ``lambertw(z)`` gives the principal solution (branch 0) * ``lambertw(z, k)`` gives the solution on branch `k` The Lambert W function has two partially real branches: the principal branch (`k = 0`) is real for real `z > -1/e`, and the `k = -1` branch is real for `-1/e < z < 0`. All branches except `k = 0` have a logarithmic singularity at `z = 0`. The definition, implementation and choice of branches is based on [Corless]_. **Plots** .. literalinclude :: /plots/lambertw.py .. image :: /plots/lambertw.png .. literalinclude :: /plots/lambertw_c.py .. 
image :: /plots/lambertw_c.png **Basic examples** The Lambert W function is the inverse of `w \exp(w)`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> w = lambertw(1) >>> w 0.5671432904097838729999687 >>> w*exp(w) 1.0 Any branch gives a valid inverse:: >>> w = lambertw(1, k=3) >>> w (-2.853581755409037807206819 + 17.11353553941214591260783j) >>> w = lambertw(1, k=25) >>> w (-5.047020464221569709378686 + 155.4763860949415867162066j) >>> chop(w*exp(w)) 1.0 **Applications to equation-solving** The Lambert W function may be used to solve various kinds of equations, such as finding the value of the infinite power tower `z^{z^{z^{\ldots}}}`:: >>> def tower(z, n): ... if n == 0: ... return z ... return z ** tower(z, n-1) ... >>> tower(mpf(0.5), 100) 0.6411857445049859844862005 >>> -lambertw(-log(0.5))/log(0.5) 0.6411857445049859844862005 **Properties** The Lambert W function grows roughly like the natural logarithm for large arguments:: >>> lambertw(1000); log(1000) 5.249602852401596227126056 6.907755278982137052053974 >>> lambertw(10**100); log(10**100) 224.8431064451185015393731 230.2585092994045684017991 The principal branch of the Lambert W function has a rational Taylor series expansion around `z = 0`:: >>> nprint(taylor(lambertw, 0, 6), 10) [0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8] Some special values and limits are:: >>> lambertw(0) 0.0 >>> lambertw(1) 0.5671432904097838729999687 >>> lambertw(e) 1.0 >>> lambertw(inf) +inf >>> lambertw(0, k=-1) -inf >>> lambertw(0, k=3) -inf >>> lambertw(inf, k=2) (+inf + 12.56637061435917295385057j) >>> lambertw(inf, k=3) (+inf + 18.84955592153875943077586j) >>> lambertw(-inf, k=3) (+inf + 21.9911485751285526692385j) The `k = 0` and `k = -1` branches join at `z = -1/e` where `W(z) = -1` for both branches. Since `-1/e` can only be represented approximately with binary floating-point numbers, evaluating the Lambert W function at this point only gives `-1` approximately:: >>> lambertw(-1/e, 0) -0.9999999999998371330228251 >>> lambertw(-1/e, -1) -1.000000000000162866977175 If `-1/e` happens to round in the negative direction, there might be a small imaginary part:: >>> mp.dps = 15 >>> lambertw(-1/e) (-1.0 + 8.22007971483662e-9j) >>> lambertw(-1/e+eps) -0.999999966242188 **References** 1. [Corless]_ """ barnesg = r""" Evaluates the Barnes G-function, which generalizes the superfactorial (:func:`~mpmath.superfac`) and by extension also the hyperfactorial (:func:`~mpmath.hyperfac`) to the complex numbers in an analogous way to how the gamma function generalizes the ordinary factorial. The Barnes G-function may be defined in terms of a Weierstrass product: .. math :: G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2} \prod_{n=1}^\infty \left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right] For positive integers `n`, we have have relation to superfactorials `G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`. **Examples** Some elementary values and limits of the Barnes G-function:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> barnesg(1), barnesg(2), barnesg(3) (1.0, 1.0, 1.0) >>> barnesg(4) 2.0 >>> barnesg(5) 12.0 >>> barnesg(6) 288.0 >>> barnesg(7) 34560.0 >>> barnesg(8) 24883200.0 >>> barnesg(inf) +inf >>> barnesg(0), barnesg(-1), barnesg(-2) (0.0, 0.0, 0.0) Closed-form values are known for some rational arguments:: >>> barnesg('1/2') 0.603244281209446 >>> sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3) 0.603244281209446 >>> barnesg('1/4') 0.29375596533861 >>> nthroot(exp('3/8')/exp(catalan/pi)/ ... 
gamma(0.25)**3/sqrt(glaisher)**9, 4) 0.29375596533861 The Barnes G-function satisfies the functional equation `G(z+1) = \Gamma(z) G(z)`:: >>> z = pi >>> barnesg(z+1) 2.39292119327948 >>> gamma(z)*barnesg(z) 2.39292119327948 The asymptotic growth rate of the Barnes G-function is related to the Glaisher-Kinkelin constant:: >>> limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)* ... (2*pi)**(n/2)*exp(-3*n**2/4)), inf) 0.847536694177301 >>> exp('1/12')/glaisher 0.847536694177301 The Barnes G-function can be differentiated in closed form:: >>> z = 3 >>> diff(barnesg, z) 0.264507203401607 >>> barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2) 0.264507203401607 Evaluation is supported for arbitrary arguments and at arbitrary precision:: >>> barnesg(6.5) 2548.7457695685 >>> barnesg(-pi) 0.00535976768353037 >>> barnesg(3+4j) (-0.000676375932234244 - 4.42236140124728e-5j) >>> mp.dps = 50 >>> barnesg(1/sqrt(2)) 0.81305501090451340843586085064413533788206204124732 >>> q = barnesg(10j) >>> q.real 0.000000000021852360840356557241543036724799812371995850552234 >>> q.imag -0.00000000000070035335320062304849020654215545839053210041457588 >>> mp.dps = 15 >>> barnesg(100) 3.10361006263698e+6626 >>> barnesg(-101) 0.0 >>> barnesg(-10.5) 5.94463017605008e+25 >>> barnesg(-10000.5) -6.14322868174828e+167480422 >>> barnesg(1000j) (5.21133054865546e-1173597 + 4.27461836811016e-1173597j) >>> barnesg(-1000+1000j) (2.43114569750291e+1026623 + 2.24851410674842e+1026623j) **References** 1. Whittaker & Watson, *A Course of Modern Analysis*, Cambridge University Press, 4th edition (1927), p.264 2. http://en.wikipedia.org/wiki/Barnes_G-function 3. http://mathworld.wolfram.com/BarnesG-Function.html """ superfac = r""" Computes the superfactorial, defined as the product of consecutive factorials .. math :: \mathrm{sf}(n) = \prod_{k=1}^n k! For general complex `z`, `\mathrm{sf}(z)` is defined in terms of the Barnes G-function (see :func:`~mpmath.barnesg`). **Examples** The first few superfactorials are (OEIS A000178):: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(10): ... print("%s %s" % (n, superfac(n))) ... 0 1.0 1 1.0 2 2.0 3 12.0 4 288.0 5 34560.0 6 24883200.0 7 125411328000.0 8 5.05658474496e+15 9 1.83493347225108e+21 Superfactorials grow very rapidly:: >>> superfac(1000) 3.24570818422368e+1177245 >>> superfac(10**10) 2.61398543581249e+467427913956904067453 Evaluation is supported for arbitrary arguments:: >>> mp.dps = 25 >>> superfac(pi) 17.20051550121297985285333 >>> superfac(2+3j) (-0.005915485633199789627466468 + 0.008156449464604044948738263j) >>> diff(superfac, 1) 0.2645072034016070205673056 **References** 1. http://oeis.org/A000178 """ hyperfac = r""" Computes the hyperfactorial, defined for integers as the product .. math :: H(n) = \prod_{k=1}^n k^k. The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`. It can be defined more generally in terms of the Barnes G-function (see :func:`~mpmath.barnesg`) and the gamma function by the formula .. math :: H(z) = \frac{\Gamma(z+1)^z}{G(z)}. The extension to complex numbers can also be done via the integral representation .. math :: H(z) = (2\pi)^{-z/2} \exp \left[ {z+1 \choose 2} + \int_0^z \log(t!)\,dt \right]. **Examples** The rapidly-growing sequence of hyperfactorials begins (OEIS A002109):: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(10): ... print("%s %s" % (n, hyperfac(n))) ... 
0 1.0 1 1.0 2 4.0 3 108.0 4 27648.0 5 86400000.0 6 4031078400000.0 7 3.3197663987712e+18 8 5.56964379417266e+25 9 2.15779412229419e+34 Some even larger hyperfactorials are:: >>> hyperfac(1000) 5.46458120882585e+1392926 >>> hyperfac(10**10) 4.60408207642219e+489142638002418704309 The hyperfactorial can be evaluated for arbitrary arguments:: >>> hyperfac(0.5) 0.880449235173423 >>> diff(hyperfac, 1) 0.581061466795327 >>> hyperfac(pi) 205.211134637462 >>> hyperfac(-10+1j) (3.01144471378225e+46 - 2.45285242480185e+46j) The recurrence property of the hyperfactorial holds generally:: >>> z = 3-4*j >>> hyperfac(z) (-4.49795891462086e-7 - 6.33262283196162e-7j) >>> z**z * hyperfac(z-1) (-4.49795891462086e-7 - 6.33262283196162e-7j) >>> z = mpf(-0.6) >>> chop(z**z * hyperfac(z-1)) 1.28170142849352 >>> hyperfac(z) 1.28170142849352 The hyperfactorial may also be computed using the integral definition:: >>> z = 2.5 >>> hyperfac(z) 15.9842119922237 >>> (2*pi)**(-z/2)*exp(binomial(z+1,2) + ... quad(lambda t: loggamma(t+1), [0, z])) 15.9842119922237 :func:`~mpmath.hyperfac` supports arbitrary-precision evaluation:: >>> mp.dps = 50 >>> hyperfac(10) 215779412229418562091680268288000000000000000.0 >>> hyperfac(1/sqrt(2)) 0.89404818005227001975423476035729076375705084390942 **References** 1. http://oeis.org/A002109 2. http://mathworld.wolfram.com/Hyperfactorial.html """ rgamma = r""" Computes the reciprocal of the gamma function, `1/\Gamma(z)`. This function evaluates to zero at the poles of the gamma function, `z = 0, -1, -2, \ldots`. **Examples** Basic examples:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> rgamma(1) 1.0 >>> rgamma(4) 0.1666666666666666666666667 >>> rgamma(0); rgamma(-1) 0.0 0.0 >>> rgamma(1000) 2.485168143266784862783596e-2565 >>> rgamma(inf) 0.0 A definite integral that can be evaluated in terms of elementary integrals:: >>> quad(rgamma, [0,inf]) 2.807770242028519365221501 >>> e + quad(lambda t: exp(-t)/(pi**2+log(t)**2), [0,inf]) 2.807770242028519365221501 """ loggamma = r""" Computes the principal branch of the log-gamma function, `\ln \Gamma(z)`. Unlike `\ln(\Gamma(z))`, which has infinitely many complex branch cuts, the principal log-gamma function only has a single branch cut along the negative half-axis. The principal branch continuously matches the asymptotic Stirling expansion .. math :: \ln \Gamma(z) \sim \frac{\ln(2 \pi)}{2} + \left(z-\frac{1}{2}\right) \ln(z) - z + O(z^{-1}). The real parts of both functions agree, but their imaginary parts generally differ by `2 n \pi` for some `n \in \mathbb{Z}`. They coincide for `z \in \mathbb{R}, z > 0`. Computationally, it is advantageous to use :func:`~mpmath.loggamma` instead of :func:`~mpmath.gamma` for extremely large arguments. 
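The quality of the Stirling approximation quoted above can be illustrated with a rough numerical check (the neglected terms are `O(z^{-1})`, so a coarse tolerance is used)::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> z = mpf(10)**8
    >>> stirling = log(2*pi)/2 + (z - mpf(1)/2)*log(z) - z
    >>> chop(loggamma(z) - stirling, 1e-4)
    0.0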
**Examples** Comparing with `\ln(\Gamma(z))`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> loggamma('13.2'); log(gamma('13.2')) 20.49400419456603678498394 20.49400419456603678498394 >>> loggamma(3+4j) (-1.756626784603784110530604 + 4.742664438034657928194889j) >>> log(gamma(3+4j)) (-1.756626784603784110530604 - 1.540520869144928548730397j) >>> log(gamma(3+4j)) + 2*pi*j (-1.756626784603784110530604 + 4.742664438034657928194889j) Note the imaginary parts for negative arguments:: >>> loggamma(-0.5); loggamma(-1.5); loggamma(-2.5) (1.265512123484645396488946 - 3.141592653589793238462643j) (0.8600470153764810145109327 - 6.283185307179586476925287j) (-0.05624371649767405067259453 - 9.42477796076937971538793j) Some special values:: >>> loggamma(1); loggamma(2) 0.0 0.0 >>> loggamma(3); +ln2 0.6931471805599453094172321 0.6931471805599453094172321 >>> loggamma(3.5); log(15*sqrt(pi)/8) 1.200973602347074224816022 1.200973602347074224816022 >>> loggamma(inf) +inf Huge arguments are permitted:: >>> loggamma('1e30') 6.807755278982137052053974e+31 >>> loggamma('1e300') 6.897755278982137052053974e+302 >>> loggamma('1e3000') 6.906755278982137052053974e+3003 >>> loggamma('1e100000000000000000000') 2.302585092994045684007991e+100000000000000000020 >>> loggamma('1e30j') (-1.570796326794896619231322e+30 + 6.807755278982137052053974e+31j) >>> loggamma('1e300j') (-1.570796326794896619231322e+300 + 6.897755278982137052053974e+302j) >>> loggamma('1e3000j') (-1.570796326794896619231322e+3000 + 6.906755278982137052053974e+3003j) The log-gamma function can be integrated analytically on any interval of unit length:: >>> z = 0 >>> quad(loggamma, [z,z+1]); log(2*pi)/2 0.9189385332046727417803297 0.9189385332046727417803297 >>> z = 3+4j >>> quad(loggamma, [z,z+1]); (log(z)-1)*z + log(2*pi)/2 (-0.9619286014994750641314421 + 5.219637303741238195688575j) (-0.9619286014994750641314421 + 5.219637303741238195688575j) The derivatives of the log-gamma function are given by the polygamma function (:func:`~mpmath.psi`):: >>> diff(loggamma, -4+3j); psi(0, -4+3j) (1.688493531222971393607153 + 2.554898911356806978892748j) (1.688493531222971393607153 + 2.554898911356806978892748j) >>> diff(loggamma, -4+3j, 2); psi(1, -4+3j) (-0.1539414829219882371561038 - 0.1020485197430267719746479j) (-0.1539414829219882371561038 - 0.1020485197430267719746479j) The log-gamma function satisfies an additive form of the recurrence relation for the ordinary gamma function:: >>> z = 2+3j >>> loggamma(z); loggamma(z+1) - log(z) (-2.092851753092733349564189 + 2.302396543466867626153708j) (-2.092851753092733349564189 + 2.302396543466867626153708j) """ siegeltheta = r""" Computes the Riemann-Siegel theta function, .. math :: \theta(t) = \frac{ \log\Gamma\left(\frac{1+2it}{4}\right) - \log\Gamma\left(\frac{1-2it}{4}\right) }{2i} - \frac{\log \pi}{2} t. The Riemann-Siegel theta function is important in providing the phase factor for the Z-function (see :func:`~mpmath.siegelz`). 
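The definition above can be verified directly in terms of :func:`~mpmath.loggamma` (an illustrative sketch; a loose tolerance is used)::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> t = mpf(1)
    >>> rhs = (loggamma((1+2j*t)/4) - loggamma((1-2j*t)/4))/(2j) - log(pi)/2*t
    >>> chop(siegeltheta(t) - rhs, 1e-10)
    0.0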
Evaluation is supported for real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> siegeltheta(0) 0.0 >>> siegeltheta(inf) +inf >>> siegeltheta(-inf) -inf >>> siegeltheta(1) -1.767547952812290388302216 >>> siegeltheta(10+0.25j) (-3.068638039426838572528867 + 0.05804937947429712998395177j) Arbitrary derivatives may be computed with derivative = k >>> siegeltheta(1234, derivative=2) 0.0004051864079114053109473741 >>> diff(siegeltheta, 1234, n=2) 0.0004051864079114053109473741 The Riemann-Siegel theta function has odd symmetry around `t = 0`, two local extreme points and three real roots including 0 (located symmetrically):: >>> nprint(chop(taylor(siegeltheta, 0, 5))) [0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218] >>> findroot(diffun(siegeltheta), 7) 6.28983598883690277966509 >>> findroot(siegeltheta, 20) 17.84559954041086081682634 For large `t`, there is a famous asymptotic formula for `\theta(t)`, to first order given by:: >>> t = mpf(10**6) >>> siegeltheta(t) 5488816.353078403444882823 >>> -t*log(2*pi/t)/2-t/2 5488816.745777464310273645 """ grampoint = r""" Gives the `n`-th Gram point `g_n`, defined as the solution to the equation `\theta(g_n) = \pi n` where `\theta(t)` is the Riemann-Siegel theta function (:func:`~mpmath.siegeltheta`). The first few Gram points are:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> grampoint(0) 17.84559954041086081682634 >>> grampoint(1) 23.17028270124630927899664 >>> grampoint(2) 27.67018221781633796093849 >>> grampoint(3) 31.71797995476405317955149 Checking the definition:: >>> siegeltheta(grampoint(3)) 9.42477796076937971538793 >>> 3*pi 9.42477796076937971538793 A large Gram point:: >>> grampoint(10**10) 3293531632.728335454561153 Gram points are useful when studying the Z-function (:func:`~mpmath.siegelz`). See the documentation of that function for additional examples. :func:`~mpmath.grampoint` can solve the defining equation for nonintegral `n`. There is a fixed point where `g(x) = x`:: >>> findroot(lambda x: grampoint(x) - x, 10000) 9146.698193171459265866198 **References** 1. http://mathworld.wolfram.com/GramPoint.html """ siegelz = r""" Computes the Z-function, also known as the Riemann-Siegel Z function, .. math :: Z(t) = e^{i \theta(t)} \zeta(1/2+it) where `\zeta(s)` is the Riemann zeta function (:func:`~mpmath.zeta`) and where `\theta(t)` denotes the Riemann-Siegel theta function (see :func:`~mpmath.siegeltheta`). Evaluation is supported for real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> siegelz(1) -0.7363054628673177346778998 >>> siegelz(3+4j) (-0.1852895764366314976003936 - 0.2773099198055652246992479j) The first four derivatives are supported, using the optional *derivative* keyword argument:: >>> siegelz(1234567, derivative=3) 56.89689348495089294249178 >>> diff(siegelz, 1234567, n=3) 56.89689348495089294249178 The Z-function has a Maclaurin expansion:: >>> nprint(chop(taylor(siegelz, 0, 4))) [-1.46035, 0.0, 2.73588, 0.0, -8.39357] The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the critical line `s = 1/2+it` (i.e. for real arguments `t` to `Z`). 
Its zeros coincide with those of the Riemann zeta function:: >>> findroot(siegelz, 14) 14.13472514173469379045725 >>> findroot(siegelz, 20) 21.02203963877155499262848 >>> findroot(zeta, 0.5+14j) (0.5 + 14.13472514173469379045725j) >>> findroot(zeta, 0.5+20j) (0.5 + 21.02203963877155499262848j) Since the Z-function is real-valued on the critical line (and unlike `|\zeta(s)|` analytic), it is useful for investigating the zeros of the Riemann zeta function. For example, one can use a root-finding algorithm based on sign changes:: >>> findroot(siegelz, [100, 200], solver='bisect') 176.4414342977104188888926 To locate roots, Gram points `g_n` which can be computed by :func:`~mpmath.grampoint` are useful. If `(-1)^n Z(g_n)` is positive for two consecutive `n`, then `Z(t)` must have a zero between those points:: >>> g10 = grampoint(10) >>> g11 = grampoint(11) >>> (-1)**10 * siegelz(g10) > 0 True >>> (-1)**11 * siegelz(g11) > 0 True >>> findroot(siegelz, [g10, g11], solver='bisect') 56.44624769706339480436776 >>> g10, g11 (54.67523744685325626632663, 57.54516517954725443703014) """ riemannr = r""" Evaluates the Riemann R function, a smooth approximation of the prime counting function `\pi(x)` (see :func:`~mpmath.primepi`). The Riemann R function gives a fast numerical approximation useful e.g. to roughly estimate the number of primes in a given interval. The Riemann R function is computed using the rapidly convergent Gram series, .. math :: R(x) = 1 + \sum_{k=1}^{\infty} \frac{\log^k x}{k k! \zeta(k+1)}. From the Gram series, one sees that the Riemann R function is a well-defined analytic function (except for a branch cut along the negative real half-axis); it can be evaluated for arbitrary real or complex arguments. The Riemann R function gives a very accurate approximation of the prime counting function. For example, it is wrong by at most 2 for `x < 1000`, and for `x = 10^9` differs from the exact value of `\pi(x)` by 79, or less than two parts in a million. It is about 10 times more accurate than the logarithmic integral estimate (see :func:`~mpmath.li`), which however is even faster to evaluate. It is orders of magnitude more accurate than the extremely fast `x/\log x` estimate. **Examples** For small arguments, the Riemann R function almost exactly gives the prime counting function if rounded to the nearest integer:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> primepi(50), riemannr(50) (15, 14.9757023241462) >>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(100)) 1 >>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(300)) 2 The Riemann R function can be evaluated for arguments far too large for exact determination of `\pi(x)` to be computationally feasible with any presently known algorithm:: >>> riemannr(10**30) 1.46923988977204e+28 >>> riemannr(10**100) 4.3619719871407e+97 >>> riemannr(10**1000) 4.3448325764012e+996 A comparison of the Riemann R function and logarithmic integral estimates for `\pi(x)` using exact values of `\pi(10^n)` up to `n = 9`. The fractional error is shown in parentheses:: >>> exact = [4,25,168,1229,9592,78498,664579,5761455,50847534] >>> for n, p in enumerate(exact): ... n += 1 ... r, l = riemannr(10**n), li(10**n) ... rerr, lerr = nstr((r-p)/p,3), nstr((l-p)/p,3) ... print("%i %i %s(%s) %s(%s)" % (n, p, r, rerr, l, lerr)) ... 
1 4 4.56458314100509(0.141) 6.1655995047873(0.541) 2 25 25.6616332669242(0.0265) 30.1261415840796(0.205) 3 168 168.359446281167(0.00214) 177.609657990152(0.0572) 4 1229 1226.93121834343(-0.00168) 1246.13721589939(0.0139) 5 9592 9587.43173884197(-0.000476) 9629.8090010508(0.00394) 6 78498 78527.3994291277(0.000375) 78627.5491594622(0.00165) 7 664579 664667.447564748(0.000133) 664918.405048569(0.000511) 8 5761455 5761551.86732017(1.68e-5) 5762209.37544803(0.000131) 9 50847534 50847455.4277214(-1.55e-6) 50849234.9570018(3.35e-5) The derivative of the Riemann R function gives the approximate probability for a number of magnitude `x` to be prime:: >>> diff(riemannr, 1000) 0.141903028110784 >>> mpf(primepi(1050) - primepi(950)) / 100 0.15 Evaluation is supported for arbitrary arguments and at arbitrary precision:: >>> mp.dps = 30 >>> riemannr(7.5) 3.72934743264966261918857135136 >>> riemannr(-4+2j) (-0.551002208155486427591793957644 + 2.16966398138119450043195899746j) """ primepi = r""" Evaluates the prime counting function, `\pi(x)`, which gives the number of primes less than or equal to `x`. The argument `x` may be fractional. The prime counting function is very expensive to evaluate precisely for large `x`, and the present implementation is not optimized in any way. For numerical approximation of the prime counting function, it is better to use :func:`~mpmath.primepi2` or :func:`~mpmath.riemannr`. Some values of the prime counting function:: >>> from mpmath import * >>> [primepi(k) for k in range(20)] [0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8] >>> primepi(3.5) 2 >>> primepi(100000) 9592 """ primepi2 = r""" Returns an interval (as an ``mpi`` instance) providing bounds for the value of the prime counting function `\pi(x)`. For small `x`, :func:`~mpmath.primepi2` returns an exact interval based on the output of :func:`~mpmath.primepi`. For `x > 2656`, a loose interval based on Schoenfeld's inequality .. math :: |\pi(x) - \mathrm{li}(x)| < \frac{\sqrt x \log x}{8 \pi} is returned. This estimate is rigorous assuming the truth of the Riemann hypothesis, and can be computed very quickly. **Examples** Exact values of the prime counting function for small `x`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> iv.dps = 15; iv.pretty = True >>> primepi2(10) [4.0, 4.0] >>> primepi2(100) [25.0, 25.0] >>> primepi2(1000) [168.0, 168.0] Loose intervals are generated for moderately large `x`: >>> primepi2(10000), primepi(10000) ([1209.0, 1283.0], 1229) >>> primepi2(50000), primepi(50000) ([5070.0, 5263.0], 5133) As `x` increases, the absolute error gets worse while the relative error improves. The exact value of `\pi(10^{23})` is 1925320391606803968923, and :func:`~mpmath.primepi2` gives 9 significant digits:: >>> p = primepi2(10**23) >>> p [1.9253203909477020467e+21, 1.925320392280406229e+21] >>> mpf(p.delta) / mpf(p.a) 6.9219865355293e-10 A more precise, nonrigorous estimate for `\pi(x)` can be obtained using the Riemann R function (:func:`~mpmath.riemannr`). For large enough `x`, the value returned by :func:`~mpmath.primepi2` essentially amounts to a small perturbation of the value returned by :func:`~mpmath.riemannr`:: >>> primepi2(10**100) [4.3619719871407024816e+97, 4.3619719871407032404e+97] >>> riemannr(10**100) 4.3619719871407e+97 """ primezeta = r""" Computes the prime zeta function, which is defined in analogy with the Riemann zeta function (:func:`~mpmath.zeta`) as .. math :: P(s) = \sum_p \frac{1}{p^s} where the sum is taken over all prime numbers `p`. 
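For `\mathrm{Re}(s) > 1` the prime sum can be cross-checked against the standard Moebius-weighted series `P(s) = \sum_{n \ge 1} \mu(n) \log \zeta(ns)/n` (an illustrative sketch with a loose tolerance)::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> s = 2
    >>> series = nsum(lambda n: moebius(int(n))/n*log(zeta(n*s)), [1, inf])
    >>> chop(primezeta(s) - series, 1e-10)
    0.0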
Although this sum only converges for `\mathrm{Re}(s) > 1`, the function is defined by analytic continuation in the half-plane `\mathrm{Re}(s) > 0`. **Examples** Arbitrary-precision evaluation for real and complex arguments is supported:: >>> from mpmath import * >>> mp.dps = 30; mp.pretty = True >>> primezeta(2) 0.452247420041065498506543364832 >>> primezeta(pi) 0.15483752698840284272036497397 >>> mp.dps = 50 >>> primezeta(3) 0.17476263929944353642311331466570670097541212192615 >>> mp.dps = 20 >>> primezeta(3+4j) (-0.12085382601645763295 - 0.013370403397787023602j) The prime zeta function has a logarithmic pole at `s = 1`, with residue equal to the difference of the Mertens and Euler constants:: >>> primezeta(1) +inf >>> extradps(25)(lambda x: primezeta(1+x)+log(x))(+eps) -0.31571845205389007685 >>> mertens-euler -0.31571845205389007685 The analytic continuation to `0 < \mathrm{Re}(s) \le 1` is implemented. In this strip the function exhibits very complex behavior; on the unit interval, it has poles at `1/n` for every squarefree integer `n`:: >>> primezeta(0.5) # Pole at s = 1/2 (-inf + 3.1415926535897932385j) >>> primezeta(0.25) (-1.0416106801757269036 + 0.52359877559829887308j) >>> primezeta(0.5+10j) (0.54892423556409790529 + 0.45626803423487934264j) Although evaluation works in principle for any `\mathrm{Re}(s) > 0`, it should be noted that the evaluation time increases exponentially as `s` approaches the imaginary axis. For large `\mathrm{Re}(s)`, `P(s)` is asymptotic to `2^{-s}`:: >>> primezeta(inf) 0.0 >>> primezeta(10), mpf(2)**-10 (0.00099360357443698021786, 0.0009765625) >>> primezeta(1000) 9.3326361850321887899e-302 >>> primezeta(1000+1000j) (-3.8565440833654995949e-302 - 8.4985390447553234305e-302j) **References** Carl-Erik Froberg, "On the prime zeta function", BIT 8 (1968), pp. 187-202. """ bernpoly = r""" Evaluates the Bernoulli polynomial `B_n(z)`. The first few Bernoulli polynomials are:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(6): ... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n))) ... [1.0] [-0.5, 1.0] [0.166667, -1.0, 1.0] [0.0, 0.5, -1.5, 1.0] [-0.0333333, 0.0, 1.0, -2.0, 1.0] [0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0] At `z = 0`, the Bernoulli polynomial evaluates to a Bernoulli number (see :func:`~mpmath.bernoulli`):: >>> bernpoly(12, 0), bernoulli(12) (-0.253113553113553, -0.253113553113553) >>> bernpoly(13, 0), bernoulli(13) (0.0, 0.0) Evaluation is accurate for large `n` and small `z`:: >>> mp.dps = 25 >>> bernpoly(100, 0.5) 2.838224957069370695926416e+78 >>> bernpoly(1000, 10.5) 5.318704469415522036482914e+1769 """ polylog = r""" Computes the polylogarithm, defined by the sum .. math :: \mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}. This series is convergent only for `|z| < 1`, so elsewhere the analytic continuation is implied. The polylogarithm should not be confused with the logarithmic integral (also denoted by Li or li), which is implemented as :func:`~mpmath.li`. **Examples** The polylogarithm satisfies a huge number of functional identities. 
A sample of polylogarithm evaluations is shown below:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> polylog(1,0.5), log(2) (0.693147180559945, 0.693147180559945) >>> polylog(2,0.5), (pi**2-6*log(2)**2)/12 (0.582240526465012, 0.582240526465012) >>> polylog(2,-phi), -log(phi)**2-pi**2/10 (-1.21852526068613, -1.21852526068613) >>> polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6 (0.53721319360804, 0.53721319360804) :func:`~mpmath.polylog` can evaluate the analytic continuation of the polylogarithm when `s` is an integer:: >>> polylog(2, 10) (0.536301287357863 - 7.23378441241546j) >>> polylog(2, -10) -4.1982778868581 >>> polylog(2, 10j) (-3.05968879432873 + 3.71678149306807j) >>> polylog(-2, 10) -0.150891632373114 >>> polylog(-2, -10) 0.067618332081142 >>> polylog(-2, 10j) (0.0384353698579347 + 0.0912451798066779j) Some more examples, with arguments on the unit circle (note that the series definition cannot be used for computation here):: >>> polylog(2,j) (-0.205616758356028 + 0.915965594177219j) >>> j*catalan-pi**2/48 (-0.205616758356028 + 0.915965594177219j) >>> polylog(3,exp(2*pi*j/3)) (-0.534247512515375 + 0.765587078525922j) >>> -4*zeta(3)/9 + 2*j*pi**3/81 (-0.534247512515375 + 0.765587078525921j) Polylogarithms of different order are related by integration and differentiation:: >>> s, z = 3, 0.5 >>> polylog(s+1, z) 0.517479061673899 >>> quad(lambda t: polylog(s,t)/t, [0, z]) 0.517479061673899 >>> z*diff(lambda t: polylog(s+2,t), z) 0.517479061673899 Taylor series expansions around `z = 0` are:: >>> for n in range(-3, 4): ... nprint(taylor(lambda x: polylog(n,x), 0, 5)) ... [0.0, 1.0, 8.0, 27.0, 64.0, 125.0] [0.0, 1.0, 4.0, 9.0, 16.0, 25.0] [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] [0.0, 1.0, 1.0, 1.0, 1.0, 1.0] [0.0, 1.0, 0.5, 0.333333, 0.25, 0.2] [0.0, 1.0, 0.25, 0.111111, 0.0625, 0.04] [0.0, 1.0, 0.125, 0.037037, 0.015625, 0.008] The series defining the polylogarithm is simultaneously a Taylor series and an L-series. For certain values of `z`, the polylogarithm reduces to a pure zeta function:: >>> polylog(pi, 1), zeta(pi) (1.17624173838258, 1.17624173838258) >>> polylog(pi, -1), -altzeta(pi) (-0.909670702980385, -0.909670702980385) Evaluation for arbitrary, nonintegral `s` is supported for `z` within the unit circle: >>> polylog(3+4j, 0.25) (0.24258605789446 - 0.00222938275488344j) >>> nsum(lambda k: 0.25**k / k**(3+4j), [1,inf]) (0.24258605789446 - 0.00222938275488344j) It is also currently supported outside of the unit circle for `z` not too large in magnitude:: >>> polylog(1+j, 20+40j) (-7.1421172179728 - 3.92726697721369j) >>> polylog(1+j, 200+400j) Traceback (most recent call last): ... NotImplementedError: polylog for arbitrary s and z **References** 1. Richard Crandall, "Note on fast polylogarithm computation" http://people.reed.edu/~crandall/papers/Polylog.pdf 2. http://en.wikipedia.org/wiki/Polylogarithm 3. http://mathworld.wolfram.com/Polylogarithm.html """ bell = r""" For `n` a nonnegative integer, ``bell(n,x)`` evaluates the Bell polynomial `B_n(x)`, the first few of which are .. math :: B_0(x) = 1 B_1(x) = x B_2(x) = x^2+x B_3(x) = x^3+3x^2+x If `x = 1` or :func:`~mpmath.bell` is called with only one argument, it gives the `n`-th Bell number `B_n`, which is the number of partitions of a set with `n` elements. By setting the precision to at least `\log_{10} B_n` digits, :func:`~mpmath.bell` provides fast calculation of exact Bell numbers. In general, :func:`~mpmath.bell` computes .. 
math :: B_n(x) = e^{-x} \left(\mathrm{sinc}(\pi n) + E_n(x)\right) where `E_n(x)` is the generalized exponential function implemented by :func:`~mpmath.polyexp`. This is an extension of Dobinski's formula [1], where the modification is the sinc term ensuring that `B_n(x)` is continuous in `n`; :func:`~mpmath.bell` can thus be evaluated, differentiated, etc for arbitrary complex arguments. **Examples** Simple evaluations:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> bell(0, 2.5) 1.0 >>> bell(1, 2.5) 2.5 >>> bell(2, 2.5) 8.75 Evaluation for arbitrary complex arguments:: >>> bell(5.75+1j, 2-3j) (-10767.71345136587098445143 - 15449.55065599872579097221j) The first few Bell polynomials:: >>> for k in range(7): ... nprint(taylor(lambda x: bell(k,x), 0, k)) ... [1.0] [0.0, 1.0] [0.0, 1.0, 1.0] [0.0, 1.0, 3.0, 1.0] [0.0, 1.0, 7.0, 6.0, 1.0] [0.0, 1.0, 15.0, 25.0, 10.0, 1.0] [0.0, 1.0, 31.0, 90.0, 65.0, 15.0, 1.0] The first few Bell numbers and complementary Bell numbers:: >>> [int(bell(k)) for k in range(10)] [1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147] >>> [int(bell(k,-1)) for k in range(10)] [1, -1, 0, 1, 1, -2, -9, -9, 50, 267] Large Bell numbers:: >>> mp.dps = 50 >>> bell(50) 185724268771078270438257767181908917499221852770.0 >>> bell(50,-1) -29113173035759403920216141265491160286912.0 Some even larger values:: >>> mp.dps = 25 >>> bell(1000,-1) -1.237132026969293954162816e+1869 >>> bell(1000) 2.989901335682408421480422e+1927 >>> bell(1000,2) 6.591553486811969380442171e+1987 >>> bell(1000,100.5) 9.101014101401543575679639e+2529 A determinant identity satisfied by Bell numbers:: >>> mp.dps = 15 >>> N = 8 >>> det([[bell(k+j) for j in range(N)] for k in range(N)]) 125411328000.0 >>> superfac(N-1) 125411328000.0 **References** 1. http://mathworld.wolfram.com/DobinskisFormula.html """ polyexp = r""" Evaluates the polyexponential function, defined for arbitrary complex `s`, `z` by the series .. math :: E_s(z) = \sum_{k=1}^{\infty} \frac{k^s}{k!} z^k. `E_s(z)` is constructed from the exponential function analogously to how the polylogarithm is constructed from the ordinary logarithm; as a function of `s` (with `z` fixed), `E_s` is an L-series It is an entire function of both `s` and `z`. The polyexponential function provides a generalization of the Bell polynomials `B_n(x)` (see :func:`~mpmath.bell`) to noninteger orders `n`. In terms of the Bell polynomials, .. math :: E_s(z) = e^z B_s(z) - \mathrm{sinc}(\pi s). Note that `B_n(x)` and `e^{-x} E_n(x)` are identical if `n` is a nonzero integer, but not otherwise. In particular, they differ at `n = 0`. **Examples** Evaluating a series:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> nsum(lambda k: sqrt(k)/fac(k), [1,inf]) 2.101755547733791780315904 >>> polyexp(0.5,1) 2.101755547733791780315904 Evaluation for arbitrary arguments:: >>> polyexp(-3-4j, 2.5+2j) (2.351660261190434618268706 + 1.202966666673054671364215j) Evaluation is accurate for tiny function values:: >>> polyexp(4, -100) 3.499471750566824369520223e-36 If `n` is a nonpositive integer, `E_n` reduces to a special instance of the hypergeometric function `\,_pF_q`:: >>> n = 3 >>> x = pi >>> polyexp(-n,x) 4.042192318847986561771779 >>> x*hyper([1]*(n+1), [2]*(n+1), x) 4.042192318847986561771779 """ cyclotomic = r""" Evaluates the cyclotomic polynomial `\Phi_n(x)`, defined by .. math :: \Phi_n(x) = \prod_{\zeta} (x - \zeta) where `\zeta` ranges over all primitive `n`-th roots of unity (see :func:`~mpmath.unitroots`). 
An equivalent representation, used for computation, is .. math :: \Phi_n(x) = \prod_{d\mid n}(x^d-1)^{\mu(n/d)} where `\mu(m)` denotes the Moebius function. The cyclotomic polynomials are integer polynomials, the first of which can be written explicitly as .. math :: \Phi_0(x) = 1 \Phi_1(x) = x - 1 \Phi_2(x) = x + 1 \Phi_3(x) = x^2 + x + 1 \Phi_4(x) = x^2 + 1 \Phi_5(x) = x^4 + x^3 + x^2 + x + 1 \Phi_6(x) = x^2 - x + 1 **Examples** The coefficients of low-order cyclotomic polynomials can be recovered using Taylor expansion:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> for n in range(9): ... p = chop(taylor(lambda x: cyclotomic(n,x), 0, 10)) ... print("%s %s" % (n, nstr(p[:10+1-p[::-1].index(1)]))) ... 0 [1.0] 1 [-1.0, 1.0] 2 [1.0, 1.0] 3 [1.0, 1.0, 1.0] 4 [1.0, 0.0, 1.0] 5 [1.0, 1.0, 1.0, 1.0, 1.0] 6 [1.0, -1.0, 1.0] 7 [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 8 [1.0, 0.0, 0.0, 0.0, 1.0] The definition as a product over primitive roots may be checked by computing the product explicitly (for a real argument, this method will generally introduce numerical noise in the imaginary part):: >>> mp.dps = 25 >>> z = 3+4j >>> cyclotomic(10, z) (-419.0 - 360.0j) >>> fprod(z-r for r in unitroots(10, primitive=True)) (-419.0 - 360.0j) >>> z = 3 >>> cyclotomic(10, z) 61.0 >>> fprod(z-r for r in unitroots(10, primitive=True)) (61.0 - 3.146045605088568607055454e-25j) Up to permutation, the roots of a given cyclotomic polynomial can be checked to agree with the list of primitive roots:: >>> p = taylor(lambda x: cyclotomic(6,x), 0, 6)[:3] >>> for r in polyroots(p[::-1]): ... print(r) ... (0.5 - 0.8660254037844386467637232j) (0.5 + 0.8660254037844386467637232j) >>> >>> for r in unitroots(6, primitive=True): ... print(r) ... (0.5 + 0.8660254037844386467637232j) (0.5 - 0.8660254037844386467637232j) """ meijerg = r""" Evaluates the Meijer G-function, defined as .. math :: G^{m,n}_{p,q} \left( \left. \begin{matrix} a_1, \dots, a_n ; a_{n+1} \dots a_p \\ b_1, \dots, b_m ; b_{m+1} \dots b_q \end{matrix}\; \right| \; z ; r \right) = \frac{1}{2 \pi i} \int_L \frac{\prod_{j=1}^m \Gamma(b_j+s) \prod_{j=1}^n\Gamma(1-a_j-s)} {\prod_{j=n+1}^{p}\Gamma(a_j+s) \prod_{j=m+1}^q \Gamma(1-b_j-s)} z^{-s/r} ds for an appropriate choice of the contour `L` (see references). There are `p` elements `a_j`. The argument *a_s* should be a pair of lists, the first containing the `n` elements `a_1, \ldots, a_n` and the second containing the `p-n` elements `a_{n+1}, \ldots a_p`. There are `q` elements `b_j`. The argument *b_s* should be a pair of lists, the first containing the `m` elements `b_1, \ldots, b_m` and the second containing the `q-m` elements `b_{m+1}, \ldots b_q`. The implicit tuple `(m, n, p, q)` constitutes the order or degree of the Meijer G-function, and is determined by the lengths of the coefficient vectors. Confusingly, the indices in this tuple appear in a different order from the coefficients, but this notation is standard. The many examples given below should hopefully clear up any potential confusion. **Algorithm** The Meijer G-function is evaluated as a combination of hypergeometric series. There are two versions of the function, which can be selected with the optional *series* argument. *series=1* uses a sum of `m` `\,_pF_{q-1}` functions of `z` *series=2* uses a sum of `n` `\,_qF_{p-1}` functions of `1/z` The default series is chosen based on the degree and `|z|` in order to be consistent with Mathematica's.
This definition of the Meijer G-function has a discontinuity at `|z| = 1` for some orders, which can be avoided by explicitly specifying a series. Keyword arguments are forwarded to :func:`~mpmath.hypercomb`. **Examples** Many standard functions are special cases of the Meijer G-function (possibly rescaled and/or with branch cut corrections). We define some test parameters:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a = mpf(0.75) >>> b = mpf(1.5) >>> z = mpf(2.25) The exponential function: `e^z = G^{1,0}_{0,1} \left( \left. \begin{matrix} - \\ 0 \end{matrix} \; \right| \; -z \right)` >>> meijerg([[],[]], [[0],[]], -z) 9.487735836358525720550369 >>> exp(z) 9.487735836358525720550369 The natural logarithm: `\log(1+z) = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 0 \end{matrix} \; \right| \; -z \right)` >>> meijerg([[1,1],[]], [[1],[0]], z) 1.178654996341646117219023 >>> log(1+z) 1.178654996341646117219023 A rational function: `\frac{z}{z+1} = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 1 \end{matrix} \; \right| \; z \right)` >>> meijerg([[1,1],[]], [[1],[1]], z) 0.6923076923076923076923077 >>> z/(z+1) 0.6923076923076923076923077 The sine and cosine functions: `\frac{1}{\sqrt \pi} \sin(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix} - \\ \frac{1}{2}, 0 \end{matrix} \; \right| \; z \right)` `\frac{1}{\sqrt \pi} \cos(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix} - \\ 0, \frac{1}{2} \end{matrix} \; \right| \; z \right)` >>> meijerg([[],[]], [[0.5],[0]], (z/2)**2) 0.4389807929218676682296453 >>> sin(z)/sqrt(pi) 0.4389807929218676682296453 >>> meijerg([[],[]], [[0],[0.5]], (z/2)**2) -0.3544090145996275423331762 >>> cos(z)/sqrt(pi) -0.3544090145996275423331762 Bessel functions: `J_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} \end{matrix} \; \right| \; z \right)` `Y_a(2 \sqrt z) = G^{2,0}_{1,3} \left( \left. \begin{matrix} \frac{-a-1}{2} \\ \frac{a}{2}, -\frac{a}{2}, \frac{-a-1}{2} \end{matrix} \; \right| \; z \right)` `(-z)^{a/2} z^{-a/2} I_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} \end{matrix} \; \right| \; -z \right)` `2 K_a(2 \sqrt z) = G^{2,0}_{0,2} \left( \left. \begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} \end{matrix} \; \right| \; z \right)` As the example with the Bessel *I* function shows, a branch factor is required for some arguments when inverting the square root. >>> meijerg([[],[]], [[a/2],[-a/2]], (z/2)**2) 0.5059425789597154858527264 >>> besselj(a,z) 0.5059425789597154858527264 >>> meijerg([[],[(-a-1)/2]], [[a/2,-a/2],[(-a-1)/2]], (z/2)**2) 0.1853868950066556941442559 >>> bessely(a, z) 0.1853868950066556941442559 >>> meijerg([[],[]], [[a/2],[-a/2]], -(z/2)**2) (0.8685913322427653875717476 + 2.096964974460199200551738j) >>> (-z)**(a/2) / z**(a/2) * besseli(a, z) (0.8685913322427653875717476 + 2.096964974460199200551738j) >>> 0.5*meijerg([[],[]], [[a/2,-a/2],[]], (z/2)**2) 0.09334163695597828403796071 >>> besselk(a,z) 0.09334163695597828403796071 Error functions: `\sqrt{\pi} z^{2(a-1)} \mathrm{erfc}(z) = G^{2,0}_{1,2} \left( \left. 
\begin{matrix} a \\ a-1, a-\frac{1}{2} \end{matrix} \; \right| \; z, \frac{1}{2} \right)` >>> meijerg([[],[a]], [[a-1,a-0.5],[]], z, 0.5) 0.00172839843123091957468712 >>> sqrt(pi) * z**(2*a-2) * erfc(z) 0.00172839843123091957468712 A Meijer G-function of higher degree, (1,1,2,3): >>> meijerg([[a],[b]], [[a],[b,a-1]], z) 1.55984467443050210115617 >>> sin((b-a)*pi)/pi*(exp(z)-1)*z**(a-1) 1.55984467443050210115617 A Meijer G-function of still higher degree, (4,1,2,4), that can be expanded as a messy combination of exponential integrals: >>> meijerg([[a],[2*b-a]], [[b,a,b-0.5,-1-a+2*b],[]], z) 0.3323667133658557271898061 >>> chop(4**(a-b+1)*sqrt(pi)*gamma(2*b-2*a)*z**a*\ ... expint(2*b-2*a, -2*sqrt(-z))*expint(2*b-2*a, 2*sqrt(-z))) 0.3323667133658557271898061 In the following case, different series give different values:: >>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2)) -0.06417628097442437076207337 >>> meijerg([[1],[0.25]],[[3],[0.5]],-2,series=1) 0.1428699426155117511873047 >>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2,series=2)) -0.06417628097442437076207337 **References** 1. http://en.wikipedia.org/wiki/Meijer_G-function 2. http://mathworld.wolfram.com/MeijerG-Function.html 3. http://functions.wolfram.com/HypergeometricFunctions/MeijerG/ 4. http://functions.wolfram.com/HypergeometricFunctions/MeijerG1/ """ clsin = r""" Computes the Clausen sine function, defined formally by the series .. math :: \mathrm{Cl}_s(z) = \sum_{k=1}^{\infty} \frac{\sin(kz)}{k^s}. The special case `\mathrm{Cl}_2(z)` (i.e. ``clsin(2,z)``) is the classical "Clausen function". More generally, the Clausen function is defined for complex `s` and `z`, even when the series does not converge. The Clausen function is related to the polylogarithm (:func:`~mpmath.polylog`) as .. math :: \mathrm{Cl}_s(z) = \frac{1}{2i}\left(\mathrm{Li}_s\left(e^{iz}\right) - \mathrm{Li}_s\left(e^{-iz}\right)\right) = \mathrm{Im}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}), and this representation can be taken to provide the analytic continuation of the series. The complementary function :func:`~mpmath.clcos` gives the corresponding cosine sum. 
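The polylogarithm relation above is easy to check numerically. The following is an illustrative sketch (not one of the standard examples; it only uses :func:`~mpmath.polylog`, :func:`~mpmath.expj` and :func:`~mpmath.im`, and the tolerance is an arbitrary loose bound):: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> s, z = 2, 0.75 >>> abs(clsin(s, z) - im(polylog(s, expj(z)))) < 1e-20 True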
**Examples** Evaluation for arbitrarily chosen `s` and `z`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> s, z = 3, 4 >>> clsin(s, z); nsum(lambda k: sin(z*k)/k**s, [1,inf]) -0.6533010136329338746275795 -0.6533010136329338746275795 Using `z + \pi` instead of `z` gives an alternating series:: >>> clsin(s, z+pi) 0.8860032351260589402871624 >>> nsum(lambda k: (-1)**k*sin(z*k)/k**s, [1,inf]) 0.8860032351260589402871624 With `s = 1`, the sum can be expressed in closed form using elementary functions:: >>> z = 1 + sqrt(3) >>> clsin(1, z) 0.2047709230104579724675985 >>> chop((log(1-exp(-j*z)) - log(1-exp(j*z)))/(2*j)) 0.2047709230104579724675985 >>> nsum(lambda k: sin(k*z)/k, [1,inf]) 0.2047709230104579724675985 The classical Clausen function `\mathrm{Cl}_2(\theta)` gives the value of the integral `\int_0^{\theta} -\ln(2\sin(x/2)) dx` for `0 < \theta < 2 \pi`:: >>> cl2 = lambda t: clsin(2, t) >>> cl2(3.5) -0.2465045302347694216534255 >>> -quad(lambda x: ln(2*sin(0.5*x)), [0, 3.5]) -0.2465045302347694216534255 This function is symmetric about `\theta = \pi` with zeros and extreme points:: >>> cl2(0); cl2(pi/3); chop(cl2(pi)); cl2(5*pi/3); chop(cl2(2*pi)) 0.0 1.014941606409653625021203 0.0 -1.014941606409653625021203 0.0 Catalan's constant is a special value:: >>> cl2(pi/2) 0.9159655941772190150546035 >>> +catalan 0.9159655941772190150546035 The Clausen sine function can be expressed in closed form when `s` is an odd integer (becoming zero when `s` < 0):: >>> z = 1 + sqrt(2) >>> clsin(1, z); (pi-z)/2 0.3636895456083490948304773 0.3636895456083490948304773 >>> clsin(3, z); pi**2/6*z - pi*z**2/4 + z**3/12 0.5661751584451144991707161 0.5661751584451144991707161 >>> clsin(-1, z) 0.0 >>> clsin(-3, z) 0.0 It can also be expressed in closed form for even integer `s \le 0`, providing a finite sum for series such as `\sin(z) + \sin(2z) + \sin(3z) + \ldots`:: >>> z = 1 + sqrt(2) >>> clsin(0, z) 0.1903105029507513881275865 >>> cot(z/2)/2 0.1903105029507513881275865 >>> clsin(-2, z) -0.1089406163841548817581392 >>> -cot(z/2)*csc(z/2)**2/4 -0.1089406163841548817581392 Call with ``pi=True`` to multiply `z` by `\pi` exactly:: >>> clsin(3, 3*pi) -8.892316224968072424732898e-26 >>> clsin(3, 3, pi=True) 0.0 Evaluation for complex `s`, `z` in a nonconvergent case:: >>> s, z = -1-j, 1+2j >>> clsin(s, z) (-0.593079480117379002516034 + 0.9038644233367868273362446j) >>> extraprec(20)(nsum)(lambda k: sin(k*z)/k**s, [1,inf]) (-0.593079480117379002516034 + 0.9038644233367868273362446j) """ clcos = r""" Computes the Clausen cosine function, defined formally by the series .. math :: \mathrm{\widetilde{Cl}}_s(z) = \sum_{k=1}^{\infty} \frac{\cos(kz)}{k^s}. This function is complementary to the Clausen sine function :func:`~mpmath.clsin`. In terms of the polylogarithm, .. math :: \mathrm{\widetilde{Cl}}_s(z) = \frac{1}{2}\left(\mathrm{Li}_s\left(e^{iz}\right) + \mathrm{Li}_s\left(e^{-iz}\right)\right) = \mathrm{Re}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}). 
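As with :func:`~mpmath.clsin`, the polylogarithm representation can be verified directly; indeed `\mathrm{Li}_s(e^{iz}) = \mathrm{\widetilde{Cl}}_s(z) + i\,\mathrm{Cl}_s(z)` for real `s`, `z`. A small illustrative sketch (not one of the standard examples; the tolerance is an arbitrary loose bound):: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> s, z = 3, 1.25 >>> abs(polylog(s, expj(z)) - (clcos(s, z) + j*clsin(s, z))) < 1e-20 True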
**Examples** Evaluation for arbitrarily chosen `s` and `z`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> s, z = 3, 4 >>> clcos(s, z); nsum(lambda k: cos(z*k)/k**s, [1,inf]) -0.6518926267198991308332759 -0.6518926267198991308332759 Using `z + \pi` instead of `z` gives an alternating series:: >>> s, z = 3, 0.5 >>> clcos(s, z+pi) -0.8155530586502260817855618 >>> nsum(lambda k: (-1)**k*cos(z*k)/k**s, [1,inf]) -0.8155530586502260817855618 With `s = 1`, the sum can be expressed in closed form using elementary functions:: >>> z = 1 + sqrt(3) >>> clcos(1, z) -0.6720334373369714849797918 >>> chop(-0.5*(log(1-exp(j*z))+log(1-exp(-j*z)))) -0.6720334373369714849797918 >>> -log(abs(2*sin(0.5*z))) # Equivalent to above when z is real -0.6720334373369714849797918 >>> nsum(lambda k: cos(k*z)/k, [1,inf]) -0.6720334373369714849797918 It can also be expressed in closed form when `s` is an even integer. For example, >>> clcos(2,z) -0.7805359025135583118863007 >>> pi**2/6 - pi*z/2 + z**2/4 -0.7805359025135583118863007 The case `s = 0` gives the renormalized sum of `\cos(z) + \cos(2z) + \cos(3z) + \ldots` (which happens to be the same for any value of `z`):: >>> clcos(0, z) -0.5 >>> nsum(lambda k: cos(k*z), [1,inf]) -0.5 Also the sums .. math :: \cos(z) + 2\cos(2z) + 3\cos(3z) + \ldots and .. math :: \cos(z) + 2^n \cos(2z) + 3^n \cos(3z) + \ldots for higher integer powers `n = -s` can be done in closed form. They are zero when `n` is positive and even (`s` negative and even):: >>> clcos(-1, z); 1/(2*cos(z)-2) -0.2607829375240542480694126 -0.2607829375240542480694126 >>> clcos(-3, z); (2+cos(z))*csc(z/2)**4/8 0.1472635054979944390848006 0.1472635054979944390848006 >>> clcos(-2, z); clcos(-4, z); clcos(-6, z) 0.0 0.0 0.0 With `z = \pi`, the series reduces to that of the Riemann zeta function (more generally, if `z = p \pi/q`, it is a finite sum over Hurwitz zeta function values):: >>> clcos(2.5, 0); zeta(2.5) 1.34148725725091717975677 1.34148725725091717975677 >>> clcos(2.5, pi); -altzeta(2.5) -0.8671998890121841381913472 -0.8671998890121841381913472 Call with ``pi=True`` to multiply `z` by `\pi` exactly:: >>> clcos(-3, 2*pi) 2.997921055881167659267063e+102 >>> clcos(-3, 2, pi=True) 0.008333333333333333333333333 Evaluation for complex `s`, `z` in a nonconvergent case:: >>> s, z = -1-j, 1+2j >>> clcos(s, z) (0.9407430121562251476136807 + 0.715826296033590204557054j) >>> extraprec(20)(nsum)(lambda k: cos(k*z)/k**s, [1,inf]) (0.9407430121562251476136807 + 0.715826296033590204557054j) """ whitm = r""" Evaluates the Whittaker function `M(k,m,z)`, which gives a solution to the Whittaker differential equation .. math :: \frac{d^2f}{dz^2} + \left(-\frac{1}{4}+\frac{k}{z}+ \frac{(\frac{1}{4}-m^2)}{z^2}\right) f = 0. A second solution is given by :func:`~mpmath.whitw`. The Whittaker functions are defined in Abramowitz & Stegun, section 13.1. They are alternate forms of the confluent hypergeometric functions `\,_1F_1` and `U`: .. math :: M(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m} \,_1F_1(\tfrac{1}{2}+m-k, 1+2m, z) W(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m} U(\tfrac{1}{2}+m-k, 1+2m, z). 
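The `\,_1F_1` representation above can be re-evaluated directly with :func:`~mpmath.hyp1f1`; the following sketch (for illustration only, with an arbitrary loose tolerance) does exactly that for one choice of arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> k, m, z = mpf(0.25), mpf(1.5), mpf(0.75) >>> lhs = whitm(k, m, z) >>> rhs = exp(-z/2) * z**(m+0.5) * hyp1f1(0.5+m-k, 1+2*m, z) >>> abs(lhs - rhs) < 1e-20 True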
**Examples** Evaluation for arbitrary real and complex arguments is supported:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> whitm(1, 1, 1) 0.7302596799460411820509668 >>> whitm(1, 1, -1) (0.0 - 1.417977827655098025684246j) >>> whitm(j, j/2, 2+3j) (3.245477713363581112736478 - 0.822879187542699127327782j) >>> whitm(2, 3, 100000) 4.303985255686378497193063e+21707 Evaluation at zero:: >>> whitm(1,-1,0); whitm(1,-0.5,0); whitm(1,0,0) +inf nan 0.0 We can verify that :func:`~mpmath.whitm` numerically satisfies the differential equation for arbitrarily chosen values:: >>> k = mpf(0.25) >>> m = mpf(1.5) >>> f = lambda z: whitm(k,m,z) >>> for z in [-1, 2.5, 3, 1+2j]: ... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z)) ... 0.0 0.0 0.0 0.0 An integral involving both :func:`~mpmath.whitm` and :func:`~mpmath.whitw`, verifying evaluation along the real axis:: >>> quad(lambda x: exp(-x)*whitm(3,2,x)*whitw(1,-2,x), [0,inf]) 3.438869842576800225207341 >>> 128/(21*sqrt(pi)) 3.438869842576800225207341 """ whitw = r""" Evaluates the Whittaker function `W(k,m,z)`, which gives a second solution to the Whittaker differential equation. (See :func:`~mpmath.whitm`.) **Examples** Evaluation for arbitrary real and complex arguments is supported:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> whitw(1, 1, 1) 1.19532063107581155661012 >>> whitw(1, 1, -1) (-0.9424875979222187313924639 - 0.2607738054097702293308689j) >>> whitw(j, j/2, 2+3j) (0.1782899315111033879430369 - 0.01609578360403649340169406j) >>> whitw(2, 3, 100000) 1.887705114889527446891274e-21705 >>> whitw(-1, -1, 100) 1.905250692824046162462058e-24 Evaluation at zero:: >>> for m in [-1, -0.5, 0, 0.5, 1]: ... whitw(1, m, 0) ... +inf nan 0.0 nan +inf We can verify that :func:`~mpmath.whitw` numerically satisfies the differential equation for arbitrarily chosen values:: >>> k = mpf(0.25) >>> m = mpf(1.5) >>> f = lambda z: whitw(k,m,z) >>> for z in [-1, 2.5, 3, 1+2j]: ... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z)) ... 0.0 0.0 0.0 0.0 """ ber = r""" Computes the Kelvin function ber, which for real arguments gives the real part of the Bessel J function of a rotated argument .. math :: J_n\left(x e^{3\pi i/4}\right) = \mathrm{ber}_n(x) + i \mathrm{bei}_n(x). The imaginary part is given by :func:`~mpmath.bei`. **Plots** .. literalinclude :: /plots/ber.py .. image :: /plots/ber.png **Examples** Verifying the defining relation:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> n, x = 2, 3.5 >>> ber(n,x) 1.442338852571888752631129 >>> bei(n,x) -0.948359035324558320217678 >>> besselj(n, x*root(1,8,3)) (1.442338852571888752631129 - 0.948359035324558320217678j) The ber and bei functions are also defined by analytic continuation for complex arguments:: >>> ber(1+j, 2+3j) (4.675445984756614424069563 - 15.84901771719130765656316j) >>> bei(1+j, 2+3j) (15.83886679193707699364398 + 4.684053288183046528703611j) """ bei = r""" Computes the Kelvin function bei, which for real arguments gives the imaginary part of the Bessel J function of a rotated argument. See :func:`~mpmath.ber`. """ ker = r""" Computes the Kelvin function ker, which for real arguments gives the real part of the (rescaled) Bessel K function of a rotated argument .. math :: e^{-n \pi i/2} K_n\left(x e^{\pi i/4}\right) = \mathrm{ker}_n(x) + i \mathrm{kei}_n(x). The imaginary part is given by :func:`~mpmath.kei`. **Plots** .. literalinclude :: /plots/ker.py ..
image :: /plots/ker.png **Examples** Verifying the defining relation:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> n, x = 2, 4.5 >>> ker(n,x) 0.02542895201906369640249801 >>> kei(n,x) -0.02074960467222823237055351 >>> exp(-n*pi*j/2) * besselk(n, x*root(1,8,1)) (0.02542895201906369640249801 - 0.02074960467222823237055351j) The ker and kei functions are also defined by analytic continuation for complex arguments:: >>> ker(1+j, 3+4j) (1.586084268115490421090533 - 2.939717517906339193598719j) >>> kei(1+j, 3+4j) (-2.940403256319453402690132 - 1.585621643835618941044855j) """ kei = r""" Computes the Kelvin function kei, which for real arguments gives the imaginary part of the (rescaled) Bessel K function of a rotated argument. See :func:`~mpmath.ker`. """ struveh = r""" Gives the Struve function .. math :: \,\mathbf{H}_n(z) = \sum_{k=0}^\infty \frac{(-1)^k}{\Gamma(k+\frac{3}{2}) \Gamma(k+n+\frac{3}{2})} {\left({\frac{z}{2}}\right)}^{2k+n+1} which is a solution to the Struve differential equation .. math :: z^2 f''(z) + z f'(z) + (z^2-n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}. **Examples** Evaluation for arbitrary real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> struveh(0, 3.5) 0.3608207733778295024977797 >>> struveh(-1, 10) -0.255212719726956768034732 >>> struveh(1, -100.5) 0.5819566816797362287502246 >>> struveh(2.5, 10000000000000) 3153915652525200060.308937 >>> struveh(2.5, -10000000000000) (0.0 - 3153915652525200060.308937j) >>> struveh(1+j, 1000000+4000000j) (-3.066421087689197632388731e+1737173 - 1.596619701076529803290973e+1737173j) A Struve function of half-integer order is elementary; for example: >>> z = 3 >>> struveh(0.5, 3) 0.9167076867564138178671595 >>> sqrt(2/(pi*z))*(1-cos(z)) 0.9167076867564138178671595 Numerically verifying the differential equation:: >>> z = mpf(4.5) >>> n = 3 >>> f = lambda z: struveh(n,z) >>> lhs = z**2*diff(f,z,2) + z*diff(f,z) + (z**2-n**2)*f(z) >>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi >>> lhs 17.40359302709875496632744 >>> rhs 17.40359302709875496632744 """ struvel = r""" Gives the modified Struve function .. math :: \,\mathbf{L}_n(z) = -i e^{-n\pi i/2} \mathbf{H}_n(i z) which solves to the modified Struve differential equation .. math :: z^2 f''(z) + z f'(z) - (z^2+n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}. **Examples** Evaluation for arbitrary real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> struvel(0, 3.5) 7.180846515103737996249972 >>> struvel(-1, 10) 2670.994904980850550721511 >>> struvel(1, -100.5) 1.757089288053346261497686e+42 >>> struvel(2.5, 10000000000000) 4.160893281017115450519948e+4342944819025 >>> struvel(2.5, -10000000000000) (0.0 - 4.160893281017115450519948e+4342944819025j) >>> struvel(1+j, 700j) (-0.1721150049480079451246076 + 0.1240770953126831093464055j) >>> struvel(1+j, 1000000+4000000j) (-2.973341637511505389128708e+434290 - 5.164633059729968297147448e+434290j) Numerically verifying the differential equation:: >>> z = mpf(3.5) >>> n = 3 >>> f = lambda z: struvel(n,z) >>> lhs = z**2*diff(f,z,2) + z*diff(f,z) - (z**2+n**2)*f(z) >>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi >>> lhs 6.368850306060678353018165 >>> rhs 6.368850306060678353018165 """ appellf1 = r""" Gives the Appell F1 hypergeometric function of two variables, .. math :: F_1(a,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} \frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c)_{m+n}} \frac{x^m y^n}{m! n!}. 
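Inside the region of convergence described below, the defining double series can also be summed directly, which gives a simple (if slow) cross-check; this is an illustrative sketch rather than one of the standard examples, with an arbitrary truncation order and tolerance:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a, b1, b2, c, x, y = 1, 2, 3, 4, mpf(0.2), mpf(0.1) >>> s = fsum(rf(a,m+n)*rf(b1,m)*rf(b2,n)/rf(c,m+n)*x**m*y**n/(fac(m)*fac(n)) ... for m in range(40) for n in range(40)) >>> abs(appellf1(a, b1, b2, c, x, y) - s) < 1e-15 True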
This series is only generally convergent when `|x| < 1` and `|y| < 1`, although :func:`~mpmath.appellf1` can evaluate an analytic continuation with respect to either variable, and sometimes both. **Examples** Evaluation is supported for real and complex parameters:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> appellf1(1,0,0.5,1,0.5,0.25) 1.154700538379251529018298 >>> appellf1(1,1+j,0.5,1,0.5,0.5j) (1.138403860350148085179415 + 1.510544741058517621110615j) For some integer parameters, the F1 series reduces to a polynomial:: >>> appellf1(2,-4,-3,1,2,5) -816.0 >>> appellf1(-5,1,2,1,4,5) -20528.0 The analytic continuation with respect to either `x` or `y`, and sometimes with respect to both, can be evaluated:: >>> appellf1(2,3,4,5,100,0.5) (0.0006231042714165329279738662 + 0.0000005769149277148425774499857j) >>> appellf1('1.1', '0.3', '0.2+2j', '0.4', '0.2', 1.5+3j) (-0.1782604566893954897128702 + 0.002472407104546216117161499j) >>> appellf1(1,2,3,4,10,12) -0.07122993830066776374929313 For certain arguments, F1 reduces to an ordinary hypergeometric function:: >>> appellf1(1,2,3,5,0.5,0.25) 1.547902270302684019335555 >>> 4*hyp2f1(1,2,5,'1/3')/3 1.547902270302684019335555 >>> appellf1(1,2,3,4,0,1.5) (-1.717202506168937502740238 - 2.792526803190927323077905j) >>> hyp2f1(1,3,4,1.5) (-1.717202506168937502740238 - 2.792526803190927323077905j) The F1 function satisfies a system of partial differential equations:: >>> a,b1,b2,c,x,y = map(mpf, [1,0.5,0.25,1.125,0.25,-0.25]) >>> F = lambda x,y: appellf1(a,b1,b2,c,x,y) >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) + ... y*(1-x)*diff(F,(x,y),(1,1)) + ... (c-(a+b1+1)*x)*diff(F,(x,y),(1,0)) - ... b1*y*diff(F,(x,y),(0,1)) - ... a*b1*F(x,y)) 0.0 >>> >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) + ... x*(1-y)*diff(F,(x,y),(1,1)) + ... (c-(a+b2+1)*y)*diff(F,(x,y),(0,1)) - ... b2*x*diff(F,(x,y),(1,0)) - ... a*b2*F(x,y)) 0.0 The Appell F1 function allows for closed-form evaluation of various integrals, such as any integral of the form `\int x^r (x+a)^p (x+b)^q dx`:: >>> def integral(a,b,p,q,r,x1,x2): ... a,b,p,q,r,x1,x2 = map(mpmathify, [a,b,p,q,r,x1,x2]) ... f = lambda x: x**r * (x+a)**p * (x+b)**q ... def F(x): ... v = x**(r+1)/(r+1) * (a+x)**p * (b+x)**q ... v *= (1+x/a)**(-p) ... v *= (1+x/b)**(-q) ... v *= appellf1(r+1,-p,-q,2+r,-x/a,-x/b) ... return v ... print("Num. quad: %s" % quad(f, [x1,x2])) ... print("Appell F1: %s" % (F(x2)-F(x1))) ... >>> integral('1/5','4/3','-2','3','1/2',0,1) Num. quad: 9.073335358785776206576981 Appell F1: 9.073335358785776206576981 >>> integral('3/2','4/3','-2','3','1/2',0,1) Num. quad: 1.092829171999626454344678 Appell F1: 1.092829171999626454344678 >>> integral('3/2','4/3','-2','3','1/2',12,25) Num. quad: 1106.323225040235116498927 Appell F1: 1106.323225040235116498927 Also incomplete elliptic integrals fall into this category [1]:: >>> def E(z, m): ... if (pi/2).ae(z): ... return ellipe(m) ... return 2*round(re(z)/pi)*ellipe(m) + mpf(-1)**round(re(z)/pi)*\ ... sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2) ... >>> z, m = 1, 0.5 >>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z]) 0.9273298836244400669659042 0.9273298836244400669659042 >>> z, m = 3, 2 >>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z]) (1.057495752337234229715836 + 1.198140234735592207439922j) (1.057495752337234229715836 + 1.198140234735592207439922j) **References** 1. [WolframFunctions]_ http://functions.wolfram.com/EllipticIntegrals/EllipticE2/26/01/ 2. [SrivastavaKarlsson]_ 3. [CabralRosetti]_ 4. [Vidunas]_ 5.
[Slater]_ """ angerj = r""" Gives the Anger function .. math :: \mathbf{J}_{\nu}(z) = \frac{1}{\pi} \int_0^{\pi} \cos(\nu t - z \sin t) dt which is an entire function of both the parameter `\nu` and the argument `z`. It solves the inhomogeneous Bessel differential equation .. math :: f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z) = \frac{(z-\nu)}{\pi z^2} \sin(\pi \nu). **Examples** Evaluation for real and complex parameter and argument:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> angerj(2,3) 0.4860912605858910769078311 >>> angerj(-3+4j, 2+5j) (-5033.358320403384472395612 + 585.8011892476145118551756j) >>> angerj(3.25, 1e6j) (4.630743639715893346570743e+434290 - 1.117960409887505906848456e+434291j) >>> angerj(-1.5, 1e6) 0.0002795719747073879393087011 The Anger function coincides with the Bessel J-function when `\nu` is an integer:: >>> angerj(1,3); besselj(1,3) 0.3390589585259364589255146 0.3390589585259364589255146 >>> angerj(1.5,3); besselj(1.5,3) 0.4088969848691080859328847 0.4777182150870917715515015 Verifying the differential equation:: >>> v,z = mpf(2.25), 0.75 >>> f = lambda z: angerj(v,z) >>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z) -0.6002108774380707130367995 >>> (z-v)/(pi*z**2) * sinpi(v) -0.6002108774380707130367995 Verifying the integral representation:: >>> angerj(v,z) 0.1145380759919333180900501 >>> quad(lambda t: cos(v*t-z*sin(t))/pi, [0,pi]) 0.1145380759919333180900501 **References** 1. [DLMF]_ section 11.10: Anger-Weber Functions """ webere = r""" Gives the Weber function .. math :: \mathbf{E}_{\nu}(z) = \frac{1}{\pi} \int_0^{\pi} \sin(\nu t - z \sin t) dt which is an entire function of both the parameter `\nu` and the argument `z`. It solves the inhomogeneous Bessel differential equation .. math :: f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z) = -\frac{1}{\pi z^2} (z+\nu+(z-\nu)\cos(\pi \nu)). **Examples** Evaluation for real and complex parameter and argument:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> webere(2,3) -0.1057668973099018425662646 >>> webere(-3+4j, 2+5j) (-585.8081418209852019290498 - 5033.314488899926921597203j) >>> webere(3.25, 1e6j) (-1.117960409887505906848456e+434291 - 4.630743639715893346570743e+434290j) >>> webere(3.25, 1e6) -0.00002812518265894315604914453 Up to addition of a rational function of `z`, the Weber function coincides with the Struve H-function when `\nu` is an integer:: >>> webere(1,3); 2/pi-struveh(1,3) -0.3834897968188690177372881 -0.3834897968188690177372881 >>> webere(5,3); 26/(35*pi)-struveh(5,3) 0.2009680659308154011878075 0.2009680659308154011878075 Verifying the differential equation:: >>> v,z = mpf(2.25), 0.75 >>> f = lambda z: webere(v,z) >>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z) -1.097441848875479535164627 >>> -(z+v+(z-v)*cospi(v))/(pi*z**2) -1.097441848875479535164627 Verifying the integral representation:: >>> webere(v,z) 0.1486507351534283744485421 >>> quad(lambda t: sin(v*t-z*sin(t))/pi, [0,pi]) 0.1486507351534283744485421 **References** 1. [DLMF]_ section 11.10: Anger-Weber Functions """ lommels1 = r""" Gives the Lommel function `s_{\mu,\nu}` or `s^{(1)}_{\mu,\nu}` .. math :: s_{\mu,\nu}(z) = \frac{z^{\mu+1}}{(\mu-\nu+1)(\mu+\nu+1)} \,_1F_2\left(1; \frac{\mu-\nu+3}{2}, \frac{\mu+\nu+3}{2}; -\frac{z^2}{4} \right) which solves the inhomogeneous Bessel equation .. math :: z^2 f''(z) + z f'(z) + (z^2-\nu^2) f(z) = z^{\mu+1}. A second solution is given by :func:`~mpmath.lommels2`. **Plots** .. literalinclude :: /plots/lommels1.py .. 
image :: /plots/lommels1.png **Examples** An integral representation:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> u,v,z = 0.25, 0.125, mpf(0.75) >>> lommels1(u,v,z) 0.4276243877565150372999126 >>> (bessely(v,z)*quad(lambda t: t**u*besselj(v,t), [0,z]) - \ ... besselj(v,z)*quad(lambda t: t**u*bessely(v,t), [0,z]))*(pi/2) 0.4276243877565150372999126 A special value:: >>> lommels1(v,v,z) 0.5461221367746048054932553 >>> gamma(v+0.5)*sqrt(pi)*power(2,v-1)*struveh(v,z) 0.5461221367746048054932553 Verifying the differential equation:: >>> f = lambda z: lommels1(u,v,z) >>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z) 0.6979536443265746992059141 >>> z**(u+1) 0.6979536443265746992059141 **References** 1. [GradshteynRyzhik]_ 2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html """ lommels2 = r""" Gives the second Lommel function `S_{\mu,\nu}` or `s^{(2)}_{\mu,\nu}` .. math :: S_{\mu,\nu}(z) = s_{\mu,\nu}(z) + 2^{\mu-1} \Gamma\left(\tfrac{1}{2}(\mu-\nu+1)\right) \Gamma\left(\tfrac{1}{2}(\mu+\nu+1)\right) \times \left[\sin(\tfrac{1}{2}(\mu-\nu)\pi) J_{\nu}(z) - \cos(\tfrac{1}{2}(\mu-\nu)\pi) Y_{\nu}(z) \right] which solves the same differential equation as :func:`~mpmath.lommels1`. **Plots** .. literalinclude :: /plots/lommels2.py .. image :: /plots/lommels2.png **Examples** For large `|z|`, `S_{\mu,\nu} \sim z^{\mu-1}`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> lommels2(10,2,30000) 1.968299831601008419949804e+40 >>> power(30000,9) 1.9683e+40 A special value:: >>> u,v,z = 0.5, 0.125, mpf(0.75) >>> lommels2(v,v,z) 0.9589683199624672099969765 >>> (struveh(v,z)-bessely(v,z))*power(2,v-1)*sqrt(pi)*gamma(v+0.5) 0.9589683199624672099969765 Verifying the differential equation:: >>> f = lambda z: lommels2(u,v,z) >>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z) 0.6495190528383289850727924 >>> z**(u+1) 0.6495190528383289850727924 **References** 1. [GradshteynRyzhik]_ 2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html """ appellf2 = r""" Gives the Appell F2 hypergeometric function of two variables .. math :: F_2(a,b_1,b_2,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} \frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c_1)_m (c_2)_n} \frac{x^m y^n}{m! n!}. The series is generally absolutely convergent for `|x| + |y| < 1`. **Examples** Evaluation for real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> appellf2(1,2,3,4,5,0.25,0.125) 1.257417193533135344785602 >>> appellf2(1,-3,-4,2,3,2,3) -42.8 >>> appellf2(0.5,0.25,-0.25,2,3,0.25j,0.25) (0.9880539519421899867041719 + 0.01497616165031102661476978j) >>> chop(appellf2(1,1+j,1-j,3j,-3j,0.25,0.25)) 1.201311219287411337955192 >>> appellf2(1,1,1,4,6,0.125,16) (-0.09455532250274744282125152 - 0.7647282253046207836769297j) A transformation formula:: >>> a,b1,b2,c1,c2,x,y = map(mpf, [1,2,0.5,0.25,1.625,-0.125,0.125]) >>> appellf2(a,b1,b2,c1,c2,x,y) 0.2299211717841180783309688 >>> (1-x)**(-a)*appellf2(a,c1-b1,b2,c1,c2,x/(x-1),y/(1-x)) 0.2299211717841180783309688 A system of partial differential equations satisfied by F2:: >>> a,b1,b2,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,1.5,0.0625,-0.0625]) >>> F = lambda x,y: appellf2(a,b1,b2,c1,c2,x,y) >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) - ... x*y*diff(F,(x,y),(1,1)) + ... (c1-(a+b1+1)*x)*diff(F,(x,y),(1,0)) - ... b1*y*diff(F,(x,y),(0,1)) - ... a*b1*F(x,y)) 0.0 >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) - ... x*y*diff(F,(x,y),(1,1)) + ... (c2-(a+b2+1)*y)*diff(F,(x,y),(0,1)) - ... b2*x*diff(F,(x,y),(1,0)) - ... 
a*b2*F(x,y)) 0.0 **References** See references for :func:`~mpmath.appellf1`. """ appellf3 = r""" Gives the Appell F3 hypergeometric function of two variables .. math :: F_3(a_1,a_2,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} \frac{(a_1)_m (a_2)_n (b_1)_m (b_2)_n}{(c)_{m+n}} \frac{x^m y^n}{m! n!}. The series is generally absolutely convergent for `|x| < 1, |y| < 1`. **Examples** Evaluation for various parameters and variables:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> appellf3(1,2,3,4,5,0.5,0.25) 2.221557778107438938158705 >>> appellf3(1,2,3,4,5,6,0); hyp2f1(1,3,5,6) (-0.5189554589089861284537389 - 0.1454441043328607980769742j) (-0.5189554589089861284537389 - 0.1454441043328607980769742j) >>> appellf3(1,-2,-3,1,1,4,6) -17.4 >>> appellf3(1,2,-3,1,1,4,6) (17.7876136773677356641825 + 19.54768762233649126154534j) >>> appellf3(1,2,-3,1,1,6,4) (85.02054175067929402953645 + 148.4402528821177305173599j) >>> chop(appellf3(1+j,2,1-j,2,3,0.25,0.25)) 1.719992169545200286696007 Many transformations and evaluations for special combinations of the parameters are possible, e.g.: >>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125]) >>> appellf3(a,c-a,b,c-b,c,x,y) 1.093432340896087107444363 >>> (1-y)**(a+b-c)*hyp2f1(a,b,c,x+y-x*y) 1.093432340896087107444363 >>> x**2*appellf3(1,1,1,1,3,x,-x) 0.01568646277445385390945083 >>> polylog(2,x**2) 0.01568646277445385390945083 >>> a1,a2,b1,b2,c,x = map(mpf, [0.5,0.25,0.125,0.5,4.25,0.125]) >>> appellf3(a1,a2,b1,b2,c,x,1) 1.03947361709111140096947 >>> gammaprod([c,c-a2-b2],[c-a2,c-b2])*hyp3f2(a1,b1,c-a2-b2,c-a2,c-b2,x) 1.03947361709111140096947 The Appell F3 function satisfies a pair of partial differential equations:: >>> a1,a2,b1,b2,c,x,y = map(mpf, [0.5,0.25,0.125,0.5,0.625,0.0625,-0.0625]) >>> F = lambda x,y: appellf3(a1,a2,b1,b2,c,x,y) >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) + ... y*diff(F,(x,y),(1,1)) + ... (c-(a1+b1+1)*x)*diff(F,(x,y),(1,0)) - ... a1*b1*F(x,y)) 0.0 >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) + ... x*diff(F,(x,y),(1,1)) + ... (c-(a2+b2+1)*y)*diff(F,(x,y),(0,1)) - ... a2*b2*F(x,y)) 0.0 **References** See references for :func:`~mpmath.appellf1`. """ appellf4 = r""" Gives the Appell F4 hypergeometric function of two variables .. math :: F_4(a,b,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} \frac{(a)_{m+n} (b)_{m+n}}{(c_1)_m (c_2)_n} \frac{x^m y^n}{m! n!}. The series is generally absolutely convergent for `\sqrt{|x|} + \sqrt{|y|} < 1`. **Examples** Evaluation for various parameters and arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> appellf4(1,1,2,2,0.25,0.125) 1.286182069079718313546608 >>> appellf4(-2,-3,4,5,4,5) 34.8 >>> appellf4(5,4,2,3,0.25j,-0.125j) (-0.2585967215437846642163352 + 2.436102233553582711818743j) Reduction to `\,_2F_1` in a special case:: >>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125]) >>> appellf4(a,b,c,a+b-c+1,x*(1-y),y*(1-x)) 1.129143488466850868248364 >>> hyp2f1(a,b,c,x)*hyp2f1(a,b,a+b-c+1,y) 1.129143488466850868248364 A system of partial differential equations satisfied by F4:: >>> a,b,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,0.0625,-0.0625]) >>> F = lambda x,y: appellf4(a,b,c1,c2,x,y) >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) - ... y**2*diff(F,(x,y),(0,2)) - ... 2*x*y*diff(F,(x,y),(1,1)) + ... (c1-(a+b+1)*x)*diff(F,(x,y),(1,0)) - ... ((a+b+1)*y)*diff(F,(x,y),(0,1)) - ... a*b*F(x,y)) 0.0 >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) - ... x**2*diff(F,(x,y),(2,0)) - ... 2*x*y*diff(F,(x,y),(1,1)) + ... (c2-(a+b+1)*y)*diff(F,(x,y),(0,1)) - ... 
((a+b+1)*x)*diff(F,(x,y),(1,0)) - ... a*b*F(x,y)) 0.0 **References** See references for :func:`~mpmath.appellf1`. """ zeta = r""" Computes the Riemann zeta function .. math :: \zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots or, with `a \ne 1`, the more general Hurwitz zeta function .. math :: \zeta(s,a) = \sum_{k=0}^\infty \frac{1}{(a+k)^s}. Optionally, ``zeta(s, a, n)`` computes the `n`-th derivative with respect to `s`, .. math :: \zeta^{(n)}(s,a) = (-1)^n \sum_{k=0}^\infty \frac{\log^n(a+k)}{(a+k)^s}. Although these series only converge for `\Re(s) > 1`, the Riemann and Hurwitz zeta functions are defined through analytic continuation for arbitrary complex `s \ne 1` (`s = 1` is a pole). The implementation uses three algorithms: the Borwein algorithm for the Riemann zeta function when `s` is close to the real line; the Riemann-Siegel formula for the Riemann zeta function when `s` is large imaginary, and Euler-Maclaurin summation in all other cases. The reflection formula for `\Re(s) < 0` is implemented in some cases. The algorithm can be chosen with ``method = 'borwein'``, ``method='riemann-siegel'`` or ``method = 'euler-maclaurin'``. The parameter `a` is usually a rational number `a = p/q`, and may be specified as such by passing an integer tuple `(p, q)`. Evaluation is supported for arbitrary complex `a`, but may be slow and/or inaccurate when `\Re(s) < 0` for nonrational `a` or when computing derivatives. **Examples** Some values of the Riemann zeta function:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> zeta(2); pi**2 / 6 1.644934066848226436472415 1.644934066848226436472415 >>> zeta(0) -0.5 >>> zeta(-1) -0.08333333333333333333333333 >>> zeta(-2) 0.0 For large positive `s`, `\zeta(s)` rapidly approaches 1:: >>> zeta(50) 1.000000000000000888178421 >>> zeta(100) 1.0 >>> zeta(inf) 1.0 >>> 1-sum((zeta(k)-1)/k for k in range(2,85)); +euler 0.5772156649015328606065121 0.5772156649015328606065121 >>> nsum(lambda k: zeta(k)-1, [2, inf]) 1.0 Evaluation is supported for complex `s` and `a`: >>> zeta(-3+4j) (-0.03373057338827757067584698 + 0.2774499251557093745297677j) >>> zeta(2+3j, -1+j) (389.6841230140842816370741 + 295.2674610150305334025962j) The Riemann zeta function has so-called nontrivial zeros on the critical line `s = 1/2 + it`:: >>> findroot(zeta, 0.5+14j); zetazero(1) (0.5 + 14.13472514173469379045725j) (0.5 + 14.13472514173469379045725j) >>> findroot(zeta, 0.5+21j); zetazero(2) (0.5 + 21.02203963877155499262848j) (0.5 + 21.02203963877155499262848j) >>> findroot(zeta, 0.5+25j); zetazero(3) (0.5 + 25.01085758014568876321379j) (0.5 + 25.01085758014568876321379j) >>> chop(zeta(zetazero(10))) 0.0 Evaluation on and near the critical line is supported for large heights `t` by means of the Riemann-Siegel formula (currently for `a = 1`, `n \le 4`):: >>> zeta(0.5+100000j) (1.073032014857753132114076 + 5.780848544363503984261041j) >>> zeta(0.75+1000000j) (0.9535316058375145020351559 + 0.9525945894834273060175651j) >>> zeta(0.5+10000000j) (11.45804061057709254500227 - 8.643437226836021723818215j) >>> zeta(0.5+100000000j, derivative=1) (51.12433106710194942681869 + 43.87221167872304520599418j) >>> zeta(0.5+100000000j, derivative=2) (-444.2760822795430400549229 - 896.3789978119185981665403j) >>> zeta(0.5+100000000j, derivative=3) (3230.72682687670422215339 + 14374.36950073615897616781j) >>> zeta(0.5+100000000j, derivative=4) (-11967.35573095046402130602 - 218945.7817789262839266148j) >>> zeta(1+10000000j) # off the line (2.859846483332530337008882 + 
0.491808047480981808903986j) >>> zeta(1+10000000j, derivative=1) (-4.333835494679647915673205 - 0.08405337962602933636096103j) >>> zeta(1+10000000j, derivative=4) (453.2764822702057701894278 - 581.963625832768189140995j) For investigation of the zeta function zeros, the Riemann-Siegel Z-function is often more convenient than working with the Riemann zeta function directly (see :func:`~mpmath.siegelz`). Some values of the Hurwitz zeta function:: >>> zeta(2, 3); -5./4 + pi**2/6 0.3949340668482264364724152 0.3949340668482264364724152 >>> zeta(2, (3,4)); pi**2 - 8*catalan 2.541879647671606498397663 2.541879647671606498397663 For positive integer values of `s`, the Hurwitz zeta function is equivalent to a polygamma function (except for a normalizing factor):: >>> zeta(4, (1,5)); psi(3, '1/5')/6 625.5408324774542966919938 625.5408324774542966919938 Evaluation of derivatives:: >>> zeta(0, 3+4j, 1); loggamma(3+4j) - ln(2*pi)/2 (-2.675565317808456852310934 + 4.742664438034657928194889j) (-2.675565317808456852310934 + 4.742664438034657928194889j) >>> zeta(2, 1, 20) 2432902008176640000.000242 >>> zeta(3+4j, 5.5+2j, 4) (-0.140075548947797130681075 - 0.3109263360275413251313634j) >>> zeta(0.5+100000j, 1, 4) (-10407.16081931495861539236 + 13777.78669862804508537384j) >>> zeta(-100+0.5j, (1,3), derivative=4) (4.007180821099823942702249e+79 + 4.916117957092593868321778e+78j) Generating a Taylor series at `s = 2` using derivatives:: >>> for k in range(11): print("%s * (s-2)^%i" % (zeta(2,1,k)/fac(k), k)) ... 1.644934066848226436472415 * (s-2)^0 -0.9375482543158437537025741 * (s-2)^1 0.9946401171494505117104293 * (s-2)^2 -1.000024300473840810940657 * (s-2)^3 1.000061933072352565457512 * (s-2)^4 -1.000006869443931806408941 * (s-2)^5 1.000000173233769531820592 * (s-2)^6 -0.9999999569989868493432399 * (s-2)^7 0.9999999937218844508684206 * (s-2)^8 -0.9999999996355013916608284 * (s-2)^9 1.000000000004610645020747 * (s-2)^10 Evaluation at zero and for negative integer `s`:: >>> zeta(0, 10) -9.5 >>> zeta(-2, (2,3)); mpf(1)/81 0.01234567901234567901234568 0.01234567901234567901234568 >>> zeta(-3+4j, (5,4)) (0.2899236037682695182085988 + 0.06561206166091757973112783j) >>> zeta(-3.25, 1/pi) -0.0005117269627574430494396877 >>> zeta(-3.5, pi, 1) 11.156360390440003294709 >>> zeta(-100.5, (8,3)) -4.68162300487989766727122e+77 >>> zeta(-10.5, (-8,3)) (-0.01521913704446246609237979 + 29907.72510874248161608216j) >>> zeta(-1000.5, (-8,3)) (1.031911949062334538202567e+1770 + 1.519555750556794218804724e+426j) >>> zeta(-1+j, 3+4j) (-16.32988355630802510888631 - 22.17706465801374033261383j) >>> zeta(-1+j, 3+4j, 2) (32.48985276392056641594055 - 51.11604466157397267043655j) >>> diff(lambda s: zeta(s, 3+4j), -1+j, 2) (32.48985276392056641594055 - 51.11604466157397267043655j) **References** 1. http://mathworld.wolfram.com/RiemannZetaFunction.html 2. http://mathworld.wolfram.com/HurwitzZetaFunction.html 3. http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P155.pdf """ dirichlet = r""" Evaluates the Dirichlet L-function .. math :: L(s,\chi) = \sum_{k=1}^\infty \frac{\chi(k)}{k^s}. where `\chi` is a periodic sequence of length `q` which should be supplied in the form of a list `[\chi(0), \chi(1), \ldots, \chi(q-1)]`. Strictly, `\chi` should be a Dirichlet character, but any periodic sequence will work. For example, ``dirichlet(s, [1])`` gives the ordinary Riemann zeta function and ``dirichlet(s, [-1,1])`` gives the alternating zeta function (Dirichlet eta function). 
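Any such L-series can be written as a finite combination of Hurwitz zeta values, `L(s,\chi) = q^{-s} \sum_{k=1}^{q} \chi(k) \zeta(s, k/q)`, which gives an independent way to check a value; a small illustrative sketch (not one of the standard examples, with an arbitrary loose tolerance):: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> chi = [0, 1, 0, -1] >>> q = len(chi); s = mpf(2.5) >>> rhs = q**(-s) * fsum(chi[k % q] * zeta(s, mpf(k)/q) for k in range(1, q+1)) >>> abs(dirichlet(s, chi) - rhs) < 1e-20 True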
Also the derivative with respect to `s` (currently only a first derivative) can be evaluated. **Examples** The ordinary Riemann zeta function:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> dirichlet(3, [1]); zeta(3) 1.202056903159594285399738 1.202056903159594285399738 >>> dirichlet(1, [1]) +inf The alternating zeta function:: >>> dirichlet(1, [-1,1]); ln(2) 0.6931471805599453094172321 0.6931471805599453094172321 The following defines the Dirichlet beta function `\beta(s) = \sum_{k=0}^\infty \frac{(-1)^k}{(2k+1)^s}` and verifies several values of this function:: >>> B = lambda s, d=0: dirichlet(s, [0, 1, 0, -1], d) >>> B(0); 1./2 0.5 0.5 >>> B(1); pi/4 0.7853981633974483096156609 0.7853981633974483096156609 >>> B(2); +catalan 0.9159655941772190150546035 0.9159655941772190150546035 >>> B(2,1); diff(B, 2) 0.08158073611659279510291217 0.08158073611659279510291217 >>> B(-1,1); 2*catalan/pi 0.5831218080616375602767689 0.5831218080616375602767689 >>> B(0,1); log(gamma(0.25)**2/(2*pi*sqrt(2))) 0.3915943927068367764719453 0.3915943927068367764719454 >>> B(1,1); 0.25*pi*(euler+2*ln2+3*ln(pi)-4*ln(gamma(0.25))) 0.1929013167969124293631898 0.1929013167969124293631898 A custom L-series of period 3:: >>> dirichlet(2, [2,0,1]) 0.7059715047839078092146831 >>> 2*nsum(lambda k: (3*k)**-2, [1,inf]) + \ ... nsum(lambda k: (3*k+2)**-2, [0,inf]) 0.7059715047839078092146831 """ coulombf = r""" Calculates the regular Coulomb wave function .. math :: F_l(\eta,z) = C_l(\eta) z^{l+1} e^{-iz} \,_1F_1(l+1-i\eta, 2l+2, 2iz) where the normalization constant `C_l(\eta)` is as calculated by :func:`~mpmath.coulombc`. This function solves the differential equation .. math :: f''(z) + \left(1-\frac{2\eta}{z}-\frac{l(l+1)}{z^2}\right) f(z) = 0. A second linearly independent solution is given by the irregular Coulomb wave function `G_l(\eta,z)` (see :func:`~mpmath.coulombg`) and thus the general solution is `f(z) = C_1 F_l(\eta,z) + C_2 G_l(\eta,z)` for arbitrary constants `C_1`, `C_2`. Physically, the Coulomb wave functions give the radial solution to the Schrodinger equation for a point particle in a `1/z` potential; `z` is then the radius and `l`, `\eta` are quantum numbers. The Coulomb wave functions with real parameters are defined in Abramowitz & Stegun, section 14. However, all parameters are permitted to be complex in this implementation (see references). **Plots** .. literalinclude :: /plots/coulombf.py .. image :: /plots/coulombf.png .. literalinclude :: /plots/coulombf_c.py .. image :: /plots/coulombf_c.png **Examples** Evaluation is supported for arbitrary magnitudes of `z`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> coulombf(2, 1.5, 3.5) 0.4080998961088761187426445 >>> coulombf(-2, 1.5, 3.5) 0.7103040849492536747533465 >>> coulombf(2, 1.5, '1e-10') 4.143324917492256448770769e-33 >>> coulombf(2, 1.5, 1000) 0.4482623140325567050716179 >>> coulombf(2, 1.5, 10**10) -0.066804196437694360046619 Verifying the differential equation:: >>> l, eta, z = 2, 3, mpf(2.75) >>> A, B = 1, 2 >>> f = lambda z: A*coulombf(l,eta,z) + B*coulombg(l,eta,z) >>> chop(diff(f,z,2) + (1-2*eta/z - l*(l+1)/z**2)*f(z)) 0.0 A Wronskian relation satisfied by the Coulomb wave functions:: >>> l = 2 >>> eta = 1.5 >>> F = lambda z: coulombf(l,eta,z) >>> G = lambda z: coulombg(l,eta,z) >>> for z in [3.5, -1, 2+3j]: ... chop(diff(F,z)*G(z) - F(z)*diff(G,z)) ... 1.0 1.0 1.0 Another Wronskian relation:: >>> F = coulombf >>> G = coulombg >>> for z in [3.5, -1, 2+3j]: ... 
chop(F(l-1,eta,z)*G(l,eta,z)-F(l,eta,z)*G(l-1,eta,z) - l/sqrt(l**2+eta**2)) ... 0.0 0.0 0.0 An integral identity connecting the regular and irregular wave functions:: >>> l, eta, z = 4+j, 2-j, 5+2j >>> coulombf(l,eta,z) + j*coulombg(l,eta,z) (0.7997977752284033239714479 + 0.9294486669502295512503127j) >>> g = lambda t: exp(-t)*t**(l-j*eta)*(t+2*j*z)**(l+j*eta) >>> j*exp(-j*z)*z**(-l)/fac(2*l+1)/coulombc(l,eta)*quad(g, [0,inf]) (0.7997977752284033239714479 + 0.9294486669502295512503127j) Some test case with complex parameters, taken from Michel [2]:: >>> mp.dps = 15 >>> coulombf(1+0.1j, 50+50j, 100.156) (-1.02107292320897e+15 - 2.83675545731519e+15j) >>> coulombg(1+0.1j, 50+50j, 100.156) (2.83675545731519e+15 - 1.02107292320897e+15j) >>> coulombf(1e-5j, 10+1e-5j, 0.1+1e-6j) (4.30566371247811e-14 - 9.03347835361657e-19j) >>> coulombg(1e-5j, 10+1e-5j, 0.1+1e-6j) (778709182061.134 + 18418936.2660553j) The following reproduces a table in Abramowitz & Stegun, at twice the precision:: >>> mp.dps = 10 >>> eta = 2; z = 5 >>> for l in [5, 4, 3, 2, 1, 0]: ... print("%s %s %s" % (l, coulombf(l,eta,z), ... diff(lambda z: coulombf(l,eta,z), z))) ... 5 0.09079533488 0.1042553261 4 0.2148205331 0.2029591779 3 0.4313159311 0.320534053 2 0.7212774133 0.3952408216 1 0.9935056752 0.3708676452 0 1.143337392 0.2937960375 **References** 1. I.J. Thompson & A.R. Barnett, "Coulomb and Bessel Functions of Complex Arguments and Order", J. Comp. Phys., vol 64, no. 2, June 1986. 2. N. Michel, "Precise Coulomb wave functions for a wide range of complex `l`, `\eta` and `z`", http://arxiv.org/abs/physics/0702051v1 """ coulombg = r""" Calculates the irregular Coulomb wave function .. math :: G_l(\eta,z) = \frac{F_l(\eta,z) \cos(\chi) - F_{-l-1}(\eta,z)}{\sin(\chi)} where `\chi = \sigma_l - \sigma_{-l-1} - (l+1/2) \pi` and `\sigma_l(\eta) = (\ln \Gamma(1+l+i\eta)-\ln \Gamma(1+l-i\eta))/(2i)`. See :func:`~mpmath.coulombf` for additional information. **Plots** .. literalinclude :: /plots/coulombg.py .. image :: /plots/coulombg.png .. literalinclude :: /plots/coulombg_c.py .. image :: /plots/coulombg_c.png **Examples** Evaluation is supported for arbitrary magnitudes of `z`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> coulombg(-2, 1.5, 3.5) 1.380011900612186346255524 >>> coulombg(2, 1.5, 3.5) 1.919153700722748795245926 >>> coulombg(-2, 1.5, '1e-10') 201126715824.7329115106793 >>> coulombg(-2, 1.5, 1000) 0.1802071520691149410425512 >>> coulombg(-2, 1.5, 10**10) 0.652103020061678070929794 The following reproduces a table in Abramowitz & Stegun, at twice the precision:: >>> mp.dps = 10 >>> eta = 2; z = 5 >>> for l in [1, 2, 3, 4, 5]: ... print("%s %s %s" % (l, coulombg(l,eta,z), ... -diff(lambda z: coulombg(l,eta,z), z))) ... 1 1.08148276 0.6028279961 2 1.496877075 0.5661803178 3 2.048694714 0.7959909551 4 3.09408669 1.731802374 5 5.629840456 4.549343289 Evaluation close to the singularity at `z = 0`:: >>> mp.dps = 15 >>> coulombg(0,10,1) 3088184933.67358 >>> coulombg(0,10,'1e-10') 5554866000719.8 >>> coulombg(0,10,'1e-100') 5554866221524.1 Evaluation with a half-integer value for `l`:: >>> coulombg(1.5, 1, 10) 0.852320038297334 """ coulombc = r""" Gives the normalizing Gamow constant for Coulomb wave functions, .. math :: C_l(\eta) = 2^l \exp\left(-\pi \eta/2 + [\ln \Gamma(1+l+i\eta) + \ln \Gamma(1+l-i\eta)]/2 - \ln \Gamma(2l+2)\right), where the log gamma function with continuous imaginary part away from the negative half axis (see :func:`~mpmath.loggamma`) is implied. 
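Since the formula only involves :func:`~mpmath.loggamma` and elementary functions, it can be re-evaluated directly; the following sketch is for illustration only (with an arbitrary loose tolerance) and simply restates the definition above:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> l, eta = mpf(2), mpf(1.5) >>> direct = 2**l * exp(-pi*eta/2 + (loggamma(1+l+j*eta) + loggamma(1+l-j*eta))/2 - loggamma(2*l+2)) >>> abs(coulombc(l, eta) - direct) < 1e-20 True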
This function is used internally for the calculation of Coulomb wave functions, and automatically cached to make multiple evaluations with fixed `l`, `\eta` fast. """ ellipfun = r""" Computes any of the Jacobi elliptic functions, defined in terms of Jacobi theta functions as .. math :: \mathrm{sn}(u,m) = \frac{\vartheta_3(0,q)}{\vartheta_2(0,q)} \frac{\vartheta_1(t,q)}{\vartheta_4(t,q)} \mathrm{cn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_2(0,q)} \frac{\vartheta_2(t,q)}{\vartheta_4(t,q)} \mathrm{dn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_3(0,q)} \frac{\vartheta_3(t,q)}{\vartheta_4(t,q)}, or more generally computes a ratio of two such functions. Here `t = u/\vartheta_3(0,q)^2`, and `q = q(m)` denotes the nome (see :func:`~mpmath.nome`). Optionally, you can specify the nome directly instead of `m` by passing ``q=<value>``, or you can directly specify the elliptic parameter `k` with ``k=<value>``. The first argument should be a two-character string specifying the function using any combination of ``'s'``, ``'c'``, ``'d'``, ``'n'``. These letters respectively denote the basic functions `\mathrm{sn}(u,m)`, `\mathrm{cn}(u,m)`, `\mathrm{dn}(u,m)`, and `1`. The identifier specifies the ratio of two such functions. For example, ``'ns'`` identifies the function .. math :: \mathrm{ns}(u,m) = \frac{1}{\mathrm{sn}(u,m)} and ``'cd'`` identifies the function .. math :: \mathrm{cd}(u,m) = \frac{\mathrm{cn}(u,m)}{\mathrm{dn}(u,m)}. If called with only the first argument, a function object evaluating the chosen function for given arguments is returned. **Examples** Basic evaluation:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> ellipfun('cd', 3.5, 0.5) -0.9891101840595543931308394 >>> ellipfun('cd', 3.5, q=0.25) 0.07111979240214668158441418 The sn-function is doubly periodic in the complex plane with periods `4 K(m)` and `2 i K(1-m)` (see :func:`~mpmath.ellipk`):: >>> sn = ellipfun('sn') >>> sn(2, 0.25) 0.9628981775982774425751399 >>> sn(2+4*ellipk(0.25), 0.25) 0.9628981775982774425751399 >>> chop(sn(2+2*j*ellipk(1-0.25), 0.25)) 0.9628981775982774425751399 The cn-function is doubly periodic with periods `4 K(m)` and `4 i K(1-m)`:: >>> cn = ellipfun('cn') >>> cn(2, 0.25) -0.2698649654510865792581416 >>> cn(2+4*ellipk(0.25), 0.25) -0.2698649654510865792581416 >>> chop(cn(2+4*j*ellipk(1-0.25), 0.25)) -0.2698649654510865792581416 The dn-function is doubly periodic with periods `2 K(m)` and `4 i K(1-m)`:: >>> dn = ellipfun('dn') >>> dn(2, 0.25) 0.8764740583123262286931578 >>> dn(2+2*ellipk(0.25), 0.25) 0.8764740583123262286931578 >>> chop(dn(2+4*j*ellipk(1-0.25), 0.25)) 0.8764740583123262286931578 """ jtheta = r""" Computes the Jacobi theta function `\vartheta_n(z, q)`, where `n = 1, 2, 3, 4`, defined by the infinite series: .. math :: \vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty} (-1)^n q^{n^2+n\,} \sin((2n+1)z) \vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty} q^{n^{2\,} + n} \cos((2n+1)z) \vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty} q^{n^2\,} \cos(2 n z) \vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty} (-q)^{n^2\,} \cos(2 n z) The theta functions are functions of two variables: * `z` is the *argument*, an arbitrary real or complex number * `q` is the *nome*, which must be a real or complex number in the unit disk (i.e. `|q| < 1`). For `|q| \ll 1`, the series converge very quickly, so the Jacobi theta functions can efficiently be evaluated to high precision. The compact notations `\vartheta_n(q) = \vartheta_n(0,q)` and `\vartheta_n = \vartheta_n(0,q)` are also frequently encountered. 
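For moderate `|q|` the defining series themselves make a convenient cross-check; here is a small illustrative sketch (not one of the standard examples, with an arbitrary loose tolerance) for `\vartheta_3`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> z, q = mpf(0.7), mpf(0.2) >>> series3 = 1 + 2*nsum(lambda n: q**(n**2) * cos(2*n*z), [1, inf]) >>> abs(jtheta(3, z, q) - series3) < 1e-20 True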
Finally, Jacobi theta functions are frequently considered as functions of the half-period ratio `\tau` and then usually denoted by `\vartheta_n(z|\tau)`. Optionally, ``jtheta(n, z, q, derivative=d)`` with `d > 0` computes a `d`-th derivative with respect to `z`. **Examples and basic properties** Considered as functions of `z`, the Jacobi theta functions may be viewed as generalizations of the ordinary trigonometric functions cos and sin. They are periodic functions:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> jtheta(1, 0.25, '0.2') 0.2945120798627300045053104 >>> jtheta(1, 0.25 + 2*pi, '0.2') 0.2945120798627300045053104 Indeed, the series defining the theta functions are essentially trigonometric Fourier series. The coefficients can be retrieved using :func:`~mpmath.fourier`:: >>> mp.dps = 10 >>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4)) ([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]) The Jacobi theta functions are also so-called quasiperiodic functions of `z` and `\tau`, meaning that for fixed `\tau`, `\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same except for an exponential factor:: >>> mp.dps = 25 >>> tau = 3*j/10 >>> q = exp(pi*j*tau) >>> z = 10 >>> jtheta(4, z+tau*pi, q) (-0.682420280786034687520568 + 1.526683999721399103332021j) >>> -exp(-2*j*z)/q * jtheta(4, z, q) (-0.682420280786034687520568 + 1.526683999721399103332021j) The Jacobi theta functions satisfy a huge number of other functional equations, such as the following identity (valid for any `q`):: >>> q = mpf(3)/10 >>> jtheta(3,0,q)**4 6.823744089352763305137427 >>> jtheta(2,0,q)**4 + jtheta(4,0,q)**4 6.823744089352763305137427 Extensive listings of identities satisfied by the Jacobi theta functions can be found in standard reference works. The Jacobi theta functions are related to the gamma function for special arguments:: >>> jtheta(3, 0, exp(-pi)) 1.086434811213308014575316 >>> pi**(1/4.) / gamma(3/4.) 1.086434811213308014575316 :func:`~mpmath.jtheta` supports arbitrary precision evaluation and complex arguments:: >>> mp.dps = 50 >>> jtheta(4, sqrt(2), 0.5) 2.0549510717571539127004115835148878097035750653737 >>> mp.dps = 25 >>> jtheta(4, 1+2j, (1+j)/5) (7.180331760146805926356634 - 1.634292858119162417301683j) Evaluation of derivatives:: >>> mp.dps = 25 >>> jtheta(1, 7, 0.25, 1); diff(lambda z: jtheta(1, z, 0.25), 7) 1.209857192844475388637236 1.209857192844475388637236 >>> jtheta(1, 7, 0.25, 2); diff(lambda z: jtheta(1, z, 0.25), 7, 2) -0.2598718791650217206533052 -0.2598718791650217206533052 >>> jtheta(2, 7, 0.25, 1); diff(lambda z: jtheta(2, z, 0.25), 7) -1.150231437070259644461474 -1.150231437070259644461474 >>> jtheta(2, 7, 0.25, 2); diff(lambda z: jtheta(2, z, 0.25), 7, 2) -0.6226636990043777445898114 -0.6226636990043777445898114 >>> jtheta(3, 7, 0.25, 1); diff(lambda z: jtheta(3, z, 0.25), 7) -0.9990312046096634316587882 -0.9990312046096634316587882 >>> jtheta(3, 7, 0.25, 2); diff(lambda z: jtheta(3, z, 0.25), 7, 2) -0.1530388693066334936151174 -0.1530388693066334936151174 >>> jtheta(4, 7, 0.25, 1); diff(lambda z: jtheta(4, z, 0.25), 7) 0.9820995967262793943571139 0.9820995967262793943571139 >>> jtheta(4, 7, 0.25, 2); diff(lambda z: jtheta(4, z, 0.25), 7, 2) 0.3936902850291437081667755 0.3936902850291437081667755 **Possible issues** For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`~mpmath.jtheta` raises ``ValueError``. 
This exception is also raised for `|q|` extremely close to 1 (or equivalently `\tau` very close to 0), since the series would converge too slowly:: >>> jtheta(1, 10, 0.99999999 * exp(0.5*j)) Traceback (most recent call last): ... ValueError: abs(q) > THETA_Q_LIM = 1.000000 """ eulernum = r""" Gives the `n`-th Euler number, defined as the `n`-th derivative of `\mathrm{sech}(t) = 1/\cosh(t)` evaluated at `t = 0`. Equivalently, the Euler numbers give the coefficients of the Taylor series .. math :: \mathrm{sech}(t) = \sum_{n=0}^{\infty} \frac{E_n}{n!} t^n. The Euler numbers are closely related to Bernoulli numbers and Bernoulli polynomials. They can also be evaluated in terms of Euler polynomials (see :func:`~mpmath.eulerpoly`) as `E_n = 2^n E_n(1/2)`. **Examples** Computing the first few Euler numbers and verifying that they agree with the Taylor series:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> [eulernum(n) for n in range(11)] [1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0] >>> chop(diffs(sech, 0, 10)) [1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0] Euler numbers grow very rapidly. :func:`~mpmath.eulernum` efficiently computes numerical approximations for large indices:: >>> eulernum(50) -6.053285248188621896314384e+54 >>> eulernum(1000) 3.887561841253070615257336e+2371 >>> eulernum(10**20) 4.346791453661149089338186e+1936958564106659551331 Comparing with an asymptotic formula for the Euler numbers:: >>> n = 10**5 >>> (-1)**(n//2) * 8 * sqrt(n/(2*pi)) * (2*n/(pi*e))**n 3.69919063017432362805663e+436961 >>> eulernum(n) 3.699193712834466537941283e+436961 Pass ``exact=True`` to obtain exact values of Euler numbers as integers:: >>> print(eulernum(50, exact=True)) -6053285248188621896314383785111649088103498225146815121 >>> print(eulernum(200, exact=True) % 10**10) 1925859625 >>> eulernum(1001, exact=True) 0 """ eulerpoly = r""" Evaluates the Euler polynomial `E_n(z)`, defined by the generating function representation .. math :: \frac{2e^{zt}}{e^t+1} = \sum_{n=0}^\infty E_n(z) \frac{t^n}{n!}. The Euler polynomials may also be represented in terms of Bernoulli polynomials (see :func:`~mpmath.bernpoly`) using various formulas, for example .. math :: E_n(z) = \frac{2}{n+1} \left( B_n(z)-2^{n+1}B_n\left(\frac{z}{2}\right) \right). Special values include the Euler numbers `E_n = 2^n E_n(1/2)` (see :func:`~mpmath.eulernum`). **Examples** Computing the coefficients of the first few Euler polynomials:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> for n in range(6): ... chop(taylor(lambda z: eulerpoly(n,z), 0, n)) ... [1.0] [-0.5, 1.0] [0.0, -1.0, 1.0] [0.25, 0.0, -1.5, 1.0] [0.0, 1.0, 0.0, -2.0, 1.0] [-0.5, 0.0, 2.5, 0.0, -2.5, 1.0] Evaluation for arbitrary `z`:: >>> eulerpoly(2,3) 6.0 >>> eulerpoly(5,4) 423.5 >>> eulerpoly(35, 11111111112) 3.994957561486776072734601e+351 >>> eulerpoly(4, 10+20j) (-47990.0 - 235980.0j) >>> eulerpoly(2, '-3.5e-5') 0.000035001225 >>> eulerpoly(3, 0.5) 0.0 >>> eulerpoly(55, -10**80) -1.0e+4400 >>> eulerpoly(5, -inf) -inf >>> eulerpoly(6, -inf) +inf Computing Euler numbers:: >>> 2**26 * eulerpoly(26,0.5) -4087072509293123892361.0 >>> eulernum(26) -4087072509293123892361.0 Evaluation is accurate for large `n` and small `z`:: >>> eulerpoly(100, 0.5) 2.29047999988194114177943e+108 >>> eulerpoly(1000, 10.5) 3.628120031122876847764566e+2070 >>> eulerpoly(10000, 10.5) 1.149364285543783412210773e+30688 """ spherharm = r""" Evaluates the spherical harmonic `Y_l^m(\theta,\phi)`, .. 
math :: Y_l^m(\theta,\phi) = \sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}} P_l^m(\cos \theta) e^{i m \phi} where `P_l^m` is an associated Legendre function (see :func:`~mpmath.legenp`). Here `\theta \in [0, \pi]` denotes the polar coordinate (ranging from the north pole to the south pole) and `\phi \in [0, 2 \pi]` denotes the azimuthal coordinate on a sphere. Care should be used since many different conventions for spherical coordinate variables are used. Usually spherical harmonics are considered for `l \in \mathbb{N}`, `m \in \mathbb{Z}`, `|m| \le l`. More generally, `l,m,\theta,\phi` are permitted to be complex numbers. .. note :: :func:`~mpmath.spherharm` returns a complex number, even if the value is purely real. **Plots** .. literalinclude :: /plots/spherharm40.py `Y_{4,0}`: .. image :: /plots/spherharm40.png `Y_{4,1}`: .. image :: /plots/spherharm41.png `Y_{4,2}`: .. image :: /plots/spherharm42.png `Y_{4,3}`: .. image :: /plots/spherharm43.png `Y_{4,4}`: .. image :: /plots/spherharm44.png **Examples** Some low-order spherical harmonics with reference values:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> theta = pi/4 >>> phi = pi/3 >>> spherharm(0,0,theta,phi); 0.5*sqrt(1/pi)*expj(0) (0.2820947917738781434740397 + 0.0j) (0.2820947917738781434740397 + 0.0j) >>> spherharm(1,-1,theta,phi); 0.5*sqrt(3/(2*pi))*expj(-phi)*sin(theta) (0.1221506279757299803965962 - 0.2115710938304086076055298j) (0.1221506279757299803965962 - 0.2115710938304086076055298j) >>> spherharm(1,0,theta,phi); 0.5*sqrt(3/pi)*cos(theta)*expj(0) (0.3454941494713354792652446 + 0.0j) (0.3454941494713354792652446 + 0.0j) >>> spherharm(1,1,theta,phi); -0.5*sqrt(3/(2*pi))*expj(phi)*sin(theta) (-0.1221506279757299803965962 - 0.2115710938304086076055298j) (-0.1221506279757299803965962 - 0.2115710938304086076055298j) With the normalization convention used, the spherical harmonics are orthonormal on the unit sphere:: >>> sphere = [0,pi], [0,2*pi] >>> dS = lambda t,p: fp.sin(t) # differential element >>> Y1 = lambda t,p: fp.spherharm(l1,m1,t,p) >>> Y2 = lambda t,p: fp.conj(fp.spherharm(l2,m2,t,p)) >>> l1 = l2 = 3; m1 = m2 = 2 >>> print(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere)) (1+0j) >>> m2 = 1 # m1 != m2 >>> print(fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere))) 0.0 Evaluation is accurate for large orders:: >>> spherharm(1000,750,0.5,0.25) (3.776445785304252879026585e-102 - 5.82441278771834794493484e-102j) Evaluation works with complex parameter values:: >>> spherharm(1+j, 2j, 2+3j, -0.5j) (64.44922331113759992154992 + 1981.693919841408089681743j) """ scorergi = r""" Evaluates the Scorer function .. math :: \operatorname{Gi}(z) = \operatorname{Ai}(z) \int_0^z \operatorname{Bi}(t) dt + \operatorname{Bi}(z) \int_z^{\infty} \operatorname{Ai}(t) dt which gives a particular solution to the inhomogeneous Airy differential equation `f''(z) - z f(z) = 1/\pi`. Another particular solution is given by the Scorer Hi-function (:func:`~mpmath.scorerhi`). The two functions are related as `\operatorname{Gi}(z) + \operatorname{Hi}(z) = \operatorname{Bi}(z)`. **Plots** .. literalinclude :: /plots/gi.py .. image :: /plots/gi.png .. literalinclude :: /plots/gi_c.py .. 
image :: /plots/gi_c.png **Examples** Some values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> scorergi(0); 1/(power(3,'7/6')*gamma('2/3')) 0.2049755424820002450503075 0.2049755424820002450503075 >>> diff(scorergi, 0); 1/(power(3,'5/6')*gamma('1/3')) 0.1494294524512754526382746 0.1494294524512754526382746 >>> scorergi(+inf); scorergi(-inf) 0.0 0.0 >>> scorergi(1) 0.2352184398104379375986902 >>> scorergi(-1) -0.1166722172960152826494198 Evaluation for large arguments:: >>> scorergi(10) 0.03189600510067958798062034 >>> scorergi(100) 0.003183105228162961476590531 >>> scorergi(1000000) 0.0000003183098861837906721743873 >>> 1/(pi*1000000) 0.0000003183098861837906715377675 >>> scorergi(-1000) -0.08358288400262780392338014 >>> scorergi(-100000) 0.02886866118619660226809581 >>> scorergi(50+10j) (0.0061214102799778578790984 - 0.001224335676457532180747917j) >>> scorergi(-50-10j) (5.236047850352252236372551e+29 - 3.08254224233701381482228e+29j) >>> scorergi(100000j) (-8.806659285336231052679025e+6474077 + 8.684731303500835514850962e+6474077j) Verifying the connection between Gi and Hi:: >>> z = 0.25 >>> scorergi(z) + scorerhi(z) 0.7287469039362150078694543 >>> airybi(z) 0.7287469039362150078694543 Verifying the differential equation:: >>> for z in [-3.4, 0, 2.5, 1+2j]: ... chop(diff(scorergi,z,2) - z*scorergi(z)) ... -0.3183098861837906715377675 -0.3183098861837906715377675 -0.3183098861837906715377675 -0.3183098861837906715377675 Verifying the integral representation:: >>> z = 0.5 >>> scorergi(z) 0.2447210432765581976910539 >>> Ai,Bi = airyai,airybi >>> Bi(z)*(Ai(inf,-1)-Ai(z,-1)) + Ai(z)*(Bi(z,-1)-Bi(0,-1)) 0.2447210432765581976910539 **References** 1. [DLMF]_ section 9.12: Scorer Functions """ scorerhi = r""" Evaluates the second Scorer function .. math :: \operatorname{Hi}(z) = \operatorname{Bi}(z) \int_{-\infty}^z \operatorname{Ai}(t) dt - \operatorname{Ai}(z) \int_{-\infty}^z \operatorname{Bi}(t) dt which gives a particular solution to the inhomogeneous Airy differential equation `f''(z) - z f(z) = 1/\pi`. See also :func:`~mpmath.scorergi`. **Plots** .. literalinclude :: /plots/hi.py .. image :: /plots/hi.png .. literalinclude :: /plots/hi_c.py .. image :: /plots/hi_c.png **Examples** Some values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> scorerhi(0); 2/(power(3,'7/6')*gamma('2/3')) 0.4099510849640004901006149 0.4099510849640004901006149 >>> diff(scorerhi,0); 2/(power(3,'5/6')*gamma('1/3')) 0.2988589049025509052765491 0.2988589049025509052765491 >>> scorerhi(+inf); scorerhi(-inf) +inf 0.0 >>> scorerhi(1) 0.9722051551424333218376886 >>> scorerhi(-1) 0.2206696067929598945381098 Evaluation for large arguments:: >>> scorerhi(10) 455641153.5163291358991077 >>> scorerhi(100) 6.041223996670201399005265e+288 >>> scorerhi(1000000) 7.138269638197858094311122e+289529652 >>> scorerhi(-10) 0.0317685352825022727415011 >>> scorerhi(-100) 0.003183092495767499864680483 >>> scorerhi(100j) (-6.366197716545672122983857e-9 + 0.003183098861710582761688475j) >>> scorerhi(50+50j) (-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j) >>> scorerhi(-1000-1000j) (0.0001591549432510502796565538 - 0.000159154943091895334973109j) Verifying the differential equation:: >>> for z in [-3.4, 0, 2, 1+2j]: ... chop(diff(scorerhi,z,2) - z*scorerhi(z)) ... 
0.3183098861837906715377675 0.3183098861837906715377675 0.3183098861837906715377675 0.3183098861837906715377675 Verifying the integral representation:: >>> z = 0.5 >>> scorerhi(z) 0.6095559998265972956089949 >>> Ai,Bi = airyai,airybi >>> Bi(z)*(Ai(z,-1)-Ai(-inf,-1)) - Ai(z)*(Bi(z,-1)-Bi(-inf,-1)) 0.6095559998265972956089949 """ stirling1 = r""" Gives the Stirling number of the first kind `s(n,k)`, defined by .. math :: x(x-1)(x-2)\cdots(x-n+1) = \sum_{k=0}^n s(n,k) x^k. The value is computed using an integer recurrence. The implementation is not optimized for approximating large values quickly. **Examples** Comparing with the generating function:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> taylor(lambda x: ff(x, 5), 0, 5) [0.0, 24.0, -50.0, 35.0, -10.0, 1.0] >>> [stirling1(5, k) for k in range(6)] [0.0, 24.0, -50.0, 35.0, -10.0, 1.0] Recurrence relation:: >>> n, k = 5, 3 >>> stirling1(n+1,k) + n*stirling1(n,k) - stirling1(n,k-1) 0.0 The matrices of Stirling numbers of first and second kind are inverses of each other:: >>> A = matrix(5, 5); B = matrix(5, 5) >>> for n in range(5): ... for k in range(5): ... A[n,k] = stirling1(n,k) ... B[n,k] = stirling2(n,k) ... >>> A * B [1.0 0.0 0.0 0.0 0.0] [0.0 1.0 0.0 0.0 0.0] [0.0 0.0 1.0 0.0 0.0] [0.0 0.0 0.0 1.0 0.0] [0.0 0.0 0.0 0.0 1.0] Pass ``exact=True`` to obtain exact values of Stirling numbers as integers:: >>> stirling1(42, 5) -2.864498971768501633736628e+50 >>> print(stirling1(42, 5, exact=True)) -286449897176850163373662803014001546235808317440000 """ stirling2 = r""" Gives the Stirling number of the second kind `S(n,k)`, defined by .. math :: x^n = \sum_{k=0}^n S(n,k) x(x-1)(x-2)\cdots(x-k+1) The value is computed using integer arithmetic to evaluate a power sum. The implementation is not optimized for approximating large values quickly. **Examples** Comparing with the generating function:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> taylor(lambda x: sum(stirling2(5,k) * ff(x,k) for k in range(6)), 0, 5) [0.0, 0.0, 0.0, 0.0, 0.0, 1.0] Recurrence relation:: >>> n, k = 5, 3 >>> stirling2(n+1,k) - k*stirling2(n,k) - stirling2(n,k-1) 0.0 Pass ``exact=True`` to obtain exact values of Stirling numbers as integers:: >>> stirling2(52, 10) 2.641822121003543906807485e+45 >>> print(stirling2(52, 10, exact=True)) 2641822121003543906807485307053638921722527655 """
279,815
26.892344
94
py
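A quick numerical cross-check of the elliptic-function documentation above: for any `u` and `m`, the standard identities `sn(u,m)^2 + cn(u,m)^2 = 1` and `dn(u,m)^2 + m*sn(u,m)^2 = 1` should hold. The following sketch uses only the documented ``ellipfun`` interface; the particular values of `u` and `m` are arbitrary choices for illustration.

from mpmath import mp, ellipfun, chop

mp.dps = 25
u = mp.mpf('1.3')
m = mp.mpf('0.25')
sn = ellipfun('sn', u, m)
cn = ellipfun('cn', u, m)
dn = ellipfun('dn', u, m)
print(chop(sn**2 + cn**2 - 1))    # expected: 0.0 up to rounding
print(chop(dn**2 + m*sn**2 - 1))  # expected: 0.0 up to rounding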
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/visualization.py
""" Plotting (requires matplotlib) """ from colorsys import hsv_to_rgb, hls_to_rgb from .libmp import NoConvergence from .libmp.backend import xrange class VisualizationMethods(object): plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence) def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None, singularities=[], axes=None): r""" Shows a simple 2D plot of a function `f(x)` or list of functions `[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval specified by *xlim*. Some examples:: plot(lambda x: exp(x)*li(x), [1, 4]) plot([cos, sin], [-4, 4]) plot([fresnels, fresnelc], [-4, 4]) plot([sqrt, cbrt], [-4, 4]) plot(lambda t: zeta(0.5+t*j), [-20, 20]) plot([floor, ceil, abs, sign], [-5, 5]) Points where the function raises a numerical exception or returns an infinite value are removed from the graph. Singularities can also be excluded explicitly as follows (useful for removing erroneous vertical lines):: plot(cot, ylim=[-5, 5]) # bad plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi]) # good For parts where the function assumes complex values, the real part is plotted with dashes and the imaginary part is plotted with dots. .. note :: This function requires matplotlib (pylab). """ if file: axes = None fig = None if not axes: import pylab fig = pylab.figure() axes = fig.add_subplot(111) if not isinstance(f, (tuple, list)): f = [f] a, b = xlim colors = ['b', 'r', 'g', 'm', 'k'] for n, func in enumerate(f): x = ctx.arange(a, b, (b-a)/float(points)) segments = [] segment = [] in_complex = False for i in xrange(len(x)): try: if i != 0: for sing in singularities: if x[i-1] <= sing and x[i] >= sing: raise ValueError v = func(x[i]) if ctx.isnan(v) or abs(v) > 1e300: raise ValueError if hasattr(v, "imag") and v.imag: re = float(v.real) im = float(v.imag) if not in_complex: in_complex = True segments.append(segment) segment = [] segment.append((float(x[i]), re, im)) else: if in_complex: in_complex = False segments.append(segment) segment = [] if hasattr(v, "real"): v = v.real segment.append((float(x[i]), v)) except ctx.plot_ignore: if segment: segments.append(segment) segment = [] if segment: segments.append(segment) for segment in segments: x = [s[0] for s in segment] y = [s[1] for s in segment] if not x: continue c = colors[n % len(colors)] if len(segment[0]) == 3: z = [s[2] for s in segment] axes.plot(x, y, '--'+c, linewidth=3) axes.plot(x, z, ':'+c, linewidth=3) else: axes.plot(x, y, c, linewidth=3) axes.set_xlim([float(_) for _ in xlim]) if ylim: axes.set_ylim([float(_) for _ in ylim]) axes.set_xlabel('x') axes.set_ylabel('f(x)') axes.grid(True) if fig: if file: pylab.savefig(file, dpi=dpi) else: pylab.show() def default_color_function(ctx, z): if ctx.isinf(z): return (1.0, 1.0, 1.0) if ctx.isnan(z): return (0.5, 0.5, 0.5) pi = 3.1415926535898 a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi) a = (a + 0.5) % 1.0 b = 1.0 - float(1/(1.0+abs(z)**0.3)) return hls_to_rgb(a, b, 0.8) blue_orange_colors = [ (-1.0, (0.0, 0.0, 0.0)), (-0.95, (0.1, 0.2, 0.5)), # dark blue (-0.5, (0.0, 0.5, 1.0)), # blueish (-0.05, (0.4, 0.8, 0.8)), # cyanish ( 0.0, (1.0, 1.0, 1.0)), ( 0.05, (1.0, 0.9, 0.3)), # yellowish ( 0.5, (0.9, 0.5, 0.0)), # orangeish ( 0.95, (0.7, 0.1, 0.0)), # redish ( 1.0, (0.0, 0.0, 0.0)), ( 2.0, (0.0, 0.0, 0.0)), ] def phase_color_function(ctx, z): if ctx.isinf(z): return (1.0, 1.0, 1.0) if ctx.isnan(z): return (0.5, 0.5, 0.5) pi = 3.1415926535898 w = float(ctx.arg(z)) / pi w = max(min(w, 1.0), -1.0) for i in range(1,len(blue_orange_colors)): if 
blue_orange_colors[i][0] > w: a, (ra, ga, ba) = blue_orange_colors[i-1] b, (rb, gb, bb) = blue_orange_colors[i] s = (w-a) / (b-a) return ra+(rb-ra)*s, ga+(gb-ga)*s, ba+(bb-ba)*s def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None, verbose=False, file=None, dpi=None, axes=None): """ Plots the given complex-valued function *f* over a rectangular part of the complex plane specified by the pairs of intervals *re* and *im*. For example:: cplot(lambda z: z, [-2, 2], [-10, 10]) cplot(exp) cplot(zeta, [0, 1], [0, 50]) By default, the complex argument (phase) is shown as color (hue) and the magnitude is show as brightness. You can also supply a custom color function (*color*). This function should take a complex number as input and return an RGB 3-tuple containing floats in the range 0.0-1.0. Alternatively, you can select a builtin color function by passing a string as *color*: * "default" - default color scheme * "phase" - a color scheme that only renders the phase of the function, with white for positive reals, black for negative reals, gold in the upper half plane, and blue in the lower half plane. To obtain a sharp image, the number of points may need to be increased to 100,000 or thereabout. Since evaluating the function that many times is likely to be slow, the 'verbose' option is useful to display progress. .. note :: This function requires matplotlib (pylab). """ if color is None or color == "default": color = ctx.default_color_function if color == "phase": color = ctx.phase_color_function import pylab if file: axes = None fig = None if not axes: fig = pylab.figure() axes = fig.add_subplot(111) rea, reb = re ima, imb = im dre = reb - rea dim = imb - ima M = int(ctx.sqrt(points*dre/dim)+1) N = int(ctx.sqrt(points*dim/dre)+1) x = pylab.linspace(rea, reb, M) y = pylab.linspace(ima, imb, N) # Note: we have to be careful to get the right rotation. # Test with these plots: # cplot(lambda z: z if z.real < 0 else 0) # cplot(lambda z: z if z.imag < 0 else 0) w = pylab.zeros((N, M, 3)) for n in xrange(N): for m in xrange(M): z = ctx.mpc(x[m], y[n]) try: v = color(f(z)) except ctx.plot_ignore: v = (0.5, 0.5, 0.5) w[n,m] = v if verbose: print(str(n) + ' of ' + str(N)) rea, reb, ima, imb = [float(_) for _ in [rea, reb, ima, imb]] axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower') axes.set_xlabel('Re(z)') axes.set_ylabel('Im(z)') if fig: if file: pylab.savefig(file, dpi=dpi) else: pylab.show() def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \ wireframe=False, file=None, dpi=None, axes=None): """ Plots the surface defined by `f`. If `f` returns a single component, then this plots the surface defined by `z = f(x,y)` over the rectangular domain with `x = u` and `y = v`. If `f` returns three components, then this plots the parametric surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`. For example, to plot a simple function:: >>> from mpmath import * >>> f = lambda x, y: sin(x+y)*cos(y) >>> splot(f, [-pi,pi], [-pi,pi]) # doctest: +SKIP Plotting a donut:: >>> r, R = 1, 2.5 >>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)] >>> splot(f, [0, 2*pi], [0, 2*pi]) # doctest: +SKIP .. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher. 
""" import pylab import mpl_toolkits.mplot3d as mplot3d if file: axes = None fig = None if not axes: fig = pylab.figure() axes = mplot3d.axes3d.Axes3D(fig) ua, ub = u va, vb = v du = ub - ua dv = vb - va if not isinstance(points, (list, tuple)): points = [points, points] M, N = points u = pylab.linspace(ua, ub, M) v = pylab.linspace(va, vb, N) x, y, z = [pylab.zeros((M, N)) for i in xrange(3)] xab, yab, zab = [[0, 0] for i in xrange(3)] for n in xrange(N): for m in xrange(M): fdata = f(ctx.convert(u[m]), ctx.convert(v[n])) try: x[m,n], y[m,n], z[m,n] = fdata except TypeError: x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]: if c < cab[0]: cab[0] = c if c > cab[1]: cab[1] = c if wireframe: axes.plot_wireframe(x, y, z, rstride=4, cstride=4) else: axes.plot_surface(x, y, z, rstride=4, cstride=4) axes.set_xlabel('x') axes.set_ylabel('y') axes.set_zlabel('z') if keep_aspect: dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]] maxd = max(dx, dy, dz) if dx < maxd: delta = maxd - dx axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0) if dy < maxd: delta = maxd - dy axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0) if dz < maxd: delta = maxd - dz axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0) if fig: if file: pylab.savefig(file, dpi=dpi) else: pylab.show() VisualizationMethods.plot = plot VisualizationMethods.default_color_function = default_color_function VisualizationMethods.phase_color_function = phase_color_function VisualizationMethods.cplot = cplot VisualizationMethods.splot = splot
10,627
32.847134
81
py
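A minimal, hedged usage sketch for the plotting helpers defined in the module above (it assumes matplotlib is installed and that ``plot``/``cplot`` are exposed at the mpmath top level; the output file names are made up for illustration): ``plot`` draws real-valued curves, while ``cplot`` renders a complex function with the phase as hue and the magnitude as brightness.

from mpmath import mp, plot, cplot, sin, zeta

mp.dps = 15
# Real plot of sin(x) on [-5, 5], written to a file instead of shown interactively.
plot(sin, xlim=[-5, 5], file='sin.png', dpi=120)
# Complex portrait of zeta on a rectangle; a larger ``points`` value gives a sharper image.
cplot(zeta, re=[-2, 4], im=[-20, 20], points=20000, file='zeta.png')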
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/usertools.py
def monitor(f, input='print', output='print'): """ Returns a wrapped copy of *f* that monitors evaluation by calling *input* with every input (*args*, *kwargs*) passed to *f* and *output* with every value returned from *f*. The default action (specify using the special string value ``'print'``) is to print inputs and outputs to stdout, along with the total evaluation count:: >>> from mpmath import * >>> mp.dps = 5; mp.pretty = False >>> diff(monitor(exp), 1) # diff will eval f(x-h) and f(x+h) in 0 (mpf('0.99999999906867742538452148'),) {} out 0 mpf('2.7182818259274480055282064') in 1 (mpf('1.0000000009313225746154785'),) {} out 1 mpf('2.7182818309906424675501024') mpf('2.7182808') To disable either the input or the output handler, you may pass *None* as argument. Custom input and output handlers may be used e.g. to store results for later analysis:: >>> mp.dps = 15 >>> input = [] >>> output = [] >>> findroot(monitor(sin, input.append, output.append), 3.0) mpf('3.1415926535897932') >>> len(input) # Count number of evaluations 9 >>> print(input[3]); print(output[3]) ((mpf('3.1415076583334066'),), {}) 8.49952562843408e-5 >>> print(input[4]); print(output[4]) ((mpf('3.1415928201669122'),), {}) -1.66577118985331e-7 """ if not input: input = lambda v: None elif input == 'print': incount = [0] def input(value): args, kwargs = value print("in %s %r %r" % (incount[0], args, kwargs)) incount[0] += 1 if not output: output = lambda v: None elif output == 'print': outcount = [0] def output(value): print("out %s %r" % (outcount[0], value)) outcount[0] += 1 def f_monitored(*args, **kwargs): input((args, kwargs)) v = f(*args, **kwargs) output(v) return v return f_monitored def timing(f, *args, **kwargs): """ Returns time elapsed for evaluating ``f()``. Optionally arguments may be passed to time the execution of ``f(*args, **kwargs)``. If the first call is very quick, ``f`` is called repeatedly and the best time is returned. """ once = kwargs.get('once') if 'once' in kwargs: del kwargs['once'] if args or kwargs: if len(args) == 1 and not kwargs: arg = args[0] g = lambda: f(arg) else: g = lambda: f(*args, **kwargs) else: g = f from timeit import default_timer as clock t1=clock(); v=g(); t2=clock(); t=t2-t1 if t > 0.05 or once: return t for i in range(3): t1=clock(); # Evaluate multiple times because the timer function # has a significant overhead g();g();g();g();g();g();g();g();g();g() t2=clock() t=min(t,(t2-t1)/10) return t
3,029
31.234043
70
py
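As a hedged illustration of how the two helpers above can be combined (the integrand and interval are arbitrary choices, not taken from the module): ``monitor`` with a list-appending input handler counts how often ``quad`` samples the integrand, and ``timing`` measures the whole call.

from mpmath import mp, monitor, timing, quad, sin

mp.dps = 15
calls = []
f = monitor(sin, calls.append, None)   # record every (args, kwargs), discard outputs
print(quad(f, [0, mp.pi]))             # integral of sin over [0, pi], close to 2.0
print(len(calls), 'integrand evaluations')
print(timing(quad, sin, [0, mp.pi]), 'seconds (best of a few repetitions)')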
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/conftest.py
import os # This makes py.test put the mpmath directory into sys.path, so that we can # import "mpmath" from tests nicely rootdir = os.path.abspath(os.getcwd())
160
25.833333
74
py

cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/ctx_base.py
from operator import gt, lt from .libmp.backend import xrange from .functions.functions import SpecialFunctions from .functions.rszeta import RSCache from .calculus.quadrature import QuadratureMethods from .calculus.inverselaplace import LaplaceTransformInversionMethods from .calculus.calculus import CalculusMethods from .calculus.optimization import OptimizationMethods from .calculus.odes import ODEMethods from .matrices.matrices import MatrixMethods from .matrices.calculus import MatrixCalculusMethods from .matrices.linalg import LinearAlgebraMethods from .matrices.eigen import Eigen from .identification import IdentificationMethods from .visualization import VisualizationMethods from . import libmp class Context(object): pass class StandardBaseContext(Context, SpecialFunctions, RSCache, QuadratureMethods, LaplaceTransformInversionMethods, CalculusMethods, MatrixMethods, MatrixCalculusMethods, LinearAlgebraMethods, Eigen, IdentificationMethods, OptimizationMethods, ODEMethods, VisualizationMethods): NoConvergence = libmp.NoConvergence ComplexResult = libmp.ComplexResult def __init__(ctx): ctx._aliases = {} # Call those that need preinitialization (e.g. for wrappers) SpecialFunctions.__init__(ctx) RSCache.__init__(ctx) QuadratureMethods.__init__(ctx) LaplaceTransformInversionMethods.__init__(ctx) CalculusMethods.__init__(ctx) MatrixMethods.__init__(ctx) def _init_aliases(ctx): for alias, value in ctx._aliases.items(): try: setattr(ctx, alias, getattr(ctx, value)) except AttributeError: pass _fixed_precision = False # XXX verbose = False def warn(ctx, msg): print("Warning:", msg) def bad_domain(ctx, msg): raise ValueError(msg) def _re(ctx, x): if hasattr(x, "real"): return x.real return x def _im(ctx, x): if hasattr(x, "imag"): return x.imag return ctx.zero def _as_points(ctx, x): return x def fneg(ctx, x, **kwargs): return -ctx.convert(x) def fadd(ctx, x, y, **kwargs): return ctx.convert(x)+ctx.convert(y) def fsub(ctx, x, y, **kwargs): return ctx.convert(x)-ctx.convert(y) def fmul(ctx, x, y, **kwargs): return ctx.convert(x)*ctx.convert(y) def fdiv(ctx, x, y, **kwargs): return ctx.convert(x)/ctx.convert(y) def fsum(ctx, args, absolute=False, squared=False): if absolute: if squared: return sum((abs(x)**2 for x in args), ctx.zero) return sum((abs(x) for x in args), ctx.zero) if squared: return sum((x**2 for x in args), ctx.zero) return sum(args, ctx.zero) def fdot(ctx, xs, ys=None, conjugate=False): if ys is not None: xs = zip(xs, ys) if conjugate: cf = ctx.conj return sum((x*cf(y) for (x,y) in xs), ctx.zero) else: return sum((x*y for (x,y) in xs), ctx.zero) def fprod(ctx, args): prod = ctx.one for arg in args: prod *= arg return prod def nprint(ctx, x, n=6, **kwargs): """ Equivalent to ``print(nstr(x, n))``. """ print(ctx.nstr(x, n, **kwargs)) def chop(ctx, x, tol=None): """ Chops off small real or imaginary parts, or converts numbers close to zero to exact zeros. The input can be a single number or an iterable:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> chop(5+1e-10j, tol=1e-9) mpf('5.0') >>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2])) [1.0, 0.0, 3.0, -4.0, 2.0] The tolerance defaults to ``100*eps``. 
""" if tol is None: tol = 100*ctx.eps try: x = ctx.convert(x) absx = abs(x) if abs(x) < tol: return ctx.zero if ctx._is_complex_type(x): #part_tol = min(tol, absx*tol) part_tol = max(tol, absx*tol) if abs(x.imag) < part_tol: return x.real if abs(x.real) < part_tol: return ctx.mpc(0, x.imag) except TypeError: if isinstance(x, ctx.matrix): return x.apply(lambda a: ctx.chop(a, tol)) if hasattr(x, "__iter__"): return [ctx.chop(a, tol) for a in x] return x def almosteq(ctx, s, t, rel_eps=None, abs_eps=None): r""" Determine whether the difference between `s` and `t` is smaller than a given epsilon, either relatively or absolutely. Both a maximum relative difference and a maximum difference ('epsilons') may be specified. The absolute difference is defined as `|s-t|` and the relative difference is defined as `|s-t|/\max(|s|, |t|)`. If only one epsilon is given, both are set to the same value. If none is given, both epsilons are set to `2^{-p+m}` where `p` is the current working precision and `m` is a small integer. The default setting typically allows :func:`~mpmath.almosteq` to be used to check for mathematical equality in the presence of small rounding errors. **Examples** >>> from mpmath import * >>> mp.dps = 15 >>> almosteq(3.141592653589793, 3.141592653589790) True >>> almosteq(3.141592653589793, 3.141592653589700) False >>> almosteq(3.141592653589793, 3.141592653589700, 1e-10) True >>> almosteq(1e-20, 2e-20) True >>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0) False """ t = ctx.convert(t) if abs_eps is None and rel_eps is None: rel_eps = abs_eps = ctx.ldexp(1, -ctx.prec+4) if abs_eps is None: abs_eps = rel_eps elif rel_eps is None: rel_eps = abs_eps diff = abs(s-t) if diff <= abs_eps: return True abss = abs(s) abst = abs(t) if abss < abst: err = diff/abst else: err = diff/abss return err <= rel_eps def arange(ctx, *args): r""" This is a generalized version of Python's :func:`~mpmath.range` function that accepts fractional endpoints and step sizes and returns a list of ``mpf`` instances. Like :func:`~mpmath.range`, :func:`~mpmath.arange` can be called with 1, 2 or 3 arguments: ``arange(b)`` `[0, 1, 2, \ldots, x]` ``arange(a, b)`` `[a, a+1, a+2, \ldots, x]` ``arange(a, b, h)`` `[a, a+h, a+h, \ldots, x]` where `b-1 \le x < b` (in the third case, `b-h \le x < b`). Like Python's :func:`~mpmath.range`, the endpoint is not included. To produce ranges where the endpoint is included, :func:`~mpmath.linspace` is more convenient. 
**Examples** >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> arange(4) [mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0')] >>> arange(1, 2, 0.25) [mpf('1.0'), mpf('1.25'), mpf('1.5'), mpf('1.75')] >>> arange(1, -1, -0.75) [mpf('1.0'), mpf('0.25'), mpf('-0.5')] """ if not len(args) <= 3: raise TypeError('arange expected at most 3 arguments, got %i' % len(args)) if not len(args) >= 1: raise TypeError('arange expected at least 1 argument, got %i' % len(args)) # set default a = 0 dt = 1 # interpret arguments if len(args) == 1: b = args[0] elif len(args) >= 2: a = args[0] b = args[1] if len(args) == 3: dt = args[2] a, b, dt = ctx.mpf(a), ctx.mpf(b), ctx.mpf(dt) assert a + dt != a, 'dt is too small and would cause an infinite loop' # adapt code for sign of dt if a > b: if dt > 0: return [] op = gt else: if dt < 0: return [] op = lt # create list result = [] i = 0 t = a while 1: t = a + dt*i i += 1 if op(t, b): result.append(t) else: break return result def linspace(ctx, *args, **kwargs): """ ``linspace(a, b, n)`` returns a list of `n` evenly spaced samples from `a` to `b`. The syntax ``linspace(mpi(a,b), n)`` is also valid. This function is often more convenient than :func:`~mpmath.arange` for partitioning an interval into subintervals, since the endpoint is included:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> linspace(1, 4, 4) [mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')] You may also provide the keyword argument ``endpoint=False``:: >>> linspace(1, 4, 4, endpoint=False) [mpf('1.0'), mpf('1.75'), mpf('2.5'), mpf('3.25')] """ if len(args) == 3: a = ctx.mpf(args[0]) b = ctx.mpf(args[1]) n = int(args[2]) elif len(args) == 2: assert hasattr(args[0], '_mpi_') a = args[0].a b = args[0].b n = int(args[1]) else: raise TypeError('linspace expected 2 or 3 arguments, got %i' \ % len(args)) if n < 1: raise ValueError('n must be greater than 0') if not 'endpoint' in kwargs or kwargs['endpoint']: if n == 1: return [ctx.mpf(a)] step = (b - a) / ctx.mpf(n - 1) y = [i*step + a for i in xrange(n)] y[-1] = b else: step = (b - a) / ctx.mpf(n) y = [i*step + a for i in xrange(n)] return y def cos_sin(ctx, z, **kwargs): return ctx.cos(z, **kwargs), ctx.sin(z, **kwargs) def cospi_sinpi(ctx, z, **kwargs): return ctx.cospi(z, **kwargs), ctx.sinpi(z, **kwargs) def _default_hyper_maxprec(ctx, p): return int(1000 * p**0.25 + 4*p) _gcd = staticmethod(libmp.gcd) list_primes = staticmethod(libmp.list_primes) isprime = staticmethod(libmp.isprime) bernfrac = staticmethod(libmp.bernfrac) moebius = staticmethod(libmp.moebius) _ifac = staticmethod(libmp.ifac) _eulernum = staticmethod(libmp.eulernum) _stirling1 = staticmethod(libmp.stirling1) _stirling2 = staticmethod(libmp.stirling2) def sum_accurately(ctx, terms, check_step=1): prec = ctx.prec try: extraprec = 10 while 1: ctx.prec = prec + extraprec + 5 max_mag = ctx.ninf s = ctx.zero k = 0 for term in terms(): s += term if (not k % check_step) and term: term_mag = ctx.mag(term) max_mag = max(max_mag, term_mag) sum_mag = ctx.mag(s) if sum_mag - term_mag > ctx.prec: break k += 1 cancellation = max_mag - sum_mag if cancellation != cancellation: break if cancellation < extraprec or ctx._fixed_precision: break extraprec += min(ctx.prec, cancellation) return s finally: ctx.prec = prec def mul_accurately(ctx, factors, check_step=1): prec = ctx.prec try: extraprec = 10 while 1: ctx.prec = prec + extraprec + 5 max_mag = ctx.ninf one = ctx.one s = one k = 0 for factor in factors(): s *= factor term = factor - one if (not k % check_step): term_mag = 
ctx.mag(term) max_mag = max(max_mag, term_mag) sum_mag = ctx.mag(s-one) #if sum_mag - term_mag > ctx.prec: # break if -term_mag > ctx.prec: break k += 1 cancellation = max_mag - sum_mag if cancellation != cancellation: break if cancellation < extraprec or ctx._fixed_precision: break extraprec += min(ctx.prec, cancellation) return s finally: ctx.prec = prec def power(ctx, x, y): r"""Converts `x` and `y` to mpmath numbers and evaluates `x^y = \exp(y \log(x))`:: >>> from mpmath import * >>> mp.dps = 30; mp.pretty = True >>> power(2, 0.5) 1.41421356237309504880168872421 This shows the leading few digits of a large Mersenne prime (performing the exact calculation ``2**43112609-1`` and displaying the result in Python would be very slow):: >>> power(2, 43112609)-1 3.16470269330255923143453723949e+12978188 """ return ctx.convert(x) ** ctx.convert(y) def _zeta_int(ctx, n): return ctx.zeta(n) def maxcalls(ctx, f, N): """ Return a wrapped copy of *f* that raises ``NoConvergence`` when *f* has been called more than *N* times:: >>> from mpmath import * >>> mp.dps = 15 >>> f = maxcalls(sin, 10) >>> print(sum(f(n) for n in range(10))) 1.95520948210738 >>> f(10) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... NoConvergence: maxcalls: function evaluated 10 times """ counter = [0] def f_maxcalls_wrapped(*args, **kwargs): counter[0] += 1 if counter[0] > N: raise ctx.NoConvergence("maxcalls: function evaluated %i times" % N) return f(*args, **kwargs) return f_maxcalls_wrapped def memoize(ctx, f): """ Return a wrapped copy of *f* that caches computed values, i.e. a memoized copy of *f*. Values are only reused if the cached precision is equal to or higher than the working precision:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> f = memoize(maxcalls(sin, 1)) >>> f(2) 0.909297426825682 >>> f(2) 0.909297426825682 >>> mp.dps = 25 >>> f(2) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... NoConvergence: maxcalls: function evaluated 1 times """ f_cache = {} def f_cached(*args, **kwargs): if kwargs: key = args, tuple(kwargs.items()) else: key = args prec = ctx.prec if key in f_cache: cprec, cvalue = f_cache[key] if cprec >= prec: return +cvalue value = f(*args, **kwargs) f_cache[key] = (prec, value) return value f_cached.__name__ = f.__name__ f_cached.__doc__ = f.__doc__ return f_cached
15,985
31.294949
84
py
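A hedged sketch exercising a few of the convenience methods defined on the context class above, called through the global ``mp`` instance (the input values are arbitrary):

from mpmath import mp, mpc

mp.dps = 25
print(mp.fsum([1, 2+3j, -4], absolute=True, squared=True))  # 1 + 13 + 16 = 30
print(mp.fdot([1, 2, 3], [4, 5, 6]))                        # 4 + 10 + 18 = 32
print(mp.chop(mpc(5, 1e-30)))                               # tiny imaginary part dropped
print(mp.almosteq(mp.sqrt(2)**2, 2))                        # True, within rounding error
print(mp.linspace(0, 1, 5))                                 # endpoint included, unlike arange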
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/math2.py
""" This module complements the math and cmath builtin modules by providing fast machine precision versions of some additional functions (gamma, ...) and wrapping math/cmath functions so that they can be called with either real or complex arguments. """ import operator import math import cmath # Irrational (?) constants pi = 3.1415926535897932385 e = 2.7182818284590452354 sqrt2 = 1.4142135623730950488 sqrt5 = 2.2360679774997896964 phi = 1.6180339887498948482 ln2 = 0.69314718055994530942 ln10 = 2.302585092994045684 euler = 0.57721566490153286061 catalan = 0.91596559417721901505 khinchin = 2.6854520010653064453 apery = 1.2020569031595942854 logpi = 1.1447298858494001741 def _mathfun_real(f_real, f_complex): def f(x, **kwargs): if type(x) is float: return f_real(x) if type(x) is complex: return f_complex(x) try: x = float(x) return f_real(x) except (TypeError, ValueError): x = complex(x) return f_complex(x) f.__name__ = f_real.__name__ return f def _mathfun(f_real, f_complex): def f(x, **kwargs): if type(x) is complex: return f_complex(x) try: return f_real(float(x)) except (TypeError, ValueError): return f_complex(complex(x)) f.__name__ = f_real.__name__ return f def _mathfun_n(f_real, f_complex): def f(*args, **kwargs): try: return f_real(*(float(x) for x in args)) except (TypeError, ValueError): return f_complex(*(complex(x) for x in args)) f.__name__ = f_real.__name__ return f # Workaround for non-raising log and sqrt in Python 2.5 and 2.4 # on Unix system try: math.log(-2.0) def math_log(x): if x <= 0.0: raise ValueError("math domain error") return math.log(x) def math_sqrt(x): if x < 0.0: raise ValueError("math domain error") return math.sqrt(x) except (ValueError, TypeError): math_log = math.log math_sqrt = math.sqrt pow = _mathfun_n(operator.pow, lambda x, y: complex(x)**y) log = _mathfun_n(math_log, cmath.log) sqrt = _mathfun(math_sqrt, cmath.sqrt) exp = _mathfun_real(math.exp, cmath.exp) cos = _mathfun_real(math.cos, cmath.cos) sin = _mathfun_real(math.sin, cmath.sin) tan = _mathfun_real(math.tan, cmath.tan) acos = _mathfun(math.acos, cmath.acos) asin = _mathfun(math.asin, cmath.asin) atan = _mathfun_real(math.atan, cmath.atan) cosh = _mathfun_real(math.cosh, cmath.cosh) sinh = _mathfun_real(math.sinh, cmath.sinh) tanh = _mathfun_real(math.tanh, cmath.tanh) floor = _mathfun_real(math.floor, lambda z: complex(math.floor(z.real), math.floor(z.imag))) ceil = _mathfun_real(math.ceil, lambda z: complex(math.ceil(z.real), math.ceil(z.imag))) cos_sin = _mathfun_real(lambda x: (math.cos(x), math.sin(x)), lambda z: (cmath.cos(z), cmath.sin(z))) cbrt = _mathfun(lambda x: x**(1./3), lambda z: z**(1./3)) def nthroot(x, n): r = 1./n try: return float(x) ** r except (ValueError, TypeError): return complex(x) ** r def _sinpi_real(x): if x < 0: return -_sinpi_real(-x) n, r = divmod(x, 0.5) r *= pi n %= 4 if n == 0: return math.sin(r) if n == 1: return math.cos(r) if n == 2: return -math.sin(r) if n == 3: return -math.cos(r) def _cospi_real(x): if x < 0: x = -x n, r = divmod(x, 0.5) r *= pi n %= 4 if n == 0: return math.cos(r) if n == 1: return -math.sin(r) if n == 2: return -math.cos(r) if n == 3: return math.sin(r) def _sinpi_complex(z): if z.real < 0: return -_sinpi_complex(-z) n, r = divmod(z.real, 0.5) z = pi*complex(r, z.imag) n %= 4 if n == 0: return cmath.sin(z) if n == 1: return cmath.cos(z) if n == 2: return -cmath.sin(z) if n == 3: return -cmath.cos(z) def _cospi_complex(z): if z.real < 0: z = -z n, r = divmod(z.real, 0.5) z = pi*complex(r, z.imag) n %= 4 if n == 0: return cmath.cos(z) if 
n == 1: return -cmath.sin(z) if n == 2: return -cmath.cos(z) if n == 3: return cmath.sin(z) cospi = _mathfun_real(_cospi_real, _cospi_complex) sinpi = _mathfun_real(_sinpi_real, _sinpi_complex) def tanpi(x): try: return sinpi(x) / cospi(x) except OverflowError: if complex(x).imag > 10: return 1j if complex(x).imag < 10: return -1j raise def cotpi(x): try: return cospi(x) / sinpi(x) except OverflowError: if complex(x).imag > 10: return -1j if complex(x).imag < 10: return 1j raise INF = 1e300*1e300 NINF = -INF NAN = INF-INF EPS = 2.2204460492503131e-16 _exact_gamma = (INF, 1.0, 1.0, 2.0, 6.0, 24.0, 120.0, 720.0, 5040.0, 40320.0, 362880.0, 3628800.0, 39916800.0, 479001600.0, 6227020800.0, 87178291200.0, 1307674368000.0, 20922789888000.0, 355687428096000.0, 6402373705728000.0, 121645100408832000.0, 2432902008176640000.0) _max_exact_gamma = len(_exact_gamma)-1 # Lanczos coefficients used by the GNU Scientific Library _lanczos_g = 7 _lanczos_p = (0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7) def _gamma_real(x): _intx = int(x) if _intx == x: if _intx <= 0: #return (-1)**_intx * INF raise ZeroDivisionError("gamma function pole") if _intx <= _max_exact_gamma: return _exact_gamma[_intx] if x < 0.5: # TODO: sinpi return pi / (_sinpi_real(x)*_gamma_real(1-x)) else: x -= 1.0 r = _lanczos_p[0] for i in range(1, _lanczos_g+2): r += _lanczos_p[i]/(x+i) t = x + _lanczos_g + 0.5 return 2.506628274631000502417 * t**(x+0.5) * math.exp(-t) * r def _gamma_complex(x): if not x.imag: return complex(_gamma_real(x.real)) if x.real < 0.5: # TODO: sinpi return pi / (_sinpi_complex(x)*_gamma_complex(1-x)) else: x -= 1.0 r = _lanczos_p[0] for i in range(1, _lanczos_g+2): r += _lanczos_p[i]/(x+i) t = x + _lanczos_g + 0.5 return 2.506628274631000502417 * t**(x+0.5) * cmath.exp(-t) * r gamma = _mathfun_real(_gamma_real, _gamma_complex) def rgamma(x): try: return 1./gamma(x) except ZeroDivisionError: return x*0.0 def factorial(x): return gamma(x+1.0) def arg(x): if type(x) is float: return math.atan2(0.0,x) return math.atan2(x.imag,x.real) # XXX: broken for negatives def loggamma(x): if type(x) not in (float, complex): try: x = float(x) except (ValueError, TypeError): x = complex(x) try: xreal = x.real ximag = x.imag except AttributeError: # py2.5 xreal = x ximag = 0.0 # Reflection formula # http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0003/ if xreal < 0.0: if abs(x) < 0.5: v = log(gamma(x)) if ximag == 0: v = v.conjugate() return v z = 1-x try: re = z.real im = z.imag except AttributeError: # py2.5 re = z im = 0.0 refloor = floor(re) if im == 0.0: imsign = 0 elif im < 0.0: imsign = -1 else: imsign = 1 return (-pi*1j)*abs(refloor)*(1-abs(imsign)) + logpi - \ log(sinpi(z-refloor)) - loggamma(z) + 1j*pi*refloor*imsign if x == 1.0 or x == 2.0: return x*0 p = 0. 
while abs(x) < 11: p -= log(x) x += 1.0 s = 0.918938533204672742 + (x-0.5)*log(x) - x r = 1./x r2 = r*r s += 0.083333333333333333333*r; r *= r2 s += -0.0027777777777777777778*r; r *= r2 s += 0.00079365079365079365079*r; r *= r2 s += -0.0005952380952380952381*r; r *= r2 s += 0.00084175084175084175084*r; r *= r2 s += -0.0019175269175269175269*r; r *= r2 s += 0.0064102564102564102564*r; r *= r2 s += -0.02955065359477124183*r return s + p _psi_coeff = [ 0.083333333333333333333, -0.0083333333333333333333, 0.003968253968253968254, -0.0041666666666666666667, 0.0075757575757575757576, -0.021092796092796092796, 0.083333333333333333333, -0.44325980392156862745, 3.0539543302701197438, -26.456212121212121212] def _digamma_real(x): _intx = int(x) if _intx == x: if _intx <= 0: raise ZeroDivisionError("polygamma pole") if x < 0.5: x = 1.0-x s = pi*cotpi(x) else: s = 0.0 while x < 10.0: s -= 1.0/x x += 1.0 x2 = x**-2 t = x2 for c in _psi_coeff: s -= c*t if t < 1e-20: break t *= x2 return s + math_log(x) - 0.5/x def _digamma_complex(x): if not x.imag: return complex(_digamma_real(x.real)) if x.real < 0.5: x = 1.0-x s = pi*cotpi(x) else: s = 0.0 while abs(x) < 10.0: s -= 1.0/x x += 1.0 x2 = x**-2 t = x2 for c in _psi_coeff: s -= c*t if abs(t) < 1e-20: break t *= x2 return s + cmath.log(x) - 0.5/x digamma = _mathfun_real(_digamma_real, _digamma_complex) # TODO: could implement complex erf and erfc here. Need # to find an accurate method (avoiding cancellation) # for approx. 1 < abs(x) < 9. _erfc_coeff_P = [ 1.0000000161203922312, 2.1275306946297962644, 2.2280433377390253297, 1.4695509105618423961, 0.66275911699770787537, 0.20924776504163751585, 0.045459713768411264339, 0.0063065951710717791934, 0.00044560259661560421715][::-1] _erfc_coeff_Q = [ 1.0000000000000000000, 3.2559100272784894318, 4.9019435608903239131, 4.4971472894498014205, 2.7845640601891186528, 1.2146026030046904138, 0.37647108453729465912, 0.080970149639040548613, 0.011178148899483545902, 0.00078981003831980423513][::-1] def _polyval(coeffs, x): p = coeffs[0] for c in coeffs[1:]: p = c + x*p return p def _erf_taylor(x): # Taylor series assuming 0 <= x <= 1 x2 = x*x s = t = x n = 1 while abs(t) > 1e-17: t *= x2/n s -= t/(n+n+1) n += 1 t *= x2/n s += t/(n+n+1) n += 1 return 1.1283791670955125739*s def _erfc_mid(x): # Rational approximation assuming 0 <= x <= 9 return exp(-x*x)*_polyval(_erfc_coeff_P,x)/_polyval(_erfc_coeff_Q,x) def _erfc_asymp(x): # Asymptotic expansion assuming x >= 9 x2 = x*x v = exp(-x2)/x*0.56418958354775628695 r = t = 0.5 / x2 s = 1.0 for n in range(1,22,4): s -= t t *= r * (n+2) s += t t *= r * (n+4) if abs(t) < 1e-17: break return s * v def erf(x): """ erf of a real number. """ x = float(x) if x != x: return x if x < 0.0: return -erf(-x) if x >= 1.0: if x >= 6.0: return 1.0 return 1.0 - _erfc_mid(x) return _erf_taylor(x) def erfc(x): """ erfc of a real number. 
""" x = float(x) if x != x: return x if x < 0.0: if x < -6.0: return 2.0 return 2.0-erfc(-x) if x > 9.0: return _erfc_asymp(x) if x >= 1.0: return _erfc_mid(x) return 1.0 - _erf_taylor(x) gauss42 = [\ (0.99839961899006235, 0.0041059986046490839), (-0.99839961899006235, 0.0041059986046490839), (0.9915772883408609, 0.009536220301748501), (-0.9915772883408609,0.009536220301748501), (0.97934250806374812, 0.014922443697357493), (-0.97934250806374812, 0.014922443697357493), (0.96175936533820439,0.020227869569052644), (-0.96175936533820439, 0.020227869569052644), (0.93892355735498811, 0.025422959526113047), (-0.93892355735498811,0.025422959526113047), (0.91095972490412735, 0.030479240699603467), (-0.91095972490412735, 0.030479240699603467), (0.87802056981217269,0.03536907109759211), (-0.87802056981217269, 0.03536907109759211), (0.8402859832618168, 0.040065735180692258), (-0.8402859832618168,0.040065735180692258), (0.7979620532554873, 0.044543577771965874), (-0.7979620532554873, 0.044543577771965874), (0.75127993568948048,0.048778140792803244), (-0.75127993568948048, 0.048778140792803244), (0.70049459055617114, 0.052746295699174064), (-0.70049459055617114,0.052746295699174064), (0.64588338886924779, 0.056426369358018376), (-0.64588338886924779, 0.056426369358018376), (0.58774459748510932, 0.059798262227586649), (-0.58774459748510932, 0.059798262227586649), (0.5263957499311922, 0.062843558045002565), (-0.5263957499311922, 0.062843558045002565), (0.46217191207042191, 0.065545624364908975), (-0.46217191207042191, 0.065545624364908975), (0.39542385204297503, 0.067889703376521934), (-0.39542385204297503, 0.067889703376521934), (0.32651612446541151, 0.069862992492594159), (-0.32651612446541151, 0.069862992492594159), (0.25582507934287907, 0.071454714265170971), (-0.25582507934287907, 0.071454714265170971), (0.18373680656485453, 0.072656175243804091), (-0.18373680656485453, 0.072656175243804091), (0.11064502720851986, 0.073460813453467527), (-0.11064502720851986, 0.073460813453467527), (0.036948943165351772, 0.073864234232172879), (-0.036948943165351772, 0.073864234232172879)] EI_ASYMP_CONVERGENCE_RADIUS = 40.0 def ei_asymp(z, _e1=False): r = 1./z s = t = 1.0 k = 1 while 1: t *= k*r s += t if abs(t) < 1e-16: break k += 1 v = s*exp(z)/z if _e1: if type(z) is complex: zreal = z.real zimag = z.imag else: zreal = z zimag = 0.0 if zimag == 0.0 and zreal > 0.0: v += pi*1j else: if type(z) is complex: if z.imag > 0: v += pi*1j if z.imag < 0: v -= pi*1j return v def ei_taylor(z, _e1=False): s = t = z k = 2 while 1: t = t*z/k term = t/k if abs(term) < 1e-17: break s += term k += 1 s += euler if _e1: s += log(-z) else: if type(z) is float or z.imag == 0.0: s += math_log(abs(z)) else: s += cmath.log(z) return s def ei(z, _e1=False): typez = type(z) if typez not in (float, complex): try: z = float(z) typez = float except (TypeError, ValueError): z = complex(z) typez = complex if not z: return -INF absz = abs(z) if absz > EI_ASYMP_CONVERGENCE_RADIUS: return ei_asymp(z, _e1) elif absz <= 2.0 or (typez is float and z > 0.0): return ei_taylor(z, _e1) # Integrate, starting from whichever is smaller of a Taylor # series value or an asymptotic series value if typez is complex and z.real > 0.0: zref = z / absz ref = ei_taylor(zref, _e1) else: zref = EI_ASYMP_CONVERGENCE_RADIUS * z / absz ref = ei_asymp(zref, _e1) C = (zref-z)*0.5 D = (zref+z)*0.5 s = 0.0 if type(z) is complex: _exp = cmath.exp else: _exp = math.exp for x,w in gauss42: t = C*x+D s += w*_exp(t)/t ref -= C*s return ref def e1(z): # hack to get consistent signs 
if the imaginary part if 0 # and signed typez = type(z) if type(z) not in (float, complex): try: z = float(z) typez = float except (TypeError, ValueError): z = complex(z) typez = complex if typez is complex and not z.imag: z = complex(z.real, 0.0) # end hack return -ei(-z, _e1=True) _zeta_int = [\ -0.5, 0.0, 1.6449340668482264365,1.2020569031595942854,1.0823232337111381915, 1.0369277551433699263,1.0173430619844491397,1.0083492773819228268, 1.0040773561979443394,1.0020083928260822144,1.0009945751278180853, 1.0004941886041194646,1.0002460865533080483,1.0001227133475784891, 1.0000612481350587048,1.0000305882363070205,1.0000152822594086519, 1.0000076371976378998,1.0000038172932649998,1.0000019082127165539, 1.0000009539620338728,1.0000004769329867878,1.0000002384505027277, 1.0000001192199259653,1.0000000596081890513,1.0000000298035035147, 1.0000000149015548284] _zeta_P = [-3.50000000087575873, -0.701274355654678147, -0.0672313458590012612, -0.00398731457954257841, -0.000160948723019303141, -4.67633010038383371e-6, -1.02078104417700585e-7, -1.68030037095896287e-9, -1.85231868742346722e-11][::-1] _zeta_Q = [1.00000000000000000, -0.936552848762465319, -0.0588835413263763741, -0.00441498861482948666, -0.000143416758067432622, -5.10691659585090782e-6, -9.58813053268913799e-8, -1.72963791443181972e-9, -1.83527919681474132e-11][::-1] _zeta_1 = [3.03768838606128127e-10, -1.21924525236601262e-8, 2.01201845887608893e-7, -1.53917240683468381e-6, -5.09890411005967954e-7, 0.000122464707271619326, -0.000905721539353130232, -0.00239315326074843037, 0.084239750013159168, 0.418938517907442414, 0.500000001921884009] _zeta_0 = [-3.46092485016748794e-10, -6.42610089468292485e-9, 1.76409071536679773e-7, -1.47141263991560698e-6, -6.38880222546167613e-7, 0.000122641099800668209, -0.000905894913516772796, -0.00239303348507992713, 0.0842396947501199816, 0.418938533204660256, 0.500000000000000052] def zeta(s): """ Riemann zeta function, real argument """ if not isinstance(s, (float, int)): try: s = float(s) except (ValueError, TypeError): try: s = complex(s) if not s.imag: return complex(zeta(s.real)) except (ValueError, TypeError): pass raise NotImplementedError if s == 1: raise ValueError("zeta(1) pole") if s >= 27: return 1.0 + 2.0**(-s) + 3.0**(-s) n = int(s) if n == s: if n >= 0: return _zeta_int[n] if not (n % 2): return 0.0 if s <= 0.0: return 2.**s*pi**(s-1)*_sinpi_real(0.5*s)*_gamma_real(1-s)*zeta(1-s) if s <= 2.0: if s <= 1.0: return _polyval(_zeta_0,s)/(s-1) return _polyval(_zeta_1,s)/(s-1) z = _polyval(_zeta_P,s) / _polyval(_zeta_Q,s) return 1.0 + 2.0**(-s) + 3.0**(-s) + 4.0**(-s)*z
18,561
26.580981
77
py
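The module above is plain double-precision support code, so its results can be sanity-checked against the standard library. A small hedged sketch follows (it assumes the module can be imported as ``mpmath.math2``, which the file path above suggests):

import math
from mpmath import math2

print(math2.gamma(5.0), math.factorial(4))   # both equal 24
print(math2.zeta(2.0), math.pi**2 / 6)       # both about 1.6449340668...
print(math2.erf(1.0) + math2.erfc(1.0))      # 1.0 by construction
print(math2.loggamma(3.5 + 2j))              # complex input is also accepted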
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/identification.py
""" Implements the PSLQ algorithm for integer relation detection, and derivative algorithms for constant recognition. """ from .libmp.backend import xrange from .libmp import int_types, sqrt_fixed # round to nearest integer (can be done more elegantly...) def round_fixed(x, prec): return ((x + (1<<(prec-1))) >> prec) << prec class IdentificationMethods(object): pass def pslq(ctx, x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False): r""" Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)`` uses the PSLQ algorithm to find a list of integers `[c_0, c_1, ..., c_n]` such that .. math :: |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol} and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector exists, :func:`~mpmath.pslq` returns ``None``. The tolerance defaults to 3/4 of the working precision. **Examples** Find rational approximations for `\pi`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> pslq([-1, pi], tol=0.01) [22, 7] >>> pslq([-1, pi], tol=0.001) [355, 113] >>> mpf(22)/7; mpf(355)/113; +pi 3.14285714285714 3.14159292035398 3.14159265358979 Pi is not a rational number with denominator less than 1000:: >>> pslq([-1, pi]) >>> To within the standard precision, it can however be approximated by at least one rational number with denominator less than `10^{12}`:: >>> p, q = pslq([-1, pi], maxcoeff=10**12) >>> print(p); print(q) 238410049439 75888275702 >>> mpf(p)/q 3.14159265358979 The PSLQ algorithm can be applied to long vectors. For example, we can investigate the rational (in)dependence of integer square roots:: >>> mp.dps = 30 >>> pslq([sqrt(n) for n in range(2, 5+1)]) >>> >>> pslq([sqrt(n) for n in range(2, 6+1)]) >>> >>> pslq([sqrt(n) for n in range(2, 8+1)]) [2, 0, 0, 0, 0, 0, -1] **Machin formulas** A famous formula for `\pi` is Machin's, .. math :: \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239 There are actually infinitely many formulas of this type. Two others are .. math :: \frac{\pi}{4} = \operatorname{acot} 1 \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57 + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443 We can easily verify the formulas using the PSLQ algorithm:: >>> mp.dps = 30 >>> pslq([pi/4, acot(1)]) [1, -1] >>> pslq([pi/4, acot(5), acot(239)]) [1, -4, 1] >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)]) [1, -12, -32, 5, -12] We could try to generate a custom Machin-like formula by running the PSLQ algorithm with a few inverse cotangent values, for example acot(2), acot(3) ... acot(10). Unfortunately, there is a linear dependence among these values, resulting in only that dependence being detected, with a zero coefficient for `\pi`:: >>> pslq([pi] + [acot(n) for n in range(2,11)]) [0, 1, -1, 0, 0, 0, -1, 0, 0, 0] We get better luck by removing linearly dependent terms:: >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)]) [1, -8, 0, 0, 4, 0, 0, 0] In other words, we found the following formula:: >>> 8*acot(2) - 4*acot(7) 3.14159265358979323846264338328 >>> +pi 3.14159265358979323846264338328 **Algorithm** This is a fairly direct translation to Python of the pseudocode given by David Bailey, "The PSLQ Integer Relation Algorithm": http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html The present implementation uses fixed-point instead of floating-point arithmetic, since this is significantly (about 7x) faster. 
""" n = len(x) if n < 2: raise ValueError("n cannot be less than 2") # At too low precision, the algorithm becomes meaningless prec = ctx.prec if prec < 53: raise ValueError("prec cannot be less than 53") if verbose and prec // max(2,n) < 5: print("Warning: precision for PSLQ may be too low") target = int(prec * 0.75) if tol is None: tol = ctx.mpf(2)**(-target) else: tol = ctx.convert(tol) extra = 60 prec += extra if verbose: print("PSLQ using prec %i and tol %s" % (prec, ctx.nstr(tol))) tol = ctx.to_fixed(tol, prec) assert tol # Convert to fixed-point numbers. The dummy None is added so we can # use 1-based indexing. (This just allows us to be consistent with # Bailey's indexing. The algorithm is 100 lines long, so debugging # a single wrong index can be painful.) x = [None] + [ctx.to_fixed(ctx.mpf(xk), prec) for xk in x] # Sanity check on magnitudes minx = min(abs(xx) for xx in x[1:]) if not minx: raise ValueError("PSLQ requires a vector of nonzero numbers") if minx < tol//100: if verbose: print("STOPPING: (one number is too small)") return None g = sqrt_fixed((4<<prec)//3, prec) A = {} B = {} H = {} # Initialization # step 1 for i in xrange(1, n+1): for j in xrange(1, n+1): A[i,j] = B[i,j] = (i==j) << prec H[i,j] = 0 # step 2 s = [None] + [0] * n for k in xrange(1, n+1): t = 0 for j in xrange(k, n+1): t += (x[j]**2 >> prec) s[k] = sqrt_fixed(t, prec) t = s[1] y = x[:] for k in xrange(1, n+1): y[k] = (x[k] << prec) // t s[k] = (s[k] << prec) // t # step 3 for i in xrange(1, n+1): for j in xrange(i+1, n): H[i,j] = 0 if i <= n-1: if s[i]: H[i,i] = (s[i+1] << prec) // s[i] else: H[i,i] = 0 for j in range(1, i): sjj1 = s[j]*s[j+1] if sjj1: H[i,j] = ((-y[i]*y[j])<<prec)//sjj1 else: H[i,j] = 0 # step 4 for i in xrange(2, n+1): for j in xrange(i-1, 0, -1): #t = floor(H[i,j]/H[j,j] + 0.5) if H[j,j]: t = round_fixed((H[i,j] << prec)//H[j,j], prec) else: #t = 0 continue y[j] = y[j] + (t*y[i] >> prec) for k in xrange(1, j+1): H[i,k] = H[i,k] - (t*H[j,k] >> prec) for k in xrange(1, n+1): A[i,k] = A[i,k] - (t*A[j,k] >> prec) B[k,j] = B[k,j] + (t*B[k,i] >> prec) # Main algorithm for REP in range(maxsteps): # Step 1 m = -1 szmax = -1 for i in range(1, n): h = H[i,i] sz = (g**i * abs(h)) >> (prec*(i-1)) if sz > szmax: m = i szmax = sz # Step 2 y[m], y[m+1] = y[m+1], y[m] tmp = {} for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i] for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i] for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m] # Step 3 if m <= n - 2: t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec) # A zero element probably indicates that the precision has # been exhausted. XXX: this could be spurious, due to # using fixed-point arithmetic if not t0: break t1 = (H[m,m] << prec) // t0 t2 = (H[m,m+1] << prec) // t0 for i in xrange(m, n+1): t3 = H[i,m] t4 = H[i,m+1] H[i,m] = (t1*t3+t2*t4) >> prec H[i,m+1] = (-t2*t3+t1*t4) >> prec # Step 4 for i in xrange(m+1, n+1): for j in xrange(min(i-1, m+1), 0, -1): try: t = round_fixed((H[i,j] << prec)//H[j,j], prec) # Precision probably exhausted except ZeroDivisionError: break y[j] = y[j] + ((t*y[i]) >> prec) for k in xrange(1, j+1): H[i,k] = H[i,k] - (t*H[j,k] >> prec) for k in xrange(1, n+1): A[i,k] = A[i,k] - (t*A[j,k] >> prec) B[k,j] = B[k,j] + (t*B[k,i] >> prec) # Until a relation is found, the error typically decreases # slowly (e.g. a factor 1-10) with each step TODO: we could # compare err from two successive iterations. 
If there is a # large drop (several orders of magnitude), that indicates a # "high quality" relation was detected. Reporting this to # the user somehow might be useful. best_err = maxcoeff<<prec for i in xrange(1, n+1): err = abs(y[i]) # Maybe we are done? if err < tol: # We are done if the coefficients are acceptable vec = [int(round_fixed(B[j,i], prec) >> prec) for j in \ range(1,n+1)] if max(abs(v) for v in vec) < maxcoeff: if verbose: print("FOUND relation at iter %i/%i, error: %s" % \ (REP, maxsteps, ctx.nstr(err / ctx.mpf(2)**prec, 1))) return vec best_err = min(err, best_err) # Calculate a lower bound for the norm. We could do this # more exactly (using the Euclidean norm) but there is probably # no practical benefit. recnorm = max(abs(h) for h in H.values()) if recnorm: norm = ((1 << (2*prec)) // recnorm) >> prec norm //= 100 else: norm = ctx.inf if verbose: print("%i/%i: Error: %8s Norm: %s" % \ (REP, maxsteps, ctx.nstr(best_err / ctx.mpf(2)**prec, 1), norm)) if norm >= maxcoeff: break if verbose: print("CANCELLING after step %i/%i." % (REP, maxsteps)) print("Could not find an integer relation. Norm bound: %s" % norm) return None def findpoly(ctx, x, n=1, **kwargs): r""" ``findpoly(x, n)`` returns the coefficients of an integer polynomial `P` of degree at most `n` such that `P(x) \approx 0`. If no polynomial having `x` as a root can be found, :func:`~mpmath.findpoly` returns ``None``. :func:`~mpmath.findpoly` works by successively calling :func:`~mpmath.pslq` with the vectors `[1, x]`, `[1, x, x^2]`, `[1, x, x^2, x^3]`, ..., `[1, x, x^2, .., x^n]` as input. Keyword arguments given to :func:`~mpmath.findpoly` are forwarded verbatim to :func:`~mpmath.pslq`. In particular, you can specify a tolerance for `P(x)` with ``tol`` and a maximum permitted coefficient size with ``maxcoeff``. For large values of `n`, it is recommended to run :func:`~mpmath.findpoly` at high precision; preferably 50 digits or more. **Examples** By default (degree `n = 1`), :func:`~mpmath.findpoly` simply finds a linear polynomial with a rational root:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> findpoly(0.7) [-10, 7] The generated coefficient list is valid input to ``polyval`` and ``polyroots``:: >>> nprint(polyval(findpoly(phi, 2), phi), 1) -2.0e-16 >>> for r in polyroots(findpoly(phi, 2)): ... print(r) ... -0.618033988749895 1.61803398874989 Numbers of the form `m + n \sqrt p` for integers `(m, n, p)` are solutions to quadratic equations. As we find here, `1+\sqrt 2` is a root of the polynomial `x^2 - 2x - 1`:: >>> findpoly(1+sqrt(2), 2) [1, -2, -1] >>> findroot(lambda x: x**2 - 2*x - 1, 1) 2.4142135623731 Despite only containing square roots, the following number results in a polynomial of degree 4:: >>> findpoly(sqrt(2)+sqrt(3), 4) [1, 0, -10, 0, 1] In fact, `x^4 - 10x^2 + 1` is the *minimal polynomial* of `r = \sqrt 2 + \sqrt 3`, meaning that a rational polynomial of lower degree having `r` as a root does not exist. Given sufficient precision, :func:`~mpmath.findpoly` will usually find the correct minimal polynomial of a given algebraic number. **Non-algebraic numbers** If :func:`~mpmath.findpoly` fails to find a polynomial with given coefficient size and tolerance constraints, that means no such polynomial exists. We can verify that `\pi` is not an algebraic number of degree 3 with coefficients less than 1000:: >>> mp.dps = 15 >>> findpoly(pi, 3) >>> It is always possible to find an algebraic approximation of a number using one (or several) of the following methods: 1. 
Increasing the permitted degree 2. Allowing larger coefficients 3. Reducing the tolerance One example of each method is shown below:: >>> mp.dps = 15 >>> findpoly(pi, 4) [95, -545, 863, -183, -298] >>> findpoly(pi, 3, maxcoeff=10000) [836, -1734, -2658, -457] >>> findpoly(pi, 3, tol=1e-7) [-4, 22, -29, -2] It is unknown whether Euler's constant is transcendental (or even irrational). We can use :func:`~mpmath.findpoly` to check that if is an algebraic number, its minimal polynomial must have degree at least 7 and a coefficient of magnitude at least 1000000:: >>> mp.dps = 200 >>> findpoly(euler, 6, maxcoeff=10**6, tol=1e-100, maxsteps=1000) >>> Note that the high precision and strict tolerance is necessary for such high-degree runs, since otherwise unwanted low-accuracy approximations will be detected. It may also be necessary to set maxsteps high to prevent a premature exit (before the coefficient bound has been reached). Running with ``verbose=True`` to get an idea what is happening can be useful. """ x = ctx.mpf(x) if n < 1: raise ValueError("n cannot be less than 1") if x == 0: return [1, 0] xs = [ctx.mpf(1)] for i in range(1,n+1): xs.append(x**i) a = ctx.pslq(xs, **kwargs) if a is not None: return a[::-1] def fracgcd(p, q): x, y = p, q while y: x, y = y, x % y if x != 1: p //= x q //= x if q == 1: return p return p, q def pslqstring(r, constants): q = r[0] r = r[1:] s = [] for i in range(len(r)): p = r[i] if p: z = fracgcd(-p,q) cs = constants[i][1] if cs == '1': cs = '' else: cs = '*' + cs if isinstance(z, int_types): if z > 0: term = str(z) + cs else: term = ("(%s)" % z) + cs else: term = ("(%s/%s)" % z) + cs s.append(term) s = ' + '.join(s) if '+' in s or '*' in s: s = '(' + s + ')' return s or '0' def prodstring(r, constants): q = r[0] r = r[1:] num = [] den = [] for i in range(len(r)): p = r[i] if p: z = fracgcd(-p,q) cs = constants[i][1] if isinstance(z, int_types): if abs(z) == 1: t = cs else: t = '%s**%s' % (cs, abs(z)) ([num,den][z<0]).append(t) else: t = '%s**(%s/%s)' % (cs, abs(z[0]), z[1]) ([num,den][z[0]<0]).append(t) num = '*'.join(num) den = '*'.join(den) if num and den: return "(%s)/(%s)" % (num, den) if num: return num if den: return "1/(%s)" % den def quadraticstring(ctx,t,a,b,c): if c < 0: a,b,c = -a,-b,-c u1 = (-b+ctx.sqrt(b**2-4*a*c))/(2*c) u2 = (-b-ctx.sqrt(b**2-4*a*c))/(2*c) if abs(u1-t) < abs(u2-t): if b: s = '((%s+sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c) else: s = '(sqrt(%s)/%s)' % (-4*a*c,2*c) else: if b: s = '((%s-sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c) else: s = '(-sqrt(%s)/%s)' % (-4*a*c,2*c) return s # Transformation y = f(x,c), with inverse function x = f(y,c) # The third entry indicates whether the transformation is # redundant when c = 1 transforms = [ (lambda ctx,x,c: x*c, '$y/$c', 0), (lambda ctx,x,c: x/c, '$c*$y', 1), (lambda ctx,x,c: c/x, '$c/$y', 0), (lambda ctx,x,c: (x*c)**2, 'sqrt($y)/$c', 0), (lambda ctx,x,c: (x/c)**2, '$c*sqrt($y)', 1), (lambda ctx,x,c: (c/x)**2, '$c/sqrt($y)', 0), (lambda ctx,x,c: c*x**2, 'sqrt($y)/sqrt($c)', 1), (lambda ctx,x,c: x**2/c, 'sqrt($c)*sqrt($y)', 1), (lambda ctx,x,c: c/x**2, 'sqrt($c)/sqrt($y)', 1), (lambda ctx,x,c: ctx.sqrt(x*c), '$y**2/$c', 0), (lambda ctx,x,c: ctx.sqrt(x/c), '$c*$y**2', 1), (lambda ctx,x,c: ctx.sqrt(c/x), '$c/$y**2', 0), (lambda ctx,x,c: c*ctx.sqrt(x), '$y**2/$c**2', 1), (lambda ctx,x,c: ctx.sqrt(x)/c, '$c**2*$y**2', 1), (lambda ctx,x,c: c/ctx.sqrt(x), '$c**2/$y**2', 1), (lambda ctx,x,c: ctx.exp(x*c), 'log($y)/$c', 0), (lambda ctx,x,c: ctx.exp(x/c), '$c*log($y)', 1), (lambda ctx,x,c: ctx.exp(c/x), 
'$c/log($y)', 0), (lambda ctx,x,c: c*ctx.exp(x), 'log($y/$c)', 1), (lambda ctx,x,c: ctx.exp(x)/c, 'log($c*$y)', 1), (lambda ctx,x,c: c/ctx.exp(x), 'log($c/$y)', 0), (lambda ctx,x,c: ctx.ln(x*c), 'exp($y)/$c', 0), (lambda ctx,x,c: ctx.ln(x/c), '$c*exp($y)', 1), (lambda ctx,x,c: ctx.ln(c/x), '$c/exp($y)', 0), (lambda ctx,x,c: c*ctx.ln(x), 'exp($y/$c)', 1), (lambda ctx,x,c: ctx.ln(x)/c, 'exp($c*$y)', 1), (lambda ctx,x,c: c/ctx.ln(x), 'exp($c/$y)', 0), ] def identify(ctx, x, constants=[], tol=None, maxcoeff=1000, full=False, verbose=False): """ Given a real number `x`, ``identify(x)`` attempts to find an exact formula for `x`. This formula is returned as a string. If no match is found, ``None`` is returned. With ``full=True``, a list of matching formulas is returned. As a simple example, :func:`~mpmath.identify` will find an algebraic formula for the golden ratio:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> identify(phi) '((1+sqrt(5))/2)' :func:`~mpmath.identify` can identify simple algebraic numbers and simple combinations of given base constants, as well as certain basic transformations thereof. More specifically, :func:`~mpmath.identify` looks for the following: 1. Fractions 2. Quadratic algebraic numbers 3. Rational linear combinations of the base constants 4. Any of the above after first transforming `x` into `f(x)` where `f(x)` is `1/x`, `\sqrt x`, `x^2`, `\log x` or `\exp x`, either directly or with `x` or `f(x)` multiplied or divided by one of the base constants 5. Products of fractional powers of the base constants and small integers Base constants can be given as a list of strings representing mpmath expressions (:func:`~mpmath.identify` will ``eval`` the strings to numerical values and use the original strings for the output), or as a dict of formula:value pairs. In order not to produce spurious results, :func:`~mpmath.identify` should be used with high precision; preferably 50 digits or more. **Examples** Simple identifications can be performed safely at standard precision. Here the default recognition of rational, algebraic, and exp/log of algebraic numbers is demonstrated:: >>> mp.dps = 15 >>> identify(0.22222222222222222) '(2/9)' >>> identify(1.9662210973805663) 'sqrt(((24+sqrt(48))/8))' >>> identify(4.1132503787829275) 'exp((sqrt(8)/2))' >>> identify(0.881373587019543) 'log(((2+sqrt(8))/2))' By default, :func:`~mpmath.identify` does not recognize `\pi`. At standard precision it finds a not too useful approximation. At slightly increased precision, this approximation is no longer accurate enough and :func:`~mpmath.identify` more correctly returns ``None``:: >>> identify(pi) '(2**(176/117)*3**(20/117)*5**(35/39))/(7**(92/117))' >>> mp.dps = 30 >>> identify(pi) >>> Numbers such as `\pi`, and simple combinations of user-defined constants, can be identified if they are provided explicitly:: >>> identify(3*pi-2*e, ['pi', 'e']) '(3*pi + (-2)*e)' Here is an example using a dict of constants. Note that the constants need not be "atomic"; :func:`~mpmath.identify` can just as well express the given number in terms of expressions given by formulas:: >>> identify(pi+e, {'a':pi+2, 'b':2*e}) '((-2) + 1*a + (1/2)*b)' Next, we attempt some identifications with a set of base constants. It is necessary to increase the precision a bit. 
>>> mp.dps = 50 >>> base = ['sqrt(2)','pi','log(2)'] >>> identify(0.25, base) '(1/4)' >>> identify(3*pi + 2*sqrt(2) + 5*log(2)/7, base) '(2*sqrt(2) + 3*pi + (5/7)*log(2))' >>> identify(exp(pi+2), base) 'exp((2 + 1*pi))' >>> identify(1/(3+sqrt(2)), base) '((3/7) + (-1/7)*sqrt(2))' >>> identify(sqrt(2)/(3*pi+4), base) 'sqrt(2)/(4 + 3*pi)' >>> identify(5**(mpf(1)/3)*pi*log(2)**2, base) '5**(1/3)*pi*log(2)**2' An example of an erroneous solution being found when too low precision is used:: >>> mp.dps = 15 >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)']) '((11/25) + (-158/75)*pi + (76/75)*e + (44/15)*sqrt(2))' >>> mp.dps = 50 >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)']) '1/(3*pi + (-4)*e + 2*sqrt(2))' **Finding approximate solutions** The tolerance ``tol`` defaults to 3/4 of the working precision. Lowering the tolerance is useful for finding approximate matches. We can for example try to generate approximations for pi:: >>> mp.dps = 15 >>> identify(pi, tol=1e-2) '(22/7)' >>> identify(pi, tol=1e-3) '(355/113)' >>> identify(pi, tol=1e-10) '(5**(339/269))/(2**(64/269)*3**(13/269)*7**(92/269))' With ``full=True``, and by supplying a few base constants, ``identify`` can generate almost endless lists of approximations for any number (the output below has been truncated to show only the first few):: >>> for p in identify(pi, ['e', 'catalan'], tol=1e-5, full=True): ... print(p) ... # doctest: +ELLIPSIS e/log((6 + (-4/3)*e)) (3**3*5*e*catalan**2)/(2*7**2) sqrt(((-13) + 1*e + 22*catalan)) log(((-6) + 24*e + 4*catalan)/e) exp(catalan*((-1/5) + (8/15)*e)) catalan*(6 + (-6)*e + 15*catalan) sqrt((5 + 26*e + (-3)*catalan))/e e*sqrt(((-27) + 2*e + 25*catalan)) log(((-1) + (-11)*e + 59*catalan)) ((3/20) + (21/20)*e + (3/20)*catalan) ... The numerical values are roughly as close to `\pi` as permitted by the specified tolerance: >>> e/log(6-4*e/3) 3.14157719846001 >>> 135*e*catalan**2/98 3.14166950419369 >>> sqrt(e-13+22*catalan) 3.14158000062992 >>> log(24*e-6+4*catalan)-1 3.14158791577159 **Symbolic processing** The output formula can be evaluated as a Python expression. Note however that if fractions (like '2/3') are present in the formula, Python's :func:`~mpmath.eval()` may erroneously perform integer division. Note also that the output is not necessarily in the algebraically simplest form:: >>> identify(sqrt(2)) '(sqrt(8)/2)' As a solution to both problems, consider using SymPy's :func:`~mpmath.sympify` to convert the formula into a symbolic expression. SymPy can be used to pretty-print or further simplify the formula symbolically:: >>> from sympy import sympify # doctest: +SKIP >>> sympify(identify(sqrt(2))) # doctest: +SKIP 2**(1/2) Sometimes :func:`~mpmath.identify` can simplify an expression further than a symbolic algorithm:: >>> from sympy import simplify # doctest: +SKIP >>> x = sympify('-1/(-3/2+(1/2)*5**(1/2))*(3/2-1/2*5**(1/2))**(1/2)') # doctest: +SKIP >>> x # doctest: +SKIP (3/2 - 5**(1/2)/2)**(-1/2) >>> x = simplify(x) # doctest: +SKIP >>> x # doctest: +SKIP 2/(6 - 2*5**(1/2))**(1/2) >>> mp.dps = 30 # doctest: +SKIP >>> x = sympify(identify(x.evalf(30))) # doctest: +SKIP >>> x # doctest: +SKIP 1/2 + 5**(1/2)/2 (In fact, this functionality is available directly in SymPy as the function :func:`~mpmath.nsimplify`, which is essentially a wrapper for :func:`~mpmath.identify`.) **Miscellaneous issues and limitations** The input `x` must be a real number. All base constants must be positive real numbers and must not be rationals or rational linear combinations of each other. 
The worst-case computation time grows quickly with the number of base constants. Already with 3 or 4 base constants, :func:`~mpmath.identify` may require several seconds to finish. To search for relations among a large number of constants, you should consider using :func:`~mpmath.pslq` directly. The extended transformations are applied to x, not the constants separately. As a result, ``identify`` will for example be able to recognize ``exp(2*pi+3)`` with ``pi`` given as a base constant, but not ``2*exp(pi)+3``. It will be able to recognize the latter if ``exp(pi)`` is given explicitly as a base constant. """ solutions = [] def addsolution(s): if verbose: print("Found: ", s) solutions.append(s) x = ctx.mpf(x) # Further along, x will be assumed positive if x == 0: if full: return ['0'] else: return '0' if x < 0: sol = ctx.identify(-x, constants, tol, maxcoeff, full, verbose) if sol is None: return sol if full: return ["-(%s)"%s for s in sol] else: return "-(%s)" % sol if tol: tol = ctx.mpf(tol) else: tol = ctx.eps**0.7 M = maxcoeff if constants: if isinstance(constants, dict): constants = [(ctx.mpf(v), name) for (name, v) in sorted(constants.items())] else: namespace = dict((name, getattr(ctx,name)) for name in dir(ctx)) constants = [(eval(p, namespace), p) for p in constants] else: constants = [] # We always want to find at least rational terms if 1 not in [value for (name, value) in constants]: constants = [(ctx.mpf(1), '1')] + constants # PSLQ with simple algebraic and functional transformations for ft, ftn, red in transforms: for c, cn in constants: if red and cn == '1': continue t = ft(ctx,x,c) # Prevent exponential transforms from wreaking havoc if abs(t) > M**2 or abs(t) < tol: continue # Linear combination of base constants r = ctx.pslq([t] + [a[0] for a in constants], tol, M) s = None if r is not None and max(abs(uw) for uw in r) <= M and r[0]: s = pslqstring(r, constants) # Quadratic algebraic numbers else: q = ctx.pslq([ctx.one, t, t**2], tol, M) if q is not None and len(q) == 3 and q[2]: aa, bb, cc = q if max(abs(aa),abs(bb),abs(cc)) <= M: s = quadraticstring(ctx,t,aa,bb,cc) if s: if cn == '1' and ('/$c' in ftn): s = ftn.replace('$y', s).replace('/$c', '') else: s = ftn.replace('$y', s).replace('$c', cn) addsolution(s) if not full: return solutions[0] if verbose: print(".") # Check for a direct multiplicative formula if x != 1: # Allow fractional powers of fractions ilogs = [2,3,5,7] # Watch out for existing fractional powers of fractions logs = [] for a, s in constants: if not sum(bool(ctx.findpoly(ctx.ln(a)/ctx.ln(i),1)) for i in ilogs): logs.append((ctx.ln(a), s)) logs = [(ctx.ln(i),str(i)) for i in ilogs] + logs r = ctx.pslq([ctx.ln(x)] + [a[0] for a in logs], tol, M) if r is not None and max(abs(uw) for uw in r) <= M and r[0]: addsolution(prodstring(r, logs)) if not full: return solutions[0] if full: return sorted(solutions, key=len) else: return None IdentificationMethods.pslq = pslq IdentificationMethods.findpoly = findpoly IdentificationMethods.identify = identify if __name__ == '__main__': import doctest doctest.testmod()
file_length: 29,269
avg_line_length: 33.598109
max_line_length: 94
extension_type: py
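Illustrative sketch (editorial, not part of the dataset rows): the pslq() implementation in the cell above works on fixed-point integers rather than mpf values, carrying a real x as the integer int(x * 2**prec), and the module-level round_fixed() helper snaps such a value to the nearest whole multiple of 2**prec. The import path mpmath.identification is an assumption based on this being mpmath's identification module; the public entry points (pslq, findpoly, identify) are re-exported by the mpmath package itself, and the relation printed at the end may come back with the opposite overall sign.

from mpmath import mp, pslq
from mpmath.identification import round_fixed   # assumed import path for the helper defined above

prec = 16
x = (3 << prec) // 2                        # 1.5 in fixed point, i.e. int(1.5 * 2**prec)
assert round_fixed(x, prec) == 2 << prec    # 1.5 rounds up to 2, still in fixed-point form
assert round_fixed(1 << prec, prec) == 1 << prec   # exact integers are left unchanged

# The public routine takes ordinary mpmath numbers and searches for an integer relation
mp.dps = 30
print(pslq([mp.pi, mp.e, 3*mp.pi - 2*mp.e]))   # expected: [3, -2, -1] up to an overall sign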
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/rational.py
import operator import sys from .libmp import int_types, mpf_hash, bitcount, from_man_exp, HASH_MODULUS new = object.__new__ def create_reduced(p, q, _cache={}): key = p, q if key in _cache: return _cache[key] x, y = p, q while y: x, y = y, x % y if x != 1: p //= x q //= x v = new(mpq) v._mpq_ = p, q # Speedup integers, half-integers and other small fractions if q <= 4 and abs(key[0]) < 100: _cache[key] = v return v class mpq(object): """ Exact rational type, currently only intended for internal use. """ __slots__ = ["_mpq_"] def __new__(cls, p, q=1): if type(p) is tuple: p, q = p elif hasattr(p, '_mpq_'): p, q = p._mpq_ return create_reduced(p, q) def __repr__(s): return "mpq(%s,%s)" % s._mpq_ def __str__(s): return "(%s/%s)" % s._mpq_ def __int__(s): a, b = s._mpq_ return a // b def __nonzero__(s): return bool(s._mpq_[0]) __bool__ = __nonzero__ def __hash__(s): a, b = s._mpq_ if sys.version >= "3.2": inverse = pow(b, HASH_MODULUS-2, HASH_MODULUS) if not inverse: h = sys.hash_info.inf else: h = (abs(a) * inverse) % HASH_MODULUS if a < 0: h = -h if h == -1: h = -2 return h else: if b == 1: return hash(a) # Power of two: mpf compatible hash if not (b & (b-1)): return mpf_hash(from_man_exp(a, 1-bitcount(b))) return hash((a,b)) def __eq__(s, t): ttype = type(t) if ttype is mpq: return s._mpq_ == t._mpq_ if ttype in int_types: a, b = s._mpq_ if b != 1: return False return a == t return NotImplemented def __ne__(s, t): ttype = type(t) if ttype is mpq: return s._mpq_ != t._mpq_ if ttype in int_types: a, b = s._mpq_ if b != 1: return True return a != t return NotImplemented def _cmp(s, t, op): ttype = type(t) if ttype in int_types: a, b = s._mpq_ return op(a, t*b) if ttype is mpq: a, b = s._mpq_ c, d = t._mpq_ return op(a*d, b*c) return NotImplementedError def __lt__(s, t): return s._cmp(t, operator.lt) def __le__(s, t): return s._cmp(t, operator.le) def __gt__(s, t): return s._cmp(t, operator.gt) def __ge__(s, t): return s._cmp(t, operator.ge) def __abs__(s): a, b = s._mpq_ if a >= 0: return s v = new(mpq) v._mpq_ = -a, b return v def __neg__(s): a, b = s._mpq_ v = new(mpq) v._mpq_ = -a, b return v def __pos__(s): return s def __add__(s, t): ttype = type(t) if ttype is mpq: a, b = s._mpq_ c, d = t._mpq_ return create_reduced(a*d+b*c, b*d) if ttype in int_types: a, b = s._mpq_ v = new(mpq) v._mpq_ = a+b*t, b return v return NotImplemented __radd__ = __add__ def __sub__(s, t): ttype = type(t) if ttype is mpq: a, b = s._mpq_ c, d = t._mpq_ return create_reduced(a*d-b*c, b*d) if ttype in int_types: a, b = s._mpq_ v = new(mpq) v._mpq_ = a-b*t, b return v return NotImplemented def __rsub__(s, t): ttype = type(t) if ttype is mpq: a, b = s._mpq_ c, d = t._mpq_ return create_reduced(b*c-a*d, b*d) if ttype in int_types: a, b = s._mpq_ v = new(mpq) v._mpq_ = b*t-a, b return v return NotImplemented def __mul__(s, t): ttype = type(t) if ttype is mpq: a, b = s._mpq_ c, d = t._mpq_ return create_reduced(a*c, b*d) if ttype in int_types: a, b = s._mpq_ return create_reduced(a*t, b) return NotImplemented __rmul__ = __mul__ def __div__(s, t): ttype = type(t) if ttype is mpq: a, b = s._mpq_ c, d = t._mpq_ return create_reduced(a*d, b*c) if ttype in int_types: a, b = s._mpq_ return create_reduced(a, b*t) return NotImplemented def __rdiv__(s, t): ttype = type(t) if ttype is mpq: a, b = s._mpq_ c, d = t._mpq_ return create_reduced(b*c, a*d) if ttype in int_types: a, b = s._mpq_ return create_reduced(b*t, a) return NotImplemented def __pow__(s, t): ttype = type(t) if ttype in int_types: a, b = s._mpq_ if t: if t < 0: 
a, b, t = b, a, -t v = new(mpq) v._mpq_ = a**t, b**t return v raise ZeroDivisionError return NotImplemented mpq_1 = mpq((1,1)) mpq_0 = mpq((0,1)) mpq_1_2 = mpq((1,2)) mpq_3_2 = mpq((3,2)) mpq_1_4 = mpq((1,4)) mpq_1_16 = mpq((1,16)) mpq_3_16 = mpq((3,16)) mpq_5_2 = mpq((5,2)) mpq_3_4 = mpq((3,4)) mpq_7_4 = mpq((7,4)) mpq_5_4 = mpq((5,4)) # Register with "numbers" ABC # We do not subclass, hence we do not use the @abstractmethod checks. While # this is less invasive it may turn out that we do not actually support # parts of the expected interfaces. See # http://docs.python.org/2/library/numbers.html for list of abstract # methods. try: import numbers numbers.Rational.register(mpq) except ImportError: pass
file_length: 5,970
avg_line_length: 23.775934
max_line_length: 79
extension_type: py
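Illustrative sketch (editorial, not part of the dataset rows): basic use of the mpq class from the mpmath/rational.py cell above. The class docstring marks it as intended for internal use, so this is illustration only; the behaviour follows directly from the code shown (create_reduced() divides out the gcd, __str__ prints "(p/q)", __int__ floors, and comparisons accept plain ints).

from mpmath.rational import mpq

a = mpq(6, 4)                  # create_reduced() cancels the gcd, giving 3/2
b = mpq(1, 3)
print(a)                       # (3/2)
print(a + b)                   # (11/6)
print(a * 4)                   # (6/1), integer operands stay exact
print(a ** 2)                  # (9/4)
print(int(a))                  # 1, via the floor division in __int__
print(a == mpq(3, 2), a < 2)   # True True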
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/ctx_fp.py
from .ctx_base import StandardBaseContext import math import cmath from . import math2 from . import function_docs from .libmp import mpf_bernoulli, to_float, int_types from . import libmp class FPContext(StandardBaseContext): """ Context for fast low-precision arithmetic (53-bit precision, giving at most about 15-digit accuracy), using Python's builtin float and complex. """ def __init__(ctx): StandardBaseContext.__init__(ctx) # Override SpecialFunctions implementation ctx.loggamma = math2.loggamma ctx._bernoulli_cache = {} ctx.pretty = False ctx._init_aliases() _mpq = lambda cls, x: float(x[0])/x[1] NoConvergence = libmp.NoConvergence def _get_prec(ctx): return 53 def _set_prec(ctx, p): return def _get_dps(ctx): return 15 def _set_dps(ctx, p): return _fixed_precision = True prec = property(_get_prec, _set_prec) dps = property(_get_dps, _set_dps) zero = 0.0 one = 1.0 eps = math2.EPS inf = math2.INF ninf = math2.NINF nan = math2.NAN j = 1j # Called by SpecialFunctions.__init__() @classmethod def _wrap_specfun(cls, name, f, wrap): if wrap: def f_wrapped(ctx, *args, **kwargs): convert = ctx.convert args = [convert(a) for a in args] return f(ctx, *args, **kwargs) else: f_wrapped = f f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__) setattr(cls, name, f_wrapped) def bernoulli(ctx, n): cache = ctx._bernoulli_cache if n in cache: return cache[n] cache[n] = to_float(mpf_bernoulli(n, 53, 'n'), strict=True) return cache[n] pi = math2.pi e = math2.e euler = math2.euler sqrt2 = 1.4142135623730950488 sqrt5 = 2.2360679774997896964 phi = 1.6180339887498948482 ln2 = 0.69314718055994530942 ln10 = 2.302585092994045684 euler = 0.57721566490153286061 catalan = 0.91596559417721901505 khinchin = 2.6854520010653064453 apery = 1.2020569031595942854 glaisher = 1.2824271291006226369 absmin = absmax = abs def is_special(ctx, x): return x - x != 0.0 def isnan(ctx, x): return x != x def isinf(ctx, x): return abs(x) == math2.INF def isnormal(ctx, x): if x: return x - x == 0.0 return False def isnpint(ctx, x): if type(x) is complex: if x.imag: return False x = x.real return x <= 0.0 and round(x) == x mpf = float mpc = complex def convert(ctx, x): try: return float(x) except: return complex(x) power = staticmethod(math2.pow) sqrt = staticmethod(math2.sqrt) exp = staticmethod(math2.exp) ln = log = staticmethod(math2.log) cos = staticmethod(math2.cos) sin = staticmethod(math2.sin) tan = staticmethod(math2.tan) cos_sin = staticmethod(math2.cos_sin) acos = staticmethod(math2.acos) asin = staticmethod(math2.asin) atan = staticmethod(math2.atan) cosh = staticmethod(math2.cosh) sinh = staticmethod(math2.sinh) tanh = staticmethod(math2.tanh) gamma = staticmethod(math2.gamma) rgamma = staticmethod(math2.rgamma) fac = factorial = staticmethod(math2.factorial) floor = staticmethod(math2.floor) ceil = staticmethod(math2.ceil) cospi = staticmethod(math2.cospi) sinpi = staticmethod(math2.sinpi) cbrt = staticmethod(math2.cbrt) _nthroot = staticmethod(math2.nthroot) _ei = staticmethod(math2.ei) _e1 = staticmethod(math2.e1) _zeta = _zeta_int = staticmethod(math2.zeta) # XXX: math2 def arg(ctx, z): z = complex(z) return math.atan2(z.imag, z.real) def expj(ctx, x): return ctx.exp(ctx.j*x) def expjpi(ctx, x): return ctx.exp(ctx.j*ctx.pi*x) ldexp = math.ldexp frexp = math.frexp def mag(ctx, z): if z: return ctx.frexp(abs(z))[1] return ctx.ninf def isint(ctx, z): if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5 if z.imag: return False z = z.real try: return z == int(z) except: return False def 
nint_distance(ctx, z): if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5 n = round(z.real) else: n = round(z) if n == z: return n, ctx.ninf return n, ctx.mag(abs(z-n)) def _convert_param(ctx, z): if type(z) is tuple: p, q = z return ctx.mpf(p) / q, 'R' if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5 intz = int(z.real) else: intz = int(z) if z == intz: return intz, 'Z' return z, 'R' def _is_real_type(ctx, z): return isinstance(z, float) or isinstance(z, int_types) def _is_complex_type(ctx, z): return isinstance(z, complex) def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs): coeffs = list(coeffs) num = range(p) den = range(p,p+q) tol = ctx.eps s = t = 1.0 k = 0 while 1: for i in num: t *= (coeffs[i]+k) for i in den: t /= (coeffs[i]+k) k += 1; t /= k; t *= z; s += t if abs(t) < tol: return s if k > maxterms: raise ctx.NoConvergence def atan2(ctx, x, y): return math.atan2(x, y) def psi(ctx, m, z): m = int(m) if m == 0: return ctx.digamma(z) return (-1)**(m+1) * ctx.fac(m) * ctx.zeta(m+1, z) digamma = staticmethod(math2.digamma) def harmonic(ctx, x): x = ctx.convert(x) if x == 0 or x == 1: return x return ctx.digamma(x+1) + ctx.euler nstr = str def to_fixed(ctx, x, prec): return int(math.ldexp(x, prec)) def rand(ctx): import random return random.random() _erf = staticmethod(math2.erf) _erfc = staticmethod(math2.erfc) def sum_accurately(ctx, terms, check_step=1): s = ctx.zero k = 0 for term in terms(): s += term if (not k % check_step) and term: if abs(term) <= 1e-18*abs(s): break k += 1 return s
file_length: 6,572
avg_line_length: 24.877953
max_line_length: 79
extension_type: py
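Illustrative sketch (editorial, not part of the dataset rows): FPContext is the class behind mpmath's fp context object, a fixed 53-bit (double precision) counterpart to the arbitrary-precision mp context; that top-level fp name is assumed here rather than shown in the cell itself. Results are plain Python floats, so the values in the comments are approximate.

from mpmath import fp, mp

print(fp.prec, fp.dps)   # 53 15, always: the _set_prec/_set_dps methods above are no-ops
fp.dps = 100
print(fp.dps)            # still 15

print(fp.sqrt(2))        # ~1.4142135623730951, an ordinary float
print(fp.zeta(2))        # ~1.6449340668482264, i.e. pi**2/6
print(fp.gamma(0.5))     # ~1.7724538509055159, i.e. sqrt(pi)

mp.dps = 30
print(mp.sqrt(2))        # the same value, but as an mpf carrying 30 significant digits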
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/ctx_mp.py
""" This module defines the mpf, mpc classes, and standard functions for operating with them. """ __docformat__ = 'plaintext' import re from .ctx_base import StandardBaseContext from .libmp.backend import basestring, BACKEND from . import libmp from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps, round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps, ComplexResult, to_pickable, from_pickable, normalize, from_int, from_float, from_str, to_int, to_float, to_str, from_rational, from_man_exp, fone, fzero, finf, fninf, fnan, mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int, mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod, mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge, mpf_hash, mpf_rand, mpf_sum, bitcount, to_fixed, mpc_to_str, mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate, mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf, mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int, mpc_mpf_div, mpf_pow, mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10, mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin, mpf_glaisher, mpf_twinprime, mpf_mertens, int_types) from . import function_docs from . import rational new = object.__new__ get_complex = re.compile(r'^\(?(?P<re>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?)??' r'(?P<im>[\+\-]?\d*\.?\d*(e[\+\-]?\d+)?j)?\)?$') if BACKEND == 'sage': from sage.libs.mpmath.ext_main import Context as BaseMPContext # pickle hack import sage.libs.mpmath.ext_main as _mpf_module else: from .ctx_mp_python import PythonMPContext as BaseMPContext from . import ctx_mp_python as _mpf_module from .ctx_mp_python import _mpf, _mpc, mpnumeric class MPContext(BaseMPContext, StandardBaseContext): """ Context for multiprecision arithmetic with a global precision. 
""" def __init__(ctx): BaseMPContext.__init__(ctx) ctx.trap_complex = False ctx.pretty = False ctx.types = [ctx.mpf, ctx.mpc, ctx.constant] ctx._mpq = rational.mpq ctx.default() StandardBaseContext.__init__(ctx) ctx.mpq = rational.mpq ctx.init_builtins() ctx.hyp_summators = {} ctx._init_aliases() # XXX: automate try: ctx.bernoulli.im_func.func_doc = function_docs.bernoulli ctx.primepi.im_func.func_doc = function_docs.primepi ctx.psi.im_func.func_doc = function_docs.psi ctx.atan2.im_func.func_doc = function_docs.atan2 except AttributeError: # python 3 ctx.bernoulli.__func__.func_doc = function_docs.bernoulli ctx.primepi.__func__.func_doc = function_docs.primepi ctx.psi.__func__.func_doc = function_docs.psi ctx.atan2.__func__.func_doc = function_docs.atan2 ctx.digamma.func_doc = function_docs.digamma ctx.cospi.func_doc = function_docs.cospi ctx.sinpi.func_doc = function_docs.sinpi def init_builtins(ctx): mpf = ctx.mpf mpc = ctx.mpc # Exact constants ctx.one = ctx.make_mpf(fone) ctx.zero = ctx.make_mpf(fzero) ctx.j = ctx.make_mpc((fzero,fone)) ctx.inf = ctx.make_mpf(finf) ctx.ninf = ctx.make_mpf(fninf) ctx.nan = ctx.make_mpf(fnan) eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1), "epsilon of working precision", "eps") ctx.eps = eps # Approximate constants ctx.pi = ctx.constant(mpf_pi, "pi", "pi") ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2") ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10") ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi") ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e") ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler") ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan") ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin") ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher") ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery") ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree") ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime") ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens") # Standard functions ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt) ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt) ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log) ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan) ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp) ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj) ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi) ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin) ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos) ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan) ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh) ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh) ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh) ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin) ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos) ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan) ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh) ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh) ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh) ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi) ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, 
libmp.mpc_cos_pi) ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor) ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil) ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint) ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac) ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci) ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma) ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma) ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma) ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial) ctx.gamma_old = ctx._wrap_libmp_function(libmp.mpf_gamma_old, libmp.mpc_gamma_old) ctx.fac_old = ctx.factorial_old = ctx._wrap_libmp_function(libmp.mpf_factorial_old, libmp.mpc_factorial_old) ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0) ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic) ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei) ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1) ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci) ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si) ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk) ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe) ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1) ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None) ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None) ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta) ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta) # Faster versions ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt) ctx.exp = getattr(ctx, "_sage_exp", ctx.exp) ctx.ln = getattr(ctx, "_sage_ln", ctx.ln) ctx.cos = getattr(ctx, "_sage_cos", ctx.cos) ctx.sin = getattr(ctx, "_sage_sin", ctx.sin) def to_fixed(ctx, x, prec): return x.to_fixed(prec) def hypot(ctx, x, y): r""" Computes the Euclidean norm of the vector `(x, y)`, equal to `\sqrt{x^2 + y^2}`. 
Both `x` and `y` must be real.""" x = ctx.convert(x) y = ctx.convert(y) return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding)) def _gamma_upper_int(ctx, n, z): n = int(ctx._re(n)) if n == 0: return ctx.e1(z) if not hasattr(z, '_mpf_'): raise NotImplementedError prec, rounding = ctx._prec_rounding real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True) if imag is None: return ctx.make_mpf(real) else: return ctx.make_mpc((real, imag)) def _expint_int(ctx, n, z): n = int(n) if n == 1: return ctx.e1(z) if not hasattr(z, '_mpf_'): raise NotImplementedError prec, rounding = ctx._prec_rounding real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding) if imag is None: return ctx.make_mpf(real) else: return ctx.make_mpc((real, imag)) def _nthroot(ctx, x, n): if hasattr(x, '_mpf_'): try: return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding)) except ComplexResult: if ctx.trap_complex: raise x = (x._mpf_, libmp.fzero) else: x = x._mpc_ return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding)) def _besselj(ctx, n, z): prec, rounding = ctx._prec_rounding if hasattr(z, '_mpf_'): return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding)) elif hasattr(z, '_mpc_'): return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding)) def _agm(ctx, a, b=1): prec, rounding = ctx._prec_rounding if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'): try: v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding) return ctx.make_mpf(v) except ComplexResult: pass if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero) else: a = a._mpc_ if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero) else: b = b._mpc_ return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding)) def bernoulli(ctx, n): return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding)) def _zeta_int(ctx, n): return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding)) def atan2(ctx, y, x): x = ctx.convert(x) y = ctx.convert(y) return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding)) def psi(ctx, m, z): z = ctx.convert(z) m = int(m) if ctx._is_real_type(z): return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding)) else: return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding)) def cos_sin(ctx, x, **kwargs): if type(x) not in ctx.types: x = ctx.convert(x) prec, rounding = ctx._parse_prec(kwargs) if hasattr(x, '_mpf_'): c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding) return ctx.make_mpf(c), ctx.make_mpf(s) elif hasattr(x, '_mpc_'): c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding) return ctx.make_mpc(c), ctx.make_mpc(s) else: return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs) def cospi_sinpi(ctx, x, **kwargs): if type(x) not in ctx.types: x = ctx.convert(x) prec, rounding = ctx._parse_prec(kwargs) if hasattr(x, '_mpf_'): c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding) return ctx.make_mpf(c), ctx.make_mpf(s) elif hasattr(x, '_mpc_'): c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding) return ctx.make_mpc(c), ctx.make_mpc(s) else: return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs) def clone(ctx): """ Create a copy of the context, with the same working precision. """ a = ctx.__class__() a.prec = ctx.prec return a # Several helper methods # TODO: add more of these, make consistent, write docstrings, ... 
def _is_real_type(ctx, x): if hasattr(x, '_mpc_') or type(x) is complex: return False return True def _is_complex_type(ctx, x): if hasattr(x, '_mpc_') or type(x) is complex: return True return False def isnan(ctx, x): """ Return *True* if *x* is a NaN (not-a-number), or for a complex number, whether either the real or complex part is NaN; otherwise return *False*:: >>> from mpmath import * >>> isnan(3.14) False >>> isnan(nan) True >>> isnan(mpc(3.14,2.72)) False >>> isnan(mpc(3.14,nan)) True """ if hasattr(x, "_mpf_"): return x._mpf_ == fnan if hasattr(x, "_mpc_"): return fnan in x._mpc_ if isinstance(x, int_types) or isinstance(x, rational.mpq): return False x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isnan(x) raise TypeError("isnan() needs a number as input") def isfinite(ctx, x): """ Return *True* if *x* is a finite number, i.e. neither an infinity or a NaN. >>> from mpmath import * >>> isfinite(inf) False >>> isfinite(-inf) False >>> isfinite(3) True >>> isfinite(nan) False >>> isfinite(3+4j) True >>> isfinite(mpc(3,inf)) False >>> isfinite(mpc(nan,3)) False """ if ctx.isinf(x) or ctx.isnan(x): return False return True def isnpint(ctx, x): """ Determine if *x* is a nonpositive integer. """ if not x: return True if hasattr(x, '_mpf_'): sign, man, exp, bc = x._mpf_ return sign and exp >= 0 if hasattr(x, '_mpc_'): return not x.imag and ctx.isnpint(x.real) if type(x) in int_types: return x <= 0 if isinstance(x, ctx.mpq): p, q = x._mpq_ if not p: return True return q == 1 and p <= 0 return ctx.isnpint(ctx.convert(x)) def __str__(ctx): lines = ["Mpmath settings:", (" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]", (" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]", (" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]", ] return "\n".join(lines) @property def _repr_digits(ctx): return repr_dps(ctx._prec) @property def _str_digits(ctx): return ctx._dps def extraprec(ctx, n, normalize_output=False): """ The block with extraprec(n): <code> increases the precision n bits, executes <code>, and then restores the precision. extraprec(n)(f) returns a decorated version of the function f that increases the working precision by n bits before execution, and restores the parent precision afterwards. With normalize_output=True, it rounds the return value to the parent precision. """ return PrecisionManager(ctx, lambda p: p + n, None, normalize_output) def extradps(ctx, n, normalize_output=False): """ This function is analogous to extraprec (see documentation) but changes the decimal precision instead of the number of bits. """ return PrecisionManager(ctx, None, lambda d: d + n, normalize_output) def workprec(ctx, n, normalize_output=False): """ The block with workprec(n): <code> sets the precision to n bits, executes <code>, and then restores the precision. workprec(n)(f) returns a decorated version of the function f that sets the precision to n bits before execution, and restores the precision afterwards. With normalize_output=True, it rounds the return value to the parent precision. """ return PrecisionManager(ctx, lambda p: n, None, normalize_output) def workdps(ctx, n, normalize_output=False): """ This function is analogous to workprec (see documentation) but changes the decimal precision instead of the number of bits. 
""" return PrecisionManager(ctx, None, lambda d: n, normalize_output) def autoprec(ctx, f, maxprec=None, catch=(), verbose=False): """ Return a wrapped copy of *f* that repeatedly evaluates *f* with increasing precision until the result converges to the full precision used at the point of the call. This heuristically protects against rounding errors, at the cost of roughly a 2x slowdown compared to manually setting the optimal precision. This method can, however, easily be fooled if the results from *f* depend "discontinuously" on the precision, for instance if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec` should be used judiciously. **Examples** Many functions are sensitive to perturbations of the input arguments. If the arguments are decimal numbers, they may have to be converted to binary at a much higher precision. If the amount of required extra precision is unknown, :func:`~mpmath.autoprec` is convenient:: >>> from mpmath import * >>> mp.dps = 15 >>> mp.pretty = True >>> besselj(5, 125 * 10**28) # Exact input -8.03284785591801e-17 >>> besselj(5, '1.25e30') # Bad 7.12954868316652e-16 >>> autoprec(besselj)(5, '1.25e30') # Good -8.03284785591801e-17 The following fails to converge because `\sin(\pi) = 0` whereas all finite-precision approximations of `\pi` give nonzero values:: >>> autoprec(sin)(pi) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... NoConvergence: autoprec: prec increased to 2910 without convergence As the following example shows, :func:`~mpmath.autoprec` can protect against cancellation, but is fooled by too severe cancellation:: >>> x = 1e-10 >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x) 1.00000008274037e-10 1.00000000005e-10 1.00000000005e-10 >>> x = 1e-50 >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x) 0.0 1.0e-50 0.0 With *catch*, an exception or list of exceptions to intercept may be specified. The raised exception is interpreted as signaling insufficient precision. This permits, for example, evaluating a function where a too low precision results in a division by zero:: >>> f = lambda x: 1/(exp(x)-1) >>> f(1e-30) Traceback (most recent call last): ... ZeroDivisionError >>> autoprec(f, catch=ZeroDivisionError)(1e-30) 1.0e+30 """ def f_autoprec_wrapped(*args, **kwargs): prec = ctx.prec if maxprec is None: maxprec2 = ctx._default_hyper_maxprec(prec) else: maxprec2 = maxprec try: ctx.prec = prec + 10 try: v1 = f(*args, **kwargs) except catch: v1 = ctx.nan prec2 = prec + 20 while 1: ctx.prec = prec2 try: v2 = f(*args, **kwargs) except catch: v2 = ctx.nan if v1 == v2: break err = ctx.mag(v2-v1) - ctx.mag(v2) if err < (-prec): break if verbose: print("autoprec: target=%s, prec=%s, accuracy=%s" \ % (prec, prec2, -err)) v1 = v2 if prec2 >= maxprec2: raise ctx.NoConvergence(\ "autoprec: prec increased to %i without convergence"\ % prec2) prec2 += int(prec2*2) prec2 = min(prec2, maxprec2) finally: ctx.prec = prec return +v2 return f_autoprec_wrapped def nstr(ctx, x, n=6, **kwargs): """ Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n* significant digits. The small default value for *n* is chosen to make this function useful for printing collections of numbers (lists, matrices, etc). If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively to each element. For unrecognized classes, :func:`~mpmath.nstr` simply returns ``str(x)``. The companion function :func:`~mpmath.nprint` prints the result instead of returning it. 
The keyword arguments *strip_zeros*, *min_fixed*, *max_fixed* and *show_zero_exponent* are forwarded to :func:`~mpmath.libmp.to_str`. The number will be printed in fixed-point format if the position of the leading digit is strictly between min_fixed (default = min(-dps/3,-5)) and max_fixed (default = dps). To force fixed-point format always, set min_fixed = -inf, max_fixed = +inf. To force floating-point format, set min_fixed >= max_fixed. >>> from mpmath import * >>> nstr([+pi, ldexp(1,-500)]) '[3.14159, 3.05494e-151]' >>> nprint([+pi, ldexp(1,-500)]) [3.14159, 3.05494e-151] >>> nstr(mpf("5e-10"), 5) '5.0e-10' >>> nstr(mpf("5e-10"), 5, strip_zeros=False) '5.0000e-10' >>> nstr(mpf("5e-10"), 5, strip_zeros=False, min_fixed=-11) '0.00000000050000' >>> nstr(mpf(0), 5, show_zero_exponent=True) '0.0e+0' """ if isinstance(x, list): return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x)) if isinstance(x, tuple): return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x)) if hasattr(x, '_mpf_'): return to_str(x._mpf_, n, **kwargs) if hasattr(x, '_mpc_'): return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")" if isinstance(x, basestring): return repr(x) if isinstance(x, ctx.matrix): return x.__nstr__(n, **kwargs) return str(x) def _convert_fallback(ctx, x, strings): if strings and isinstance(x, basestring): if 'j' in x.lower(): x = x.lower().replace(' ', '') match = get_complex.match(x) re = match.group('re') if not re: re = 0 im = match.group('im').rstrip('j') return ctx.mpc(ctx.convert(re), ctx.convert(im)) if hasattr(x, "_mpi_"): a, b = x._mpi_ if a == b: return ctx.make_mpf(a) else: raise ValueError("can only create mpf from zero-width interval") raise TypeError("cannot create mpf from " + repr(x)) def mpmathify(ctx, *args, **kwargs): return ctx.convert(*args, **kwargs) def _parse_prec(ctx, kwargs): if kwargs: if kwargs.get('exact'): return 0, 'f' prec, rounding = ctx._prec_rounding if 'rounding' in kwargs: rounding = kwargs['rounding'] if 'prec' in kwargs: prec = kwargs['prec'] if prec == ctx.inf: return 0, 'f' else: prec = int(prec) elif 'dps' in kwargs: dps = kwargs['dps'] if dps == ctx.inf: return 0, 'f' prec = dps_to_prec(dps) return prec, rounding return ctx._prec_rounding _exact_overflow_msg = "the exact result does not fit in memory" _hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy using a working precision of %i bits. Try with a higher maxprec, maxterms, or set zeroprec.""" def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs): if hasattr(z, "_mpf_"): key = p, q, flags, 'R' v = z._mpf_ elif hasattr(z, "_mpc_"): key = p, q, flags, 'C' v = z._mpc_ if key not in ctx.hyp_summators: ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1] summator = ctx.hyp_summators[key] prec = ctx.prec maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec)) extraprec = 50 epsshift = 25 # Jumps in magnitude occur when parameters are close to negative # integers. 
We must ensure that these terms are included in # the sum and added accurately magnitude_check = {} max_total_jump = 0 for i, c in enumerate(coeffs): if flags[i] == 'Z': if i >= p and c <= 0: ok = False for ii, cc in enumerate(coeffs[:p]): # Note: c <= cc or c < cc, depending on convention if flags[ii] == 'Z' and cc <= 0 and c <= cc: ok = True if not ok: raise ZeroDivisionError("pole in hypergeometric series") continue n, d = ctx.nint_distance(c) n = -int(n) d = -d if i >= p and n >= 0 and d > 4: if n in magnitude_check: magnitude_check[n] += d else: magnitude_check[n] = d extraprec = max(extraprec, d - prec + 60) max_total_jump += abs(d) while 1: if extraprec > maxprec: raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec)) wp = prec + extraprec if magnitude_check: mag_dict = dict((n,None) for n in magnitude_check) else: mag_dict = {} zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \ epsshift, mag_dict, **kwargs) cancel = -magnitude jumps_resolved = True if extraprec < max_total_jump: for n in mag_dict.values(): if (n is None) or (n < prec): jumps_resolved = False break accurate = (cancel < extraprec-25-5 or not accurate_small) if jumps_resolved: if accurate: break # zero? zeroprec = kwargs.get('zeroprec') if zeroprec is not None: if cancel > zeroprec: if have_complex: return ctx.mpc(0) else: return ctx.zero # Some near-singularities were not included, so increase # precision and repeat until they are extraprec *= 2 # Possible workaround for bad roundoff in fixed-point arithmetic epsshift += 5 extraprec += 5 if type(zv) is tuple: if have_complex: return ctx.make_mpc(zv) else: return ctx.make_mpf(zv) else: return zv def ldexp(ctx, x, n): r""" Computes `x 2^n` efficiently. No rounding is performed. The argument `x` must be a real floating-point number (or possible to convert into one) and `n` must be a Python ``int``. >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> ldexp(1, 10) mpf('1024.0') >>> ldexp(1, -3) mpf('0.125') """ x = ctx.convert(x) return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n)) def frexp(ctx, x): r""" Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`, `n` a Python integer, and such that `x = y 2^n`. No rounding is performed. >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> frexp(7.5) (mpf('0.9375'), 3) """ x = ctx.convert(x) y, n = libmp.mpf_frexp(x._mpf_) return ctx.make_mpf(y), n def fneg(ctx, x, **kwargs): """ Negates the number *x*, giving a floating-point result, optionally using a custom precision and rounding mode. See the documentation of :func:`~mpmath.fadd` for a detailed description of how to specify precision and rounding. 
**Examples** An mpmath number is returned:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fneg(2.5) mpf('-2.5') >>> fneg(-5+2j) mpc(real='5.0', imag='-2.0') Precise control over rounding is possible:: >>> x = fadd(2, 1e-100, exact=True) >>> fneg(x) mpf('-2.0') >>> fneg(x, rounding='f') mpf('-2.0000000000000004') Negating with and without roundoff:: >>> n = 200000000000000000000001 >>> print(int(-mpf(n))) -200000000000000016777216 >>> print(int(fneg(n))) -200000000000000016777216 >>> print(int(fneg(n, prec=log(n,2)+1))) -200000000000000000000001 >>> print(int(fneg(n, dps=log(n,10)+1))) -200000000000000000000001 >>> print(int(fneg(n, prec=inf))) -200000000000000000000001 >>> print(int(fneg(n, dps=inf))) -200000000000000000000001 >>> print(int(fneg(n, exact=True))) -200000000000000000000001 """ prec, rounding = ctx._parse_prec(kwargs) x = ctx.convert(x) if hasattr(x, '_mpf_'): return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding)) if hasattr(x, '_mpc_'): return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding)) raise ValueError("Arguments need to be mpf or mpc compatible numbers") def fadd(ctx, x, y, **kwargs): """ Adds the numbers *x* and *y*, giving a floating-point result, optionally using a custom precision and rounding mode. The default precision is the working precision of the context. You can specify a custom precision in bits by passing the *prec* keyword argument, or by providing an equivalent decimal precision with the *dps* keyword argument. If the precision is set to ``+inf``, or if the flag *exact=True* is passed, an exact addition with no rounding is performed. When the precision is finite, the optional *rounding* keyword argument specifies the direction of rounding. Valid options are ``'n'`` for nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'`` for down, ``'u'`` for up. **Examples** Using :func:`~mpmath.fadd` with precision and rounding control:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fadd(2, 1e-20) mpf('2.0') >>> fadd(2, 1e-20, rounding='u') mpf('2.0000000000000004') >>> nprint(fadd(2, 1e-20, prec=100), 25) 2.00000000000000000001 >>> nprint(fadd(2, 1e-20, dps=15), 25) 2.0 >>> nprint(fadd(2, 1e-20, dps=25), 25) 2.00000000000000000001 >>> nprint(fadd(2, 1e-20, exact=True), 25) 2.00000000000000000001 Exact addition avoids cancellation errors, enforcing familiar laws of numbers such as `x+y-x = y`, which don't hold in floating-point arithmetic with finite precision:: >>> x, y = mpf(2), mpf('1e-1000') >>> print(x + y - x) 0.0 >>> print(fadd(x, y, prec=inf) - x) 1.0e-1000 >>> print(fadd(x, y, exact=True) - x) 1.0e-1000 Exact addition can be inefficient and may be impossible to perform with large magnitude differences:: >>> fadd(1, '1e-100000000000000000000', prec=inf) Traceback (most recent call last): ... 
OverflowError: the exact result does not fit in memory """ prec, rounding = ctx._parse_prec(kwargs) x = ctx.convert(x) y = ctx.convert(y) try: if hasattr(x, '_mpf_'): if hasattr(y, '_mpf_'): return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding)) if hasattr(y, '_mpc_'): return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding)) if hasattr(x, '_mpc_'): if hasattr(y, '_mpf_'): return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding)) if hasattr(y, '_mpc_'): return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding)) except (ValueError, OverflowError): raise OverflowError(ctx._exact_overflow_msg) raise ValueError("Arguments need to be mpf or mpc compatible numbers") def fsub(ctx, x, y, **kwargs): """ Subtracts the numbers *x* and *y*, giving a floating-point result, optionally using a custom precision and rounding mode. See the documentation of :func:`~mpmath.fadd` for a detailed description of how to specify precision and rounding. **Examples** Using :func:`~mpmath.fsub` with precision and rounding control:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fsub(2, 1e-20) mpf('2.0') >>> fsub(2, 1e-20, rounding='d') mpf('1.9999999999999998') >>> nprint(fsub(2, 1e-20, prec=100), 25) 1.99999999999999999999 >>> nprint(fsub(2, 1e-20, dps=15), 25) 2.0 >>> nprint(fsub(2, 1e-20, dps=25), 25) 1.99999999999999999999 >>> nprint(fsub(2, 1e-20, exact=True), 25) 1.99999999999999999999 Exact subtraction avoids cancellation errors, enforcing familiar laws of numbers such as `x-y+y = x`, which don't hold in floating-point arithmetic with finite precision:: >>> x, y = mpf(2), mpf('1e1000') >>> print(x - y + y) 0.0 >>> print(fsub(x, y, prec=inf) + y) 2.0 >>> print(fsub(x, y, exact=True) + y) 2.0 Exact addition can be inefficient and may be impossible to perform with large magnitude differences:: >>> fsub(1, '1e-100000000000000000000', prec=inf) Traceback (most recent call last): ... OverflowError: the exact result does not fit in memory """ prec, rounding = ctx._parse_prec(kwargs) x = ctx.convert(x) y = ctx.convert(y) try: if hasattr(x, '_mpf_'): if hasattr(y, '_mpf_'): return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding)) if hasattr(y, '_mpc_'): return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding)) if hasattr(x, '_mpc_'): if hasattr(y, '_mpf_'): return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding)) if hasattr(y, '_mpc_'): return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding)) except (ValueError, OverflowError): raise OverflowError(ctx._exact_overflow_msg) raise ValueError("Arguments need to be mpf or mpc compatible numbers") def fmul(ctx, x, y, **kwargs): """ Multiplies the numbers *x* and *y*, giving a floating-point result, optionally using a custom precision and rounding mode. See the documentation of :func:`~mpmath.fadd` for a detailed description of how to specify precision and rounding. 
**Examples** The result is an mpmath number:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fmul(2, 5.0) mpf('10.0') >>> fmul(0.5j, 0.5) mpc(real='0.0', imag='0.25') Avoiding roundoff:: >>> x, y = 10**10+1, 10**15+1 >>> print(x*y) 10000000001000010000000001 >>> print(mpf(x) * mpf(y)) 1.0000000001e+25 >>> print(int(mpf(x) * mpf(y))) 10000000001000011026399232 >>> print(int(fmul(x, y))) 10000000001000011026399232 >>> print(int(fmul(x, y, dps=25))) 10000000001000010000000001 >>> print(int(fmul(x, y, exact=True))) 10000000001000010000000001 Exact multiplication with complex numbers can be inefficient and may be impossible to perform with large magnitude differences between real and imaginary parts:: >>> x = 1+2j >>> y = mpc(2, '1e-100000000000000000000') >>> fmul(x, y) mpc(real='2.0', imag='4.0') >>> fmul(x, y, rounding='u') mpc(real='2.0', imag='4.0000000000000009') >>> fmul(x, y, exact=True) Traceback (most recent call last): ... OverflowError: the exact result does not fit in memory """ prec, rounding = ctx._parse_prec(kwargs) x = ctx.convert(x) y = ctx.convert(y) try: if hasattr(x, '_mpf_'): if hasattr(y, '_mpf_'): return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding)) if hasattr(y, '_mpc_'): return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding)) if hasattr(x, '_mpc_'): if hasattr(y, '_mpf_'): return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding)) if hasattr(y, '_mpc_'): return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding)) except (ValueError, OverflowError): raise OverflowError(ctx._exact_overflow_msg) raise ValueError("Arguments need to be mpf or mpc compatible numbers") def fdiv(ctx, x, y, **kwargs): """ Divides the numbers *x* and *y*, giving a floating-point result, optionally using a custom precision and rounding mode. See the documentation of :func:`~mpmath.fadd` for a detailed description of how to specify precision and rounding. **Examples** The result is an mpmath number:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fdiv(3, 2) mpf('1.5') >>> fdiv(2, 3) mpf('0.66666666666666663') >>> fdiv(2+4j, 0.5) mpc(real='4.0', imag='8.0') The rounding direction and precision can be controlled:: >>> fdiv(2, 3, dps=3) # Should be accurate to at least 3 digits mpf('0.6666259765625') >>> fdiv(2, 3, rounding='d') mpf('0.66666666666666663') >>> fdiv(2, 3, prec=60) mpf('0.66666666666666667') >>> fdiv(2, 3, rounding='u') mpf('0.66666666666666674') Checking the error of a division by performing it at higher precision:: >>> fdiv(2, 3) - fdiv(2, 3, prec=100) mpf('-3.7007434154172148e-17') Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not allowed since the quotient of two floating-point numbers generally does not have an exact floating-point representation. (In the future this might be changed to allow the case where the division is actually exact.) >>> fdiv(2, 3, exact=True) Traceback (most recent call last): ... 
ValueError: division is not an exact operation """ prec, rounding = ctx._parse_prec(kwargs) if not prec: raise ValueError("division is not an exact operation") x = ctx.convert(x) y = ctx.convert(y) if hasattr(x, '_mpf_'): if hasattr(y, '_mpf_'): return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding)) if hasattr(y, '_mpc_'): return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding)) if hasattr(x, '_mpc_'): if hasattr(y, '_mpf_'): return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding)) if hasattr(y, '_mpc_'): return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding)) raise ValueError("Arguments need to be mpf or mpc compatible numbers") def nint_distance(ctx, x): r""" Return `(n,d)` where `n` is the nearest integer to `x` and `d` is an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision (measured in bits) lost to cancellation when computing `x-n`. >>> from mpmath import * >>> n, d = nint_distance(5) >>> print(n); print(d) 5 -inf >>> n, d = nint_distance(mpf(5)) >>> print(n); print(d) 5 -inf >>> n, d = nint_distance(mpf(5.00000001)) >>> print(n); print(d) 5 -26 >>> n, d = nint_distance(mpf(4.99999999)) >>> print(n); print(d) 5 -26 >>> n, d = nint_distance(mpc(5,10)) >>> print(n); print(d) 5 4 >>> n, d = nint_distance(mpc(5,0.000001)) >>> print(n); print(d) 5 -19 """ typx = type(x) if typx in int_types: return int(x), ctx.ninf elif typx is rational.mpq: p, q = x._mpq_ n, r = divmod(p, q) if 2*r >= q: n += 1 elif not r: return n, ctx.ninf # log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q) d = bitcount(abs(p-n*q)) - bitcount(q) return n, d if hasattr(x, "_mpf_"): re = x._mpf_ im_dist = ctx.ninf elif hasattr(x, "_mpc_"): re, im = x._mpc_ isign, iman, iexp, ibc = im if iman: im_dist = iexp + ibc elif im == fzero: im_dist = ctx.ninf else: raise ValueError("requires a finite number") else: x = ctx.convert(x) if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"): return ctx.nint_distance(x) else: raise TypeError("requires an mpf/mpc") sign, man, exp, bc = re mag = exp+bc # |x| < 0.5 if mag < 0: n = 0 re_dist = mag elif man: # exact integer if exp >= 0: n = man << exp re_dist = ctx.ninf # exact half-integer elif exp == -1: n = (man>>1)+1 re_dist = 0 else: d = (-exp-1) t = man >> d if t & 1: t += 1 man = (t<<d) - man else: man -= (t<<d) n = t>>1 # int(t)>>1 re_dist = exp+bitcount(man) if sign: n = -n elif re == fzero: re_dist = ctx.ninf n = 0 else: raise ValueError("requires a finite number") return n, max(re_dist, im_dist) def fprod(ctx, factors): r""" Calculates a product containing a finite number of factors (for infinite products, see :func:`~mpmath.nprod`). The factors will be converted to mpmath numbers. >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fprod([1, 2, 0.5, 7]) mpf('7.0') """ orig = ctx.prec try: v = ctx.one for p in factors: v *= p finally: ctx.prec = orig return +v def rand(ctx): """ Returns an ``mpf`` with value chosen randomly from `[0, 1)`. The number of randomly generated bits in the mantissa is equal to the working precision. """ return ctx.make_mpf(mpf_rand(ctx._prec)) def fraction(ctx, p, q): """ Given Python integers `(p, q)`, returns a lazy ``mpf`` representing the fraction `p/q`. The value is updated with the precision. 
>>> from mpmath import * >>> mp.dps = 15 >>> a = fraction(1,100) >>> b = mpf(1)/100 >>> print(a); print(b) 0.01 0.01 >>> mp.dps = 30 >>> print(a); print(b) # a will be accurate 0.01 0.0100000000000000002081668171172 >>> mp.dps = 15 """ return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd), '%s/%s' % (p, q)) def absmin(ctx, x): return abs(ctx.convert(x)) def absmax(ctx, x): return abs(ctx.convert(x)) def _as_points(ctx, x): # XXX: remove this? if hasattr(x, '_mpi_'): a, b = x._mpi_ return [ctx.make_mpf(a), ctx.make_mpf(b)] return x ''' def _zetasum(ctx, s, a, b): """ Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small integers. """ a = int(a) b = int(b) s = ctx.convert(s) prec, rounding = ctx._prec_rounding if hasattr(s, '_mpf_'): v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec)) elif hasattr(s, '_mpc_'): v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec)) return v ''' def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False): if not (ctx.isint(a) and hasattr(s, "_mpc_")): raise NotImplementedError a = int(a) prec = ctx._prec xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec) xs = [ctx.make_mpc(x) for x in xs] ys = [ctx.make_mpc(y) for y in ys] return xs, ys class PrecisionManager: def __init__(self, ctx, precfun, dpsfun, normalize_output=False): self.ctx = ctx self.precfun = precfun self.dpsfun = dpsfun self.normalize_output = normalize_output def __call__(self, f): def g(*args, **kwargs): orig = self.ctx.prec try: if self.precfun: self.ctx.prec = self.precfun(self.ctx.prec) else: self.ctx.dps = self.dpsfun(self.ctx.dps) if self.normalize_output: v = f(*args, **kwargs) if type(v) is tuple: return tuple([+a for a in v]) return +v else: return f(*args, **kwargs) finally: self.ctx.prec = orig g.__name__ = f.__name__ g.__doc__ = f.__doc__ return g def __enter__(self): self.origp = self.ctx.prec if self.precfun: self.ctx.prec = self.precfun(self.ctx.prec) else: self.ctx.dps = self.dpsfun(self.ctx.dps) def __exit__(self, exc_type, exc_val, exc_tb): self.ctx.prec = self.origp return False if __name__ == '__main__': import doctest doctest.testmod()
49,671
36.041014
116
py
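The fadd/fsub/fmul/fdiv wrappers documented in the file above all accept prec, dps, rounding and exact keyword arguments, and PrecisionManager is the object behind the workprec/workdps helpers. A minimal sketch of both, assuming a standard mpmath installation (expected values follow the docstrings above):

from mpmath import mp, mpf, fsub, fdiv, fprod, nint_distance

mp.dps = 15

# Directed rounding brackets the exact result of 2 - 1e-20.
lo = fsub(2, 1e-20, rounding='d')   # toward zero: 1.9999999999999998
hi = fsub(2, 1e-20, rounding='u')   # away from zero: 2.0
assert lo < hi

# exact=True keeps the full result, so catastrophic cancellation is avoided.
x, y = mpf(2), mpf('1e1000')
assert fsub(x, y, exact=True) + y == 2

# fdiv currently rejects exact=True, since quotients are generally inexact.
try:
    fdiv(2, 3, exact=True)
except ValueError:
    pass

# fprod and nint_distance, as documented above.
assert fprod([1, 2, 0.5, 7]) == 7
n, d = nint_distance(mpf('5.00000001'))
assert n == 5 and d < 0

# PrecisionManager lets workdps/workprec act as context managers (or decorators)
# that restore the original precision on exit.
with mp.workdps(30):
    print(mp.sqrt(2))               # about 30 significant digits
print(mp.dps)                       # back to 15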
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/ctx_mp_python.py
#from ctx_base import StandardBaseContext from .libmp.backend import basestring, exec_ from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps, round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps, ComplexResult, to_pickable, from_pickable, normalize, from_int, from_float, from_str, to_int, to_float, to_str, from_rational, from_man_exp, fone, fzero, finf, fninf, fnan, mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int, mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod, mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge, mpf_hash, mpf_rand, mpf_sum, bitcount, to_fixed, mpc_to_str, mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate, mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf, mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int, mpc_mpf_div, mpf_pow, mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10, mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin, mpf_glaisher, mpf_twinprime, mpf_mertens, int_types) from . import rational from . import function_docs new = object.__new__ class mpnumeric(object): """Base class for mpf and mpc.""" __slots__ = [] def __new__(cls, val): raise NotImplementedError class _mpf(mpnumeric): """ An mpf instance holds a real-valued floating-point number. mpf:s work analogously to Python floats, but support arbitrary-precision arithmetic. """ __slots__ = ['_mpf_'] def __new__(cls, val=fzero, **kwargs): """A new mpf can be created from a Python float, an int, a or a decimal string representing a number in floating-point format.""" prec, rounding = cls.context._prec_rounding if kwargs: prec = kwargs.get('prec', prec) if 'dps' in kwargs: prec = dps_to_prec(kwargs['dps']) rounding = kwargs.get('rounding', rounding) if type(val) is cls: sign, man, exp, bc = val._mpf_ if (not man) and exp: return val v = new(cls) v._mpf_ = normalize(sign, man, exp, bc, prec, rounding) return v elif type(val) is tuple: if len(val) == 2: v = new(cls) v._mpf_ = from_man_exp(val[0], val[1], prec, rounding) return v if len(val) == 4: sign, man, exp, bc = val v = new(cls) v._mpf_ = normalize(sign, MPZ(man), exp, bc, prec, rounding) return v raise ValueError else: v = new(cls) v._mpf_ = mpf_pos(cls.mpf_convert_arg(val, prec, rounding), prec, rounding) return v @classmethod def mpf_convert_arg(cls, x, prec, rounding): if isinstance(x, int_types): return from_int(x) if isinstance(x, float): return from_float(x) if isinstance(x, basestring): return from_str(x, prec, rounding) if isinstance(x, cls.context.constant): return x.func(prec, rounding) if hasattr(x, '_mpf_'): return x._mpf_ if hasattr(x, '_mpmath_'): t = cls.context.convert(x._mpmath_(prec, rounding)) if hasattr(t, '_mpf_'): return t._mpf_ if hasattr(x, '_mpi_'): a, b = x._mpi_ if a == b: return a raise ValueError("can only create mpf from zero-width interval") raise TypeError("cannot create mpf from " + repr(x)) @classmethod def mpf_convert_rhs(cls, x): if isinstance(x, int_types): return from_int(x) if isinstance(x, float): return from_float(x) if isinstance(x, complex_types): return cls.context.mpc(x) if isinstance(x, rational.mpq): p, q = x._mpq_ return from_rational(p, q, cls.context.prec) if hasattr(x, '_mpf_'): return x._mpf_ if hasattr(x, '_mpmath_'): t = cls.context.convert(x._mpmath_(*cls.context._prec_rounding)) if hasattr(t, '_mpf_'): return t._mpf_ return t return NotImplemented @classmethod def mpf_convert_lhs(cls, x): x = cls.mpf_convert_rhs(x) if type(x) is tuple: return cls.context.make_mpf(x) return x man_exp = 
property(lambda self: self._mpf_[1:3]) man = property(lambda self: self._mpf_[1]) exp = property(lambda self: self._mpf_[2]) bc = property(lambda self: self._mpf_[3]) real = property(lambda self: self) imag = property(lambda self: self.context.zero) conjugate = lambda self: self def __getstate__(self): return to_pickable(self._mpf_) def __setstate__(self, val): self._mpf_ = from_pickable(val) def __repr__(s): if s.context.pretty: return str(s) return "mpf('%s')" % to_str(s._mpf_, s.context._repr_digits) def __str__(s): return to_str(s._mpf_, s.context._str_digits) def __hash__(s): return mpf_hash(s._mpf_) def __int__(s): return int(to_int(s._mpf_)) def __long__(s): return long(to_int(s._mpf_)) def __float__(s): return to_float(s._mpf_, rnd=s.context._prec_rounding[1]) def __complex__(s): return complex(float(s)) def __nonzero__(s): return s._mpf_ != fzero __bool__ = __nonzero__ def __abs__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpf_ = mpf_abs(s._mpf_, prec, rounding) return v def __pos__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpf_ = mpf_pos(s._mpf_, prec, rounding) return v def __neg__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpf_ = mpf_neg(s._mpf_, prec, rounding) return v def _cmp(s, t, func): if hasattr(t, '_mpf_'): t = t._mpf_ else: t = s.mpf_convert_rhs(t) if t is NotImplemented: return t return func(s._mpf_, t) def __cmp__(s, t): return s._cmp(t, mpf_cmp) def __lt__(s, t): return s._cmp(t, mpf_lt) def __gt__(s, t): return s._cmp(t, mpf_gt) def __le__(s, t): return s._cmp(t, mpf_le) def __ge__(s, t): return s._cmp(t, mpf_ge) def __ne__(s, t): v = s.__eq__(t) if v is NotImplemented: return v return not v def __rsub__(s, t): cls, new, (prec, rounding) = s._ctxdata if type(t) in int_types: v = new(cls) v._mpf_ = mpf_sub(from_int(t), s._mpf_, prec, rounding) return v t = s.mpf_convert_lhs(t) if t is NotImplemented: return t return t - s def __rdiv__(s, t): cls, new, (prec, rounding) = s._ctxdata if isinstance(t, int_types): v = new(cls) v._mpf_ = mpf_rdiv_int(t, s._mpf_, prec, rounding) return v t = s.mpf_convert_lhs(t) if t is NotImplemented: return t return t / s def __rpow__(s, t): t = s.mpf_convert_lhs(t) if t is NotImplemented: return t return t ** s def __rmod__(s, t): t = s.mpf_convert_lhs(t) if t is NotImplemented: return t return t % s def sqrt(s): return s.context.sqrt(s) def ae(s, t, rel_eps=None, abs_eps=None): return s.context.almosteq(s, t, rel_eps, abs_eps) def to_fixed(self, prec): return to_fixed(self._mpf_, prec) def __round__(self, *args): return round(float(self), *args) mpf_binary_op = """ def %NAME%(self, other): mpf, new, (prec, rounding) = self._ctxdata sval = self._mpf_ if hasattr(other, '_mpf_'): tval = other._mpf_ %WITH_MPF% ttype = type(other) if ttype in int_types: %WITH_INT% elif ttype is float: tval = from_float(other) %WITH_MPF% elif hasattr(other, '_mpc_'): tval = other._mpc_ mpc = type(other) %WITH_MPC% elif ttype is complex: tval = from_float(other.real), from_float(other.imag) mpc = self.context.mpc %WITH_MPC% if isinstance(other, mpnumeric): return NotImplemented try: other = mpf.context.convert(other, strings=False) except TypeError: return NotImplemented return self.%NAME%(other) """ return_mpf = "; obj = new(mpf); obj._mpf_ = val; return obj" return_mpc = "; obj = new(mpc); obj._mpc_ = val; return obj" mpf_pow_same = """ try: val = mpf_pow(sval, tval, prec, rounding) %s except ComplexResult: if mpf.context.trap_complex: raise mpc = mpf.context.mpc val = mpc_pow((sval, fzero), (tval, 
fzero), prec, rounding) %s """ % (return_mpf, return_mpc) def binary_op(name, with_mpf='', with_int='', with_mpc=''): code = mpf_binary_op code = code.replace("%WITH_INT%", with_int) code = code.replace("%WITH_MPC%", with_mpc) code = code.replace("%WITH_MPF%", with_mpf) code = code.replace("%NAME%", name) np = {} exec_(code, globals(), np) return np[name] _mpf.__eq__ = binary_op('__eq__', 'return mpf_eq(sval, tval)', 'return mpf_eq(sval, from_int(other))', 'return (tval[1] == fzero) and mpf_eq(tval[0], sval)') _mpf.__add__ = binary_op('__add__', 'val = mpf_add(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_add(sval, from_int(other), prec, rounding)' + return_mpf, 'val = mpc_add_mpf(tval, sval, prec, rounding)' + return_mpc) _mpf.__sub__ = binary_op('__sub__', 'val = mpf_sub(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_sub(sval, from_int(other), prec, rounding)' + return_mpf, 'val = mpc_sub((sval, fzero), tval, prec, rounding)' + return_mpc) _mpf.__mul__ = binary_op('__mul__', 'val = mpf_mul(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_mul_int(sval, other, prec, rounding)' + return_mpf, 'val = mpc_mul_mpf(tval, sval, prec, rounding)' + return_mpc) _mpf.__div__ = binary_op('__div__', 'val = mpf_div(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_div(sval, from_int(other), prec, rounding)' + return_mpf, 'val = mpc_mpf_div(sval, tval, prec, rounding)' + return_mpc) _mpf.__mod__ = binary_op('__mod__', 'val = mpf_mod(sval, tval, prec, rounding)' + return_mpf, 'val = mpf_mod(sval, from_int(other), prec, rounding)' + return_mpf, 'raise NotImplementedError("complex modulo")') _mpf.__pow__ = binary_op('__pow__', mpf_pow_same, 'val = mpf_pow_int(sval, other, prec, rounding)' + return_mpf, 'val = mpc_pow((sval, fzero), tval, prec, rounding)' + return_mpc) _mpf.__radd__ = _mpf.__add__ _mpf.__rmul__ = _mpf.__mul__ _mpf.__truediv__ = _mpf.__div__ _mpf.__rtruediv__ = _mpf.__rdiv__ class _constant(_mpf): """Represents a mathematical constant with dynamic precision. When printed or used in an arithmetic operation, a constant is converted to a regular mpf at the working precision. A regular mpf can also be obtained using the operation +x.""" def __new__(cls, func, name, docname=''): a = object.__new__(cls) a.name = name a.func = func a.__doc__ = getattr(function_docs, docname, '') return a def __call__(self, prec=None, dps=None, rounding=None): prec2, rounding2 = self.context._prec_rounding if not prec: prec = prec2 if not rounding: rounding = rounding2 if dps: prec = dps_to_prec(dps) return self.context.make_mpf(self.func(prec, rounding)) @property def _mpf_(self): prec, rounding = self.context._prec_rounding return self.func(prec, rounding) def __repr__(self): return "<%s: %s~>" % (self.name, self.context.nstr(self(dps=15))) class _mpc(mpnumeric): """ An mpc represents a complex number using a pair of mpf:s (one for the real part and another for the imaginary part.) The mpc class behaves fairly similarly to Python's complex type. 
""" __slots__ = ['_mpc_'] def __new__(cls, real=0, imag=0): s = object.__new__(cls) if isinstance(real, complex_types): real, imag = real.real, real.imag elif hasattr(real, '_mpc_'): s._mpc_ = real._mpc_ return s real = cls.context.mpf(real) imag = cls.context.mpf(imag) s._mpc_ = (real._mpf_, imag._mpf_) return s real = property(lambda self: self.context.make_mpf(self._mpc_[0])) imag = property(lambda self: self.context.make_mpf(self._mpc_[1])) def __getstate__(self): return to_pickable(self._mpc_[0]), to_pickable(self._mpc_[1]) def __setstate__(self, val): self._mpc_ = from_pickable(val[0]), from_pickable(val[1]) def __repr__(s): if s.context.pretty: return str(s) r = repr(s.real)[4:-1] i = repr(s.imag)[4:-1] return "%s(real=%s, imag=%s)" % (type(s).__name__, r, i) def __str__(s): return "(%s)" % mpc_to_str(s._mpc_, s.context._str_digits) def __complex__(s): return mpc_to_complex(s._mpc_, rnd=s.context._prec_rounding[1]) def __pos__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpc_ = mpc_pos(s._mpc_, prec, rounding) return v def __abs__(s): prec, rounding = s.context._prec_rounding v = new(s.context.mpf) v._mpf_ = mpc_abs(s._mpc_, prec, rounding) return v def __neg__(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpc_ = mpc_neg(s._mpc_, prec, rounding) return v def conjugate(s): cls, new, (prec, rounding) = s._ctxdata v = new(cls) v._mpc_ = mpc_conjugate(s._mpc_, prec, rounding) return v def __nonzero__(s): return mpc_is_nonzero(s._mpc_) __bool__ = __nonzero__ def __hash__(s): return mpc_hash(s._mpc_) @classmethod def mpc_convert_lhs(cls, x): try: y = cls.context.convert(x) return y except TypeError: return NotImplemented def __eq__(s, t): if not hasattr(t, '_mpc_'): if isinstance(t, str): return False t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return s.real == t.real and s.imag == t.imag def __ne__(s, t): b = s.__eq__(t) if b is NotImplemented: return b return not b def _compare(*args): raise TypeError("no ordering relation is defined for complex numbers") __gt__ = _compare __le__ = _compare __gt__ = _compare __ge__ = _compare def __add__(s, t): cls, new, (prec, rounding) = s._ctxdata if not hasattr(t, '_mpc_'): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t if hasattr(t, '_mpf_'): v = new(cls) v._mpc_ = mpc_add_mpf(s._mpc_, t._mpf_, prec, rounding) return v v = new(cls) v._mpc_ = mpc_add(s._mpc_, t._mpc_, prec, rounding) return v def __sub__(s, t): cls, new, (prec, rounding) = s._ctxdata if not hasattr(t, '_mpc_'): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t if hasattr(t, '_mpf_'): v = new(cls) v._mpc_ = mpc_sub_mpf(s._mpc_, t._mpf_, prec, rounding) return v v = new(cls) v._mpc_ = mpc_sub(s._mpc_, t._mpc_, prec, rounding) return v def __mul__(s, t): cls, new, (prec, rounding) = s._ctxdata if not hasattr(t, '_mpc_'): if isinstance(t, int_types): v = new(cls) v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding) return v t = s.mpc_convert_lhs(t) if t is NotImplemented: return t if hasattr(t, '_mpf_'): v = new(cls) v._mpc_ = mpc_mul_mpf(s._mpc_, t._mpf_, prec, rounding) return v t = s.mpc_convert_lhs(t) v = new(cls) v._mpc_ = mpc_mul(s._mpc_, t._mpc_, prec, rounding) return v def __div__(s, t): cls, new, (prec, rounding) = s._ctxdata if not hasattr(t, '_mpc_'): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t if hasattr(t, '_mpf_'): v = new(cls) v._mpc_ = mpc_div_mpf(s._mpc_, t._mpf_, prec, rounding) return v v = new(cls) v._mpc_ = mpc_div(s._mpc_, t._mpc_, prec, rounding) return v def __pow__(s, t): cls, new, (prec, 
rounding) = s._ctxdata if isinstance(t, int_types): v = new(cls) v._mpc_ = mpc_pow_int(s._mpc_, t, prec, rounding) return v t = s.mpc_convert_lhs(t) if t is NotImplemented: return t v = new(cls) if hasattr(t, '_mpf_'): v._mpc_ = mpc_pow_mpf(s._mpc_, t._mpf_, prec, rounding) else: v._mpc_ = mpc_pow(s._mpc_, t._mpc_, prec, rounding) return v __radd__ = __add__ def __rsub__(s, t): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return t - s def __rmul__(s, t): cls, new, (prec, rounding) = s._ctxdata if isinstance(t, int_types): v = new(cls) v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding) return v t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return t * s def __rdiv__(s, t): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return t / s def __rpow__(s, t): t = s.mpc_convert_lhs(t) if t is NotImplemented: return t return t ** s __truediv__ = __div__ __rtruediv__ = __rdiv__ def ae(s, t, rel_eps=None, abs_eps=None): return s.context.almosteq(s, t, rel_eps, abs_eps) complex_types = (complex, _mpc) class PythonMPContext(object): def __init__(ctx): ctx._prec_rounding = [53, round_nearest] ctx.mpf = type('mpf', (_mpf,), {}) ctx.mpc = type('mpc', (_mpc,), {}) ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding] ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding] ctx.mpf.context = ctx ctx.mpc.context = ctx ctx.constant = type('constant', (_constant,), {}) ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding] ctx.constant.context = ctx def make_mpf(ctx, v): a = new(ctx.mpf) a._mpf_ = v return a def make_mpc(ctx, v): a = new(ctx.mpc) a._mpc_ = v return a def default(ctx): ctx._prec = ctx._prec_rounding[0] = 53 ctx._dps = 15 ctx.trap_complex = False def _set_prec(ctx, n): ctx._prec = ctx._prec_rounding[0] = max(1, int(n)) ctx._dps = prec_to_dps(n) def _set_dps(ctx, n): ctx._prec = ctx._prec_rounding[0] = dps_to_prec(n) ctx._dps = max(1, int(n)) prec = property(lambda ctx: ctx._prec, _set_prec) dps = property(lambda ctx: ctx._dps, _set_dps) def convert(ctx, x, strings=True): """ Converts *x* to an ``mpf`` or ``mpc``. If *x* is of type ``mpf``, ``mpc``, ``int``, ``float``, ``complex``, the conversion will be performed losslessly. If *x* is a string, the result will be rounded to the present working precision. Strings representing fractions or complex numbers are permitted. 
>>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> mpmathify(3.5) mpf('3.5') >>> mpmathify('2.1') mpf('2.1000000000000001') >>> mpmathify('3/4') mpf('0.75') >>> mpmathify('2+3j') mpc(real='2.0', imag='3.0') """ if type(x) in ctx.types: return x if isinstance(x, int_types): return ctx.make_mpf(from_int(x)) if isinstance(x, float): return ctx.make_mpf(from_float(x)) if isinstance(x, complex): return ctx.make_mpc((from_float(x.real), from_float(x.imag))) prec, rounding = ctx._prec_rounding if isinstance(x, rational.mpq): p, q = x._mpq_ return ctx.make_mpf(from_rational(p, q, prec)) if strings and isinstance(x, basestring): try: _mpf_ = from_str(x, prec, rounding) return ctx.make_mpf(_mpf_) except ValueError: pass if hasattr(x, '_mpf_'): return ctx.make_mpf(x._mpf_) if hasattr(x, '_mpc_'): return ctx.make_mpc(x._mpc_) if hasattr(x, '_mpmath_'): return ctx.convert(x._mpmath_(prec, rounding)) return ctx._convert_fallback(x, strings) def isnan(ctx, x): """ Return *True* if *x* is a NaN (not-a-number), or for a complex number, whether either the real or complex part is NaN; otherwise return *False*:: >>> from mpmath import * >>> isnan(3.14) False >>> isnan(nan) True >>> isnan(mpc(3.14,2.72)) False >>> isnan(mpc(3.14,nan)) True """ if hasattr(x, "_mpf_"): return x._mpf_ == fnan if hasattr(x, "_mpc_"): return fnan in x._mpc_ if isinstance(x, int_types) or isinstance(x, rational.mpq): return False x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isnan(x) raise TypeError("isnan() needs a number as input") def isinf(ctx, x): """ Return *True* if the absolute value of *x* is infinite; otherwise return *False*:: >>> from mpmath import * >>> isinf(inf) True >>> isinf(-inf) True >>> isinf(3) False >>> isinf(3+4j) False >>> isinf(mpc(3,inf)) True >>> isinf(mpc(inf,3)) True """ if hasattr(x, "_mpf_"): return x._mpf_ in (finf, fninf) if hasattr(x, "_mpc_"): re, im = x._mpc_ return re in (finf, fninf) or im in (finf, fninf) if isinstance(x, int_types) or isinstance(x, rational.mpq): return False x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isinf(x) raise TypeError("isinf() needs a number as input") def isnormal(ctx, x): """ Determine whether *x* is "normal" in the sense of floating-point representation; that is, return *False* if *x* is zero, an infinity or NaN; otherwise return *True*. 
By extension, a complex number *x* is considered "normal" if its magnitude is normal:: >>> from mpmath import * >>> isnormal(3) True >>> isnormal(0) False >>> isnormal(inf); isnormal(-inf); isnormal(nan) False False False >>> isnormal(0+0j) False >>> isnormal(0+3j) True >>> isnormal(mpc(2,nan)) False """ if hasattr(x, "_mpf_"): return bool(x._mpf_[1]) if hasattr(x, "_mpc_"): re, im = x._mpc_ re_normal = bool(re[1]) im_normal = bool(im[1]) if re == fzero: return im_normal if im == fzero: return re_normal return re_normal and im_normal if isinstance(x, int_types) or isinstance(x, rational.mpq): return bool(x) x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isnormal(x) raise TypeError("isnormal() needs a number as input") def isint(ctx, x, gaussian=False): """ Return *True* if *x* is integer-valued; otherwise return *False*:: >>> from mpmath import * >>> isint(3) True >>> isint(mpf(3)) True >>> isint(3.2) False >>> isint(inf) False Optionally, Gaussian integers can be checked for:: >>> isint(3+0j) True >>> isint(3+2j) False >>> isint(3+2j, gaussian=True) True """ if isinstance(x, int_types): return True if hasattr(x, "_mpf_"): sign, man, exp, bc = xval = x._mpf_ return bool((man and exp >= 0) or xval == fzero) if hasattr(x, "_mpc_"): re, im = x._mpc_ rsign, rman, rexp, rbc = re isign, iman, iexp, ibc = im re_isint = (rman and rexp >= 0) or re == fzero if gaussian: im_isint = (iman and iexp >= 0) or im == fzero return re_isint and im_isint return re_isint and im == fzero if isinstance(x, rational.mpq): p, q = x._mpq_ return p % q == 0 x = ctx.convert(x) if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): return ctx.isint(x, gaussian) raise TypeError("isint() needs a number as input") def fsum(ctx, terms, absolute=False, squared=False): """ Calculates a sum containing a finite number of terms (for infinite series, see :func:`~mpmath.nsum`). The terms will be converted to mpmath numbers. For len(terms) > 2, this function is generally faster and produces more accurate results than the builtin Python function :func:`sum`. >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> fsum([1, 2, 0.5, 7]) mpf('10.5') With squared=True each term is squared, and with absolute=True the absolute value of each term is used. """ prec, rnd = ctx._prec_rounding real = [] imag = [] other = 0 for term in terms: reval = imval = 0 if hasattr(term, "_mpf_"): reval = term._mpf_ elif hasattr(term, "_mpc_"): reval, imval = term._mpc_ else: term = ctx.convert(term) if hasattr(term, "_mpf_"): reval = term._mpf_ elif hasattr(term, "_mpc_"): reval, imval = term._mpc_ else: if absolute: term = ctx.absmax(term) if squared: term = term**2 other += term continue if imval: if squared: if absolute: real.append(mpf_mul(reval,reval)) real.append(mpf_mul(imval,imval)) else: reval, imval = mpc_pow_int((reval,imval),2,prec+10) real.append(reval) imag.append(imval) elif absolute: real.append(mpc_abs((reval,imval), prec)) else: real.append(reval) imag.append(imval) else: if squared: reval = mpf_mul(reval, reval) elif absolute: reval = mpf_abs(reval) real.append(reval) s = mpf_sum(real, prec, rnd, absolute) if imag: s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd))) else: s = ctx.make_mpf(s) if other is 0: return s else: return s + other def fdot(ctx, A, B=None, conjugate=False): r""" Computes the dot product of the iterables `A` and `B`, .. math :: \sum_{k=0} A_k B_k. Alternatively, :func:`~mpmath.fdot` accepts a single iterable of pairs. In other words, ``fdot(A,B)`` and ``fdot(zip(A,B))`` are equivalent. 
The elements are automatically converted to mpmath numbers. With ``conjugate=True``, the elements in the second vector will be conjugated: .. math :: \sum_{k=0} A_k \overline{B_k} **Examples** >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> A = [2, 1.5, 3] >>> B = [1, -1, 2] >>> fdot(A, B) mpf('6.5') >>> list(zip(A, B)) [(2, 1), (1.5, -1), (3, 2)] >>> fdot(_) mpf('6.5') >>> A = [2, 1.5, 3j] >>> B = [1+j, 3, -1-j] >>> fdot(A, B) mpc(real='9.5', imag='-1.0') >>> fdot(A, B, conjugate=True) mpc(real='3.5', imag='-5.0') """ if B: A = zip(A, B) prec, rnd = ctx._prec_rounding real = [] imag = [] other = 0 hasattr_ = hasattr types = (ctx.mpf, ctx.mpc) for a, b in A: if type(a) not in types: a = ctx.convert(a) if type(b) not in types: b = ctx.convert(b) a_real = hasattr_(a, "_mpf_") b_real = hasattr_(b, "_mpf_") if a_real and b_real: real.append(mpf_mul(a._mpf_, b._mpf_)) continue a_complex = hasattr_(a, "_mpc_") b_complex = hasattr_(b, "_mpc_") if a_real and b_complex: aval = a._mpf_ bre, bim = b._mpc_ if conjugate: bim = mpf_neg(bim) real.append(mpf_mul(aval, bre)) imag.append(mpf_mul(aval, bim)) elif b_real and a_complex: are, aim = a._mpc_ bval = b._mpf_ real.append(mpf_mul(are, bval)) imag.append(mpf_mul(aim, bval)) elif a_complex and b_complex: #re, im = mpc_mul(a._mpc_, b._mpc_, prec+20) are, aim = a._mpc_ bre, bim = b._mpc_ if conjugate: bim = mpf_neg(bim) real.append(mpf_mul(are, bre)) real.append(mpf_neg(mpf_mul(aim, bim))) imag.append(mpf_mul(are, bim)) imag.append(mpf_mul(aim, bre)) else: if conjugate: other += a*ctx.conj(b) else: other += a*b s = mpf_sum(real, prec, rnd) if imag: s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd))) else: s = ctx.make_mpf(s) if other is 0: return s else: return s + other def _wrap_libmp_function(ctx, mpf_f, mpc_f=None, mpi_f=None, doc="<no doc>"): """ Given a low-level mpf_ function, and optionally similar functions for mpc_ and mpi_, defines the function as a context method. It is assumed that the return type is the same as that of the input; the exception is that propagation from mpf to mpc is possible by raising ComplexResult. 
""" def f(x, **kwargs): if type(x) not in ctx.types: x = ctx.convert(x) prec, rounding = ctx._prec_rounding if kwargs: prec = kwargs.get('prec', prec) if 'dps' in kwargs: prec = dps_to_prec(kwargs['dps']) rounding = kwargs.get('rounding', rounding) if hasattr(x, '_mpf_'): try: return ctx.make_mpf(mpf_f(x._mpf_, prec, rounding)) except ComplexResult: # Handle propagation to complex if ctx.trap_complex: raise return ctx.make_mpc(mpc_f((x._mpf_, fzero), prec, rounding)) elif hasattr(x, '_mpc_'): return ctx.make_mpc(mpc_f(x._mpc_, prec, rounding)) raise NotImplementedError("%s of a %s" % (name, type(x))) name = mpf_f.__name__[4:] f.__doc__ = function_docs.__dict__.get(name, "Computes the %s of x" % doc) return f # Called by SpecialFunctions.__init__() @classmethod def _wrap_specfun(cls, name, f, wrap): if wrap: def f_wrapped(ctx, *args, **kwargs): convert = ctx.convert args = [convert(a) for a in args] prec = ctx.prec try: ctx.prec += 10 retval = f(ctx, *args, **kwargs) finally: ctx.prec = prec return +retval else: f_wrapped = f f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__) setattr(cls, name, f_wrapped) def _convert_param(ctx, x): if hasattr(x, "_mpc_"): v, im = x._mpc_ if im != fzero: return x, 'C' elif hasattr(x, "_mpf_"): v = x._mpf_ else: if type(x) in int_types: return int(x), 'Z' p = None if isinstance(x, tuple): p, q = x elif hasattr(x, '_mpq_'): p, q = x._mpq_ elif isinstance(x, basestring) and '/' in x: p, q = x.split('/') p = int(p) q = int(q) if p is not None: if not p % q: return p // q, 'Z' return ctx.mpq(p,q), 'Q' x = ctx.convert(x) if hasattr(x, "_mpc_"): v, im = x._mpc_ if im != fzero: return x, 'C' elif hasattr(x, "_mpf_"): v = x._mpf_ else: return x, 'U' sign, man, exp, bc = v if man: if exp >= -4: if sign: man = -man if exp >= 0: return int(man) << exp, 'Z' if exp >= -4: p, q = int(man), (1<<(-exp)) return ctx.mpq(p,q), 'Q' x = ctx.make_mpf(v) return x, 'R' elif not exp: return 0, 'Z' else: return x, 'U' def _mpf_mag(ctx, x): sign, man, exp, bc = x if man: return exp+bc if x == fzero: return ctx.ninf if x == finf or x == fninf: return ctx.inf return ctx.nan def mag(ctx, x): """ Quick logarithmic magnitude estimate of a number. Returns an integer or infinity `m` such that `|x| <= 2^m`. It is not guaranteed that `m` is an optimal bound, but it will never be too large by more than 2 (and probably not more than 1). **Examples** >>> from mpmath import * >>> mp.pretty = True >>> mag(10), mag(10.0), mag(mpf(10)), int(ceil(log(10,2))) (4, 4, 4, 4) >>> mag(10j), mag(10+10j) (4, 5) >>> mag(0.01), int(ceil(log(0.01,2))) (-6, -6) >>> mag(0), mag(inf), mag(-inf), mag(nan) (-inf, +inf, +inf, nan) """ if hasattr(x, "_mpf_"): return ctx._mpf_mag(x._mpf_) elif hasattr(x, "_mpc_"): r, i = x._mpc_ if r == fzero: return ctx._mpf_mag(i) if i == fzero: return ctx._mpf_mag(r) return 1+max(ctx._mpf_mag(r), ctx._mpf_mag(i)) elif isinstance(x, int_types): if x: return bitcount(abs(x)) return ctx.ninf elif isinstance(x, rational.mpq): p, q = x._mpq_ if p: return 1 + bitcount(abs(p)) - bitcount(q) return ctx.ninf else: x = ctx.convert(x) if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"): return ctx.mag(x) else: raise TypeError("requires an mpf/mpc") # Register with "numbers" ABC # We do not subclass, hence we do not use the @abstractmethod checks. While # this is less invasive it may turn out that we do not actually support # parts of the expected interfaces. See # http://docs.python.org/2/library/numbers.html for list of abstract # methods. 
try: import numbers numbers.Complex.register(_mpc) numbers.Real.register(_mpf) except ImportError: pass
37,210
31.555556
87
py
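The conversion and utility methods defined above on PythonMPContext (convert, the is* predicates, fsum, fdot, mag) are exposed through the global mp context. A brief sketch assuming a standard mpmath installation, with expected values taken from the docstrings above:

from mpmath import mp, mpf, mpc, mpmathify, isint, isnan, fsum, fdot, mag

mp.dps = 15

assert mpmathify('3/4') == mpf(0.75)          # strings may be fractions or complex
assert isint(3 + 2j, gaussian=True)           # Gaussian-integer test
assert not isnan(mpc(3.14, 2.72))             # NaN only if either part is NaN

assert fsum([1, 2, 0.5, 7]) == mpf('10.5')    # accurate summation
assert fsum([1, -2, 3], absolute=True) == 6   # sum of absolute values
assert fsum([1, -2, 3], squared=True) == 14   # sum of squares
assert fdot([2, 1.5, 3], [1, -1, 2]) == mpf('6.5')   # dot product of iterables

assert mag(10) == 4                           # integer m with |x| <= 2**m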
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/ctx_iv.py
import operator from . import libmp from .libmp.backend import basestring from .libmp import ( int_types, MPZ_ONE, prec_to_dps, dps_to_prec, repr_dps, round_floor, round_ceiling, fzero, finf, fninf, fnan, mpf_le, mpf_neg, from_int, from_float, from_str, from_rational, mpi_mid, mpi_delta, mpi_str, mpi_abs, mpi_pos, mpi_neg, mpi_add, mpi_sub, mpi_mul, mpi_div, mpi_pow_int, mpi_pow, mpi_from_str, mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow, mpci_abs, mpci_pow, mpci_exp, mpci_log, ComplexResult, mpf_hash, mpc_hash) mpi_zero = (fzero, fzero) from .ctx_base import StandardBaseContext new = object.__new__ def convert_mpf_(x, prec, rounding): if hasattr(x, "_mpf_"): return x._mpf_ if isinstance(x, int_types): return from_int(x, prec, rounding) if isinstance(x, float): return from_float(x, prec, rounding) if isinstance(x, basestring): return from_str(x, prec, rounding) class ivmpf(object): """ Interval arithmetic class. Precision is controlled by iv.prec. """ def __new__(cls, x=0): return cls.ctx.convert(x) def __int__(self): a, b = self._mpi_ if a == b: return int(libmp.to_int(a)) raise ValueError def __hash__(self): a, b = self._mpi_ if a == b: return mpf_hash(a) else: return hash(self._mpi_) @property def real(self): return self @property def imag(self): return self.ctx.zero def conjugate(self): return self @property def a(self): a, b = self._mpi_ return self.ctx.make_mpf((a, a)) @property def b(self): a, b = self._mpi_ return self.ctx.make_mpf((b, b)) @property def mid(self): ctx = self.ctx v = mpi_mid(self._mpi_, ctx.prec) return ctx.make_mpf((v, v)) @property def delta(self): ctx = self.ctx v = mpi_delta(self._mpi_, ctx.prec) return ctx.make_mpf((v,v)) @property def _mpci_(self): return self._mpi_, mpi_zero def _compare(*args): raise TypeError("no ordering relation is defined for intervals") __gt__ = _compare __le__ = _compare __gt__ = _compare __ge__ = _compare def __contains__(self, t): t = self.ctx.mpf(t) return (self.a <= t.a) and (t.b <= self.b) def __str__(self): return mpi_str(self._mpi_, self.ctx.prec) def __repr__(self): if self.ctx.pretty: return str(self) a, b = self._mpi_ n = repr_dps(self.ctx.prec) a = libmp.to_str(a, n) b = libmp.to_str(b, n) return "mpi(%r, %r)" % (a, b) def _compare(s, t, cmpfun): if not hasattr(t, "_mpi_"): try: t = s.ctx.convert(t) except: return NotImplemented return cmpfun(s._mpi_, t._mpi_) def __eq__(s, t): return s._compare(t, libmp.mpi_eq) def __ne__(s, t): return s._compare(t, libmp.mpi_ne) def __lt__(s, t): return s._compare(t, libmp.mpi_lt) def __le__(s, t): return s._compare(t, libmp.mpi_le) def __gt__(s, t): return s._compare(t, libmp.mpi_gt) def __ge__(s, t): return s._compare(t, libmp.mpi_ge) def __abs__(self): return self.ctx.make_mpf(mpi_abs(self._mpi_, self.ctx.prec)) def __pos__(self): return self.ctx.make_mpf(mpi_pos(self._mpi_, self.ctx.prec)) def __neg__(self): return self.ctx.make_mpf(mpi_neg(self._mpi_, self.ctx.prec)) def ae(s, t, rel_eps=None, abs_eps=None): return s.ctx.almosteq(s, t, rel_eps, abs_eps) class ivmpc(object): def __new__(cls, re=0, im=0): re = cls.ctx.convert(re) im = cls.ctx.convert(im) y = new(cls) y._mpci_ = re._mpi_, im._mpi_ return y def __hash__(self): (a, b), (c,d) = self._mpci_ if a == b and c == d: return mpc_hash((a, c)) else: return hash(self._mpci_) def __repr__(s): if s.ctx.pretty: return str(s) return "iv.mpc(%s, %s)" % (repr(s.real), repr(s.imag)) def __str__(s): return "(%s + %s*j)" % (str(s.real), str(s.imag)) @property def a(self): (a, b), (c,d) = self._mpci_ return 
self.ctx.make_mpf((a, a)) @property def b(self): (a, b), (c,d) = self._mpci_ return self.ctx.make_mpf((b, b)) @property def c(self): (a, b), (c,d) = self._mpci_ return self.ctx.make_mpf((c, c)) @property def d(self): (a, b), (c,d) = self._mpci_ return self.ctx.make_mpf((d, d)) @property def real(s): return s.ctx.make_mpf(s._mpci_[0]) @property def imag(s): return s.ctx.make_mpf(s._mpci_[1]) def conjugate(s): a, b = s._mpci_ return s.ctx.make_mpc((a, mpf_neg(b))) def overlap(s, t): t = s.ctx.convert(t) real_overlap = (s.a <= t.a <= s.b) or (s.a <= t.b <= s.b) or (t.a <= s.a <= t.b) or (t.a <= s.b <= t.b) imag_overlap = (s.c <= t.c <= s.d) or (s.c <= t.d <= s.d) or (t.c <= s.c <= t.d) or (t.c <= s.d <= t.d) return real_overlap and imag_overlap def __contains__(s, t): t = s.ctx.convert(t) return t.real in s.real and t.imag in s.imag def _compare(s, t, ne=False): if not isinstance(t, s.ctx._types): try: t = s.ctx.convert(t) except: return NotImplemented if hasattr(t, '_mpi_'): tval = t._mpi_, mpi_zero elif hasattr(t, '_mpci_'): tval = t._mpci_ if ne: return s._mpci_ != tval return s._mpci_ == tval def __eq__(s, t): return s._compare(t) def __ne__(s, t): return s._compare(t, True) def __lt__(s, t): raise TypeError("complex intervals cannot be ordered") __le__ = __gt__ = __ge__ = __lt__ def __neg__(s): return s.ctx.make_mpc(mpci_neg(s._mpci_, s.ctx.prec)) def __pos__(s): return s.ctx.make_mpc(mpci_pos(s._mpci_, s.ctx.prec)) def __abs__(s): return s.ctx.make_mpf(mpci_abs(s._mpci_, s.ctx.prec)) def ae(s, t, rel_eps=None, abs_eps=None): return s.ctx.almosteq(s, t, rel_eps, abs_eps) def _binary_op(f_real, f_complex): def g_complex(ctx, sval, tval): return ctx.make_mpc(f_complex(sval, tval, ctx.prec)) def g_real(ctx, sval, tval): try: return ctx.make_mpf(f_real(sval, tval, ctx.prec)) except ComplexResult: sval = (sval, mpi_zero) tval = (tval, mpi_zero) return g_complex(ctx, sval, tval) def lop_real(s, t): ctx = s.ctx if not isinstance(t, ctx._types): t = ctx.convert(t) if hasattr(t, "_mpi_"): return g_real(ctx, s._mpi_, t._mpi_) if hasattr(t, "_mpci_"): return g_complex(ctx, (s._mpi_, mpi_zero), t._mpci_) return NotImplemented def rop_real(s, t): ctx = s.ctx if not isinstance(t, ctx._types): t = ctx.convert(t) if hasattr(t, "_mpi_"): return g_real(ctx, t._mpi_, s._mpi_) if hasattr(t, "_mpci_"): return g_complex(ctx, t._mpci_, (s._mpi_, mpi_zero)) return NotImplemented def lop_complex(s, t): ctx = s.ctx if not isinstance(t, s.ctx._types): try: t = s.ctx.convert(t) except (ValueError, TypeError): return NotImplemented return g_complex(ctx, s._mpci_, t._mpci_) def rop_complex(s, t): ctx = s.ctx if not isinstance(t, s.ctx._types): t = s.ctx.convert(t) return g_complex(ctx, t._mpci_, s._mpci_) return lop_real, rop_real, lop_complex, rop_complex ivmpf.__add__, ivmpf.__radd__, ivmpc.__add__, ivmpc.__radd__ = _binary_op(mpi_add, mpci_add) ivmpf.__sub__, ivmpf.__rsub__, ivmpc.__sub__, ivmpc.__rsub__ = _binary_op(mpi_sub, mpci_sub) ivmpf.__mul__, ivmpf.__rmul__, ivmpc.__mul__, ivmpc.__rmul__ = _binary_op(mpi_mul, mpci_mul) ivmpf.__div__, ivmpf.__rdiv__, ivmpc.__div__, ivmpc.__rdiv__ = _binary_op(mpi_div, mpci_div) ivmpf.__pow__, ivmpf.__rpow__, ivmpc.__pow__, ivmpc.__rpow__ = _binary_op(mpi_pow, mpci_pow) ivmpf.__truediv__ = ivmpf.__div__; ivmpf.__rtruediv__ = ivmpf.__rdiv__ ivmpc.__truediv__ = ivmpc.__div__; ivmpc.__rtruediv__ = ivmpc.__rdiv__ class ivmpf_constant(ivmpf): def __new__(cls, f): self = new(cls) self._f = f return self def _get_mpi_(self): prec = self.ctx._prec[0] a = self._f(prec, round_floor) b 
= self._f(prec, round_ceiling) return a, b _mpi_ = property(_get_mpi_) class MPIntervalContext(StandardBaseContext): def __init__(ctx): ctx.mpf = type('ivmpf', (ivmpf,), {}) ctx.mpc = type('ivmpc', (ivmpc,), {}) ctx._types = (ctx.mpf, ctx.mpc) ctx._constant = type('ivmpf_constant', (ivmpf_constant,), {}) ctx._prec = [53] ctx._set_prec(53) ctx._constant._ctxdata = ctx.mpf._ctxdata = ctx.mpc._ctxdata = [ctx.mpf, new, ctx._prec] ctx._constant.ctx = ctx.mpf.ctx = ctx.mpc.ctx = ctx ctx.pretty = False StandardBaseContext.__init__(ctx) ctx._init_builtins() def _mpi(ctx, a, b=None): if b is None: return ctx.mpf(a) return ctx.mpf((a,b)) def _init_builtins(ctx): ctx.one = ctx.mpf(1) ctx.zero = ctx.mpf(0) ctx.inf = ctx.mpf('inf') ctx.ninf = -ctx.inf ctx.nan = ctx.mpf('nan') ctx.j = ctx.mpc(0,1) ctx.exp = ctx._wrap_mpi_function(libmp.mpi_exp, libmp.mpci_exp) ctx.sqrt = ctx._wrap_mpi_function(libmp.mpi_sqrt) ctx.ln = ctx._wrap_mpi_function(libmp.mpi_log, libmp.mpci_log) ctx.cos = ctx._wrap_mpi_function(libmp.mpi_cos, libmp.mpci_cos) ctx.sin = ctx._wrap_mpi_function(libmp.mpi_sin, libmp.mpci_sin) ctx.tan = ctx._wrap_mpi_function(libmp.mpi_tan) ctx.gamma = ctx._wrap_mpi_function(libmp.mpi_gamma, libmp.mpci_gamma) ctx.loggamma = ctx._wrap_mpi_function(libmp.mpi_loggamma, libmp.mpci_loggamma) ctx.rgamma = ctx._wrap_mpi_function(libmp.mpi_rgamma, libmp.mpci_rgamma) ctx.factorial = ctx._wrap_mpi_function(libmp.mpi_factorial, libmp.mpci_factorial) ctx.fac = ctx.factorial ctx.eps = ctx._constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1)) ctx.pi = ctx._constant(libmp.mpf_pi) ctx.e = ctx._constant(libmp.mpf_e) ctx.ln2 = ctx._constant(libmp.mpf_ln2) ctx.ln10 = ctx._constant(libmp.mpf_ln10) ctx.phi = ctx._constant(libmp.mpf_phi) ctx.euler = ctx._constant(libmp.mpf_euler) ctx.catalan = ctx._constant(libmp.mpf_catalan) ctx.glaisher = ctx._constant(libmp.mpf_glaisher) ctx.khinchin = ctx._constant(libmp.mpf_khinchin) ctx.twinprime = ctx._constant(libmp.mpf_twinprime) def _wrap_mpi_function(ctx, f_real, f_complex=None): def g(x, **kwargs): if kwargs: prec = kwargs.get('prec', ctx._prec[0]) else: prec = ctx._prec[0] x = ctx.convert(x) if hasattr(x, "_mpi_"): return ctx.make_mpf(f_real(x._mpi_, prec)) if hasattr(x, "_mpci_"): return ctx.make_mpc(f_complex(x._mpci_, prec)) raise ValueError return g @classmethod def _wrap_specfun(cls, name, f, wrap): if wrap: def f_wrapped(ctx, *args, **kwargs): convert = ctx.convert args = [convert(a) for a in args] prec = ctx.prec try: ctx.prec += 10 retval = f(ctx, *args, **kwargs) finally: ctx.prec = prec return +retval else: f_wrapped = f setattr(cls, name, f_wrapped) def _set_prec(ctx, n): ctx._prec[0] = max(1, int(n)) ctx._dps = prec_to_dps(n) def _set_dps(ctx, n): ctx._prec[0] = dps_to_prec(n) ctx._dps = max(1, int(n)) prec = property(lambda ctx: ctx._prec[0], _set_prec) dps = property(lambda ctx: ctx._dps, _set_dps) def make_mpf(ctx, v): a = new(ctx.mpf) a._mpi_ = v return a def make_mpc(ctx, v): a = new(ctx.mpc) a._mpci_ = v return a def _mpq(ctx, pq): p, q = pq a = libmp.from_rational(p, q, ctx.prec, round_floor) b = libmp.from_rational(p, q, ctx.prec, round_ceiling) return ctx.make_mpf((a, b)) def convert(ctx, x): if isinstance(x, (ctx.mpf, ctx.mpc)): return x if isinstance(x, ctx._constant): return +x if isinstance(x, complex) or hasattr(x, "_mpc_"): re = ctx.convert(x.real) im = ctx.convert(x.imag) return ctx.mpc(re,im) if isinstance(x, basestring): v = mpi_from_str(x, ctx.prec) return ctx.make_mpf(v) if hasattr(x, "_mpi_"): a, b = x._mpi_ else: try: a, b = x except 
(TypeError, ValueError): a = b = x if hasattr(a, "_mpi_"): a = a._mpi_[0] else: a = convert_mpf_(a, ctx.prec, round_floor) if hasattr(b, "_mpi_"): b = b._mpi_[1] else: b = convert_mpf_(b, ctx.prec, round_ceiling) if a == fnan or b == fnan: a = fninf b = finf assert mpf_le(a, b), "endpoints must be properly ordered" return ctx.make_mpf((a, b)) def nstr(ctx, x, n=5, **kwargs): x = ctx.convert(x) if hasattr(x, "_mpi_"): return libmp.mpi_to_str(x._mpi_, n, **kwargs) if hasattr(x, "_mpci_"): re = libmp.mpi_to_str(x._mpci_[0], n, **kwargs) im = libmp.mpi_to_str(x._mpci_[1], n, **kwargs) return "(%s + %s*j)" % (re, im) def mag(ctx, x): x = ctx.convert(x) if isinstance(x, ctx.mpc): return max(ctx.mag(x.real), ctx.mag(x.imag)) + 1 a, b = libmp.mpi_abs(x._mpi_) sign, man, exp, bc = b if man: return exp+bc if b == fzero: return ctx.ninf if b == fnan: return ctx.nan return ctx.inf def isnan(ctx, x): return False def isinf(ctx, x): return x == ctx.inf def isint(ctx, x): x = ctx.convert(x) a, b = x._mpi_ if a == b: sign, man, exp, bc = a if man: return exp >= 0 return a == fzero return None def ldexp(ctx, x, n): a, b = ctx.convert(x)._mpi_ a = libmp.mpf_shift(a, n) b = libmp.mpf_shift(b, n) return ctx.make_mpf((a,b)) def absmin(ctx, x): return abs(ctx.convert(x)).a def absmax(ctx, x): return abs(ctx.convert(x)).b def atan2(ctx, y, x): y = ctx.convert(y)._mpi_ x = ctx.convert(x)._mpi_ return ctx.make_mpf(libmp.mpi_atan2(y,x,ctx.prec)) def _convert_param(ctx, x): if isinstance(x, libmp.int_types): return x, 'Z' if isinstance(x, tuple): p, q = x return (ctx.mpf(p) / ctx.mpf(q), 'R') x = ctx.convert(x) if isinstance(x, ctx.mpf): return x, 'R' if isinstance(x, ctx.mpc): return x, 'C' raise ValueError def _is_real_type(ctx, z): return isinstance(z, ctx.mpf) or isinstance(z, int_types) def _is_complex_type(ctx, z): return isinstance(z, ctx.mpc) def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs): coeffs = list(coeffs) num = range(p) den = range(p,p+q) #tol = ctx.eps s = t = ctx.one k = 0 while 1: for i in num: t *= (coeffs[i]+k) for i in den: t /= (coeffs[i]+k) k += 1; t /= k; t *= z; s += t if t == 0: return s #if abs(t) < tol: # return s if k > maxterms: raise ctx.NoConvergence # Register with "numbers" ABC # We do not subclass, hence we do not use the @abstractmethod checks. While # this is less invasive it may turn out that we do not actually support # parts of the expected interfaces. See # http://docs.python.org/2/library/numbers.html for list of abstract # methods. try: import numbers numbers.Complex.register(ivmpc) numbers.Real.register(ivmpf) except ImportError: pass
16,798
30.166976
111
py
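ctx_iv.py, shown above, stores each number as a pair of endpoints rounded outward, so every operation returns an interval enclosing the true result. A small sketch of the iv context, assuming a standard mpmath installation (containment and comparison semantics follow the ivmpf methods above):

from mpmath import iv

iv.dps = 15

x = iv.mpf([1, 2])              # the interval [1, 2]
assert x.a == 1 and x.b == 2    # endpoint accessors
assert 1.5 in x                 # __contains__ checks both endpoints
assert x.mid == 1.5 and x.delta == 1

y = x * x                       # arithmetic encloses every possible product
assert 1 in y and 4 in y

# Constants are intervals guaranteed to contain the true value; comparisons
# are True only when they hold for all points of both intervals.
assert iv.mpf(3) < iv.pi
assert iv.pi < 4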
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/__init__.py
__version__ = '1.0.0' from .usertools import monitor, timing from .ctx_fp import FPContext from .ctx_mp import MPContext from .ctx_iv import MPIntervalContext fp = FPContext() mp = MPContext() iv = MPIntervalContext() fp._mp = mp mp._mp = mp iv._mp = mp mp._fp = fp fp._fp = fp mp._iv = iv fp._iv = iv iv._iv = iv # XXX: extremely bad pickle hack from . import ctx_mp as _ctx_mp _ctx_mp._mpf_module.mpf = mp.mpf _ctx_mp._mpf_module.mpc = mp.mpc make_mpf = mp.make_mpf make_mpc = mp.make_mpc extraprec = mp.extraprec extradps = mp.extradps workprec = mp.workprec workdps = mp.workdps autoprec = mp.autoprec maxcalls = mp.maxcalls memoize = mp.memoize mag = mp.mag bernfrac = mp.bernfrac qfrom = mp.qfrom mfrom = mp.mfrom kfrom = mp.kfrom taufrom = mp.taufrom qbarfrom = mp.qbarfrom ellipfun = mp.ellipfun jtheta = mp.jtheta kleinj = mp.kleinj qp = mp.qp qhyper = mp.qhyper qgamma = mp.qgamma qfac = mp.qfac nint_distance = mp.nint_distance plot = mp.plot cplot = mp.cplot splot = mp.splot odefun = mp.odefun jacobian = mp.jacobian findroot = mp.findroot multiplicity = mp.multiplicity isinf = mp.isinf isnan = mp.isnan isnormal = mp.isnormal isint = mp.isint isfinite = mp.isfinite almosteq = mp.almosteq nan = mp.nan rand = mp.rand absmin = mp.absmin absmax = mp.absmax fraction = mp.fraction linspace = mp.linspace arange = mp.arange mpmathify = convert = mp.convert mpc = mp.mpc mpi = iv._mpi nstr = mp.nstr nprint = mp.nprint chop = mp.chop fneg = mp.fneg fadd = mp.fadd fsub = mp.fsub fmul = mp.fmul fdiv = mp.fdiv fprod = mp.fprod quad = mp.quad quadgl = mp.quadgl quadts = mp.quadts quadosc = mp.quadosc invertlaplace = mp.invertlaplace invlaptalbot = mp.invlaptalbot invlapstehfest = mp.invlapstehfest invlapdehoog = mp.invlapdehoog pslq = mp.pslq identify = mp.identify findpoly = mp.findpoly richardson = mp.richardson shanks = mp.shanks levin = mp.levin cohen_alt = mp.cohen_alt nsum = mp.nsum nprod = mp.nprod difference = mp.difference diff = mp.diff diffs = mp.diffs diffs_prod = mp.diffs_prod diffs_exp = mp.diffs_exp diffun = mp.diffun differint = mp.differint taylor = mp.taylor pade = mp.pade polyval = mp.polyval polyroots = mp.polyroots fourier = mp.fourier fourierval = mp.fourierval sumem = mp.sumem sumap = mp.sumap chebyfit = mp.chebyfit limit = mp.limit matrix = mp.matrix eye = mp.eye diag = mp.diag zeros = mp.zeros ones = mp.ones hilbert = mp.hilbert randmatrix = mp.randmatrix swap_row = mp.swap_row extend = mp.extend norm = mp.norm mnorm = mp.mnorm lu_solve = mp.lu_solve lu = mp.lu qr = mp.qr unitvector = mp.unitvector inverse = mp.inverse residual = mp.residual qr_solve = mp.qr_solve cholesky = mp.cholesky cholesky_solve = mp.cholesky_solve det = mp.det cond = mp.cond hessenberg = mp.hessenberg schur = mp.schur eig = mp.eig eig_sort = mp.eig_sort eigsy = mp.eigsy eighe = mp.eighe eigh = mp.eigh svd_r = mp.svd_r svd_c = mp.svd_c svd = mp.svd gauss_quadrature = mp.gauss_quadrature expm = mp.expm sqrtm = mp.sqrtm powm = mp.powm logm = mp.logm sinm = mp.sinm cosm = mp.cosm mpf = mp.mpf j = mp.j exp = mp.exp expj = mp.expj expjpi = mp.expjpi ln = mp.ln im = mp.im re = mp.re inf = mp.inf ninf = mp.ninf sign = mp.sign eps = mp.eps pi = mp.pi ln2 = mp.ln2 ln10 = mp.ln10 phi = mp.phi e = mp.e euler = mp.euler catalan = mp.catalan khinchin = mp.khinchin glaisher = mp.glaisher apery = mp.apery degree = mp.degree twinprime = mp.twinprime mertens = mp.mertens ldexp = mp.ldexp frexp = mp.frexp fsum = mp.fsum fdot = mp.fdot sqrt = mp.sqrt cbrt = mp.cbrt exp = mp.exp ln = mp.ln log = mp.log log10 = mp.log10 power = 
mp.power cos = mp.cos sin = mp.sin tan = mp.tan cosh = mp.cosh sinh = mp.sinh tanh = mp.tanh acos = mp.acos asin = mp.asin atan = mp.atan asinh = mp.asinh acosh = mp.acosh atanh = mp.atanh sec = mp.sec csc = mp.csc cot = mp.cot sech = mp.sech csch = mp.csch coth = mp.coth asec = mp.asec acsc = mp.acsc acot = mp.acot asech = mp.asech acsch = mp.acsch acoth = mp.acoth cospi = mp.cospi sinpi = mp.sinpi sinc = mp.sinc sincpi = mp.sincpi cos_sin = mp.cos_sin cospi_sinpi = mp.cospi_sinpi fabs = mp.fabs re = mp.re im = mp.im conj = mp.conj floor = mp.floor ceil = mp.ceil nint = mp.nint frac = mp.frac root = mp.root nthroot = mp.nthroot hypot = mp.hypot fmod = mp.fmod ldexp = mp.ldexp frexp = mp.frexp sign = mp.sign arg = mp.arg phase = mp.phase polar = mp.polar rect = mp.rect degrees = mp.degrees radians = mp.radians atan2 = mp.atan2 fib = mp.fib fibonacci = mp.fibonacci lambertw = mp.lambertw zeta = mp.zeta altzeta = mp.altzeta gamma = mp.gamma rgamma = mp.rgamma factorial = mp.factorial fac = mp.fac fac2 = mp.fac2 beta = mp.beta betainc = mp.betainc psi = mp.psi #psi0 = mp.psi0 #psi1 = mp.psi1 #psi2 = mp.psi2 #psi3 = mp.psi3 polygamma = mp.polygamma digamma = mp.digamma #trigamma = mp.trigamma #tetragamma = mp.tetragamma #pentagamma = mp.pentagamma harmonic = mp.harmonic bernoulli = mp.bernoulli bernfrac = mp.bernfrac stieltjes = mp.stieltjes hurwitz = mp.hurwitz dirichlet = mp.dirichlet bernpoly = mp.bernpoly eulerpoly = mp.eulerpoly eulernum = mp.eulernum polylog = mp.polylog clsin = mp.clsin clcos = mp.clcos gammainc = mp.gammainc gammaprod = mp.gammaprod binomial = mp.binomial rf = mp.rf ff = mp.ff hyper = mp.hyper hyp0f1 = mp.hyp0f1 hyp1f1 = mp.hyp1f1 hyp1f2 = mp.hyp1f2 hyp2f1 = mp.hyp2f1 hyp2f2 = mp.hyp2f2 hyp2f0 = mp.hyp2f0 hyp2f3 = mp.hyp2f3 hyp3f2 = mp.hyp3f2 hyperu = mp.hyperu hypercomb = mp.hypercomb meijerg = mp.meijerg appellf1 = mp.appellf1 appellf2 = mp.appellf2 appellf3 = mp.appellf3 appellf4 = mp.appellf4 hyper2d = mp.hyper2d bihyper = mp.bihyper erf = mp.erf erfc = mp.erfc erfi = mp.erfi erfinv = mp.erfinv npdf = mp.npdf ncdf = mp.ncdf expint = mp.expint e1 = mp.e1 ei = mp.ei li = mp.li ci = mp.ci si = mp.si chi = mp.chi shi = mp.shi fresnels = mp.fresnels fresnelc = mp.fresnelc airyai = mp.airyai airybi = mp.airybi airyaizero = mp.airyaizero airybizero = mp.airybizero scorergi = mp.scorergi scorerhi = mp.scorerhi ellipk = mp.ellipk ellipe = mp.ellipe ellipf = mp.ellipf ellippi = mp.ellippi elliprc = mp.elliprc elliprj = mp.elliprj elliprf = mp.elliprf elliprd = mp.elliprd elliprg = mp.elliprg agm = mp.agm jacobi = mp.jacobi chebyt = mp.chebyt chebyu = mp.chebyu legendre = mp.legendre legenp = mp.legenp legenq = mp.legenq hermite = mp.hermite pcfd = mp.pcfd pcfu = mp.pcfu pcfv = mp.pcfv pcfw = mp.pcfw gegenbauer = mp.gegenbauer laguerre = mp.laguerre spherharm = mp.spherharm besselj = mp.besselj j0 = mp.j0 j1 = mp.j1 besseli = mp.besseli bessely = mp.bessely besselk = mp.besselk besseljzero = mp.besseljzero besselyzero = mp.besselyzero hankel1 = mp.hankel1 hankel2 = mp.hankel2 struveh = mp.struveh struvel = mp.struvel angerj = mp.angerj webere = mp.webere lommels1 = mp.lommels1 lommels2 = mp.lommels2 whitm = mp.whitm whitw = mp.whitw ber = mp.ber bei = mp.bei ker = mp.ker kei = mp.kei coulombc = mp.coulombc coulombf = mp.coulombf coulombg = mp.coulombg barnesg = mp.barnesg superfac = mp.superfac hyperfac = mp.hyperfac loggamma = mp.loggamma siegeltheta = mp.siegeltheta siegelz = mp.siegelz grampoint = mp.grampoint zetazero = mp.zetazero riemannr = mp.riemannr primepi = 
mp.primepi primepi2 = mp.primepi2 primezeta = mp.primezeta bell = mp.bell polyexp = mp.polyexp expm1 = mp.expm1 powm1 = mp.powm1 unitroots = mp.unitroots cyclotomic = mp.cyclotomic mangoldt = mp.mangoldt secondzeta = mp.secondzeta nzeros = mp.nzeros backlunds = mp.backlunds lerchphi = mp.lerchphi stirling1 = mp.stirling1 stirling2 = mp.stirling2 # be careful when changing this name, don't use test*! def runtests(): """ Run all mpmath tests and print output. """ import os.path from inspect import getsourcefile from .tests import runtests as tests testdir = os.path.dirname(os.path.abspath(getsourcefile(tests))) importdir = os.path.abspath(testdir + '/../..') tests.testit(importdir, testdir) def doctests(filter=[]): try: import psyco; psyco.full() except ImportError: pass import sys from timeit import default_timer as clock for i, arg in enumerate(sys.argv): if '__init__.py' in arg: filter = [sn for sn in sys.argv[i+1:] if not sn.startswith("-")] break import doctest globs = globals().copy() for obj in globs: #sorted(globs.keys()): if filter: if not sum([pat in obj for pat in filter]): continue sys.stdout.write(str(obj) + " ") sys.stdout.flush() t1 = clock() doctest.run_docstring_examples(globs[obj], {}, verbose=("-v" in sys.argv)) t2 = clock() print(round(t2-t1, 3)) if __name__ == '__main__': doctests()
8,664
17.634409
82
py
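__init__.py, shown above, instantiates three contexts - fp (double precision), mp (arbitrary precision) and iv (interval arithmetic) - and re-exports mp's methods as module-level names. A small sketch, assuming a standard mpmath installation:

from mpmath import mp, fp, iv, sqrt

mp.dps = 30
print(sqrt(2))       # module-level names call into mp: about 30 digits here
print(fp.sqrt(2))    # the same operation on the fast double-precision context
print(iv.sqrt(2))    # and as a rigorous enclosure on the interval context
mp.dps = 15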
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/factorials.py
from ..libmp.backend import xrange from .functions import defun, defun_wrapped @defun def gammaprod(ctx, a, b, _infsign=False): a = [ctx.convert(x) for x in a] b = [ctx.convert(x) for x in b] poles_num = [] poles_den = [] regular_num = [] regular_den = [] for x in a: [regular_num, poles_num][ctx.isnpint(x)].append(x) for x in b: [regular_den, poles_den][ctx.isnpint(x)].append(x) # One more pole in numerator or denominator gives 0 or inf if len(poles_num) < len(poles_den): return ctx.zero if len(poles_num) > len(poles_den): # Get correct sign of infinity for x+h, h -> 0 from above # XXX: hack, this should be done properly if _infsign: a = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_num] b = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_den] return ctx.sign(ctx.gammaprod(a+regular_num,b+regular_den)) * ctx.inf else: return ctx.inf # All poles cancel # lim G(i)/G(j) = (-1)**(i+j) * gamma(1-j) / gamma(1-i) p = ctx.one orig = ctx.prec try: ctx.prec = orig + 15 while poles_num: i = poles_num.pop() j = poles_den.pop() p *= (-1)**(i+j) * ctx.gamma(1-j) / ctx.gamma(1-i) for x in regular_num: p *= ctx.gamma(x) for x in regular_den: p /= ctx.gamma(x) finally: ctx.prec = orig return +p @defun def beta(ctx, x, y): x = ctx.convert(x) y = ctx.convert(y) if ctx.isinf(y): x, y = y, x if ctx.isinf(x): if x == ctx.inf and not ctx._im(y): if y == ctx.ninf: return ctx.nan if y > 0: return ctx.zero if ctx.isint(y): return ctx.nan if y < 0: return ctx.sign(ctx.gamma(y)) * ctx.inf return ctx.nan return ctx.gammaprod([x, y], [x+y]) @defun def binomial(ctx, n, k): return ctx.gammaprod([n+1], [k+1, n-k+1]) @defun def rf(ctx, x, n): return ctx.gammaprod([x+n], [x]) @defun def ff(ctx, x, n): return ctx.gammaprod([x+1], [x-n+1]) @defun_wrapped def fac2(ctx, x): if ctx.isinf(x): if x == ctx.inf: return x return ctx.nan return 2**(x/2)*(ctx.pi/2)**((ctx.cospi(x)-1)/4)*ctx.gamma(x/2+1) @defun_wrapped def barnesg(ctx, z): if ctx.isinf(z): if z == ctx.inf: return z return ctx.nan if ctx.isnan(z): return z if (not ctx._im(z)) and ctx._re(z) <= 0 and ctx.isint(ctx._re(z)): return z*0 # Account for size (would not be needed if computing log(G)) if abs(z) > 5: ctx.dps += 2*ctx.log(abs(z),2) # Reflection formula if ctx.re(z) < -ctx.dps: w = 1-z pi2 = 2*ctx.pi u = ctx.expjpi(2*w) v = ctx.j*ctx.pi/12 - ctx.j*ctx.pi*w**2/2 + w*ctx.ln(1-u) - \ ctx.j*ctx.polylog(2, u)/pi2 v = ctx.barnesg(2-z)*ctx.exp(v)/pi2**w if ctx._is_real_type(z): v = ctx._re(v) return v # Estimate terms for asymptotic expansion # TODO: fixme, obviously N = ctx.dps // 2 + 5 G = 1 while abs(z) < N or ctx.re(z) < 1: G /= ctx.gamma(z) z += 1 z -= 1 s = ctx.mpf(1)/12 s -= ctx.log(ctx.glaisher) s += z*ctx.log(2*ctx.pi)/2 s += (z**2/2-ctx.mpf(1)/12)*ctx.log(z) s -= 3*z**2/4 z2k = z2 = z**2 for k in xrange(1, N+1): t = ctx.bernoulli(2*k+2) / (4*k*(k+1)*z2k) if abs(t) < ctx.eps: #print k, N # check how many terms were needed break z2k *= z2 s += t #if k == N: # print "warning: series for barnesg failed to converge", ctx.dps return G*ctx.exp(s) @defun def superfac(ctx, z): return ctx.barnesg(z+2) @defun_wrapped def hyperfac(ctx, z): # XXX: estimate needed extra bits accurately if z == ctx.inf: return z if abs(z) > 5: extra = 4*int(ctx.log(abs(z),2)) else: extra = 0 ctx.prec += extra if not ctx._im(z) and ctx._re(z) < 0 and ctx.isint(ctx._re(z)): n = int(ctx.re(z)) h = ctx.hyperfac(-n-1) if ((n+1)//2) & 1: h = -h if ctx._is_complex_type(z): return h + 0j return h zp1 = z+1 # Wrong branch cut #v = ctx.gamma(zp1)**z #ctx.prec -= extra #return v / ctx.barnesg(zp1) v = 
ctx.exp(z*ctx.loggamma(zp1)) ctx.prec -= extra return v / ctx.barnesg(zp1) @defun_wrapped def loggamma_old(ctx, z): a = ctx._re(z) b = ctx._im(z) if not b and a > 0: return ctx.ln(ctx.gamma_old(z)) u = ctx.arg(z) w = ctx.ln(ctx.gamma_old(z)) if b: gi = -b - u/2 + a*u + b*ctx.ln(abs(z)) n = ctx.floor((gi-ctx._im(w))/(2*ctx.pi)+0.5) * (2*ctx.pi) return w + n*ctx.j elif a < 0: n = int(ctx.floor(a)) w += (n-(n%2))*ctx.pi*ctx.j return w ''' @defun def psi0(ctx, z): """Shortcut for psi(0,z) (the digamma function)""" return ctx.psi(0, z) @defun def psi1(ctx, z): """Shortcut for psi(1,z) (the trigamma function)""" return ctx.psi(1, z) @defun def psi2(ctx, z): """Shortcut for psi(2,z) (the tetragamma function)""" return ctx.psi(2, z) @defun def psi3(ctx, z): """Shortcut for psi(3,z) (the pentagamma function)""" return ctx.psi(3, z) '''
file_length: 5,404
avg_line_length: 26.29798
max_line_length: 81
extension_type: py
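The record above ends the factorial/gamma helper module. As a quick illustration of how those wrappers (gammaprod, beta, binomial, rf, ff) relate to plain gamma-function ratios, here is a minimal sketch against the top-level mpmath API; the numeric arguments are arbitrary and the comparison prints are only illustrative, not part of the dataset.

from mpmath import mp, mpf, gamma, gammaprod, beta, binomial, rf, ff

mp.dps = 25
x, y, n = mpf('2.5'), mpf('1.5'), 4

# rf and ff are defined above as ratios of gamma functions
print(rf(x, n), gamma(x + n) / gamma(x))          # rising factorial (x)_n
print(ff(x, n), gamma(x + 1) / gamma(x - n + 1))  # falling factorial
# beta and binomial reduce to gammaprod calls
print(beta(x, y), gamma(x) * gamma(y) / gamma(x + y))
print(binomial(10, 4), gammaprod([11], [5, 7]))   # both are 210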

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/hypergeometric.py
from ..libmp.backend import xrange from .functions import defun, defun_wrapped def _check_need_perturb(ctx, terms, prec, discard_known_zeros): perturb = recompute = False extraprec = 0 discard = [] for term_index, term in enumerate(terms): w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term have_singular_nongamma_weight = False # Avoid division by zero in leading factors (TODO: # also check for near division by zero?) for k, w in enumerate(w_s): if not w: if ctx.re(c_s[k]) <= 0 and c_s[k]: perturb = recompute = True have_singular_nongamma_weight = True pole_count = [0, 0, 0] # Check for gamma and series poles and near-poles for data_index, data in enumerate([alpha_s, beta_s, b_s]): for i, x in enumerate(data): n, d = ctx.nint_distance(x) # Poles if n > 0: continue if d == ctx.ninf: # OK if we have a polynomial # ------------------------------ ok = False if data_index == 2: for u in a_s: if ctx.isnpint(u) and u >= int(n): ok = True break if ok: continue pole_count[data_index] += 1 # ------------------------------ #perturb = recompute = True #return perturb, recompute, extraprec elif d < -4: extraprec += -d recompute = True if discard_known_zeros and pole_count[1] > pole_count[0] + pole_count[2] \ and not have_singular_nongamma_weight: discard.append(term_index) elif sum(pole_count): perturb = recompute = True return perturb, recompute, extraprec, discard _hypercomb_msg = """ hypercomb() failed to converge to the requested %i bits of accuracy using a working precision of %i bits. The function value may be zero or infinite; try passing zeroprec=N or infprec=M to bound finite values between 2^(-N) and 2^M. Otherwise try a higher maxprec or maxterms. """ @defun def hypercomb(ctx, function, params=[], discard_known_zeros=True, **kwargs): orig = ctx.prec sumvalue = ctx.zero dist = ctx.nint_distance ninf = ctx.ninf orig_params = params[:] verbose = kwargs.get('verbose', False) maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(orig)) kwargs['maxprec'] = maxprec # For calls to hypsum zeroprec = kwargs.get('zeroprec') infprec = kwargs.get('infprec') perturbed_reference_value = None hextra = 0 try: while 1: ctx.prec += 10 if ctx.prec > maxprec: raise ValueError(_hypercomb_msg % (orig, ctx.prec)) orig2 = ctx.prec params = orig_params[:] terms = function(*params) if verbose: print() print("ENTERING hypercomb main loop") print("prec =", ctx.prec) print("hextra", hextra) perturb, recompute, extraprec, discard = \ _check_need_perturb(ctx, terms, orig, discard_known_zeros) ctx.prec += extraprec if perturb: if "hmag" in kwargs: hmag = kwargs["hmag"] elif ctx._fixed_precision: hmag = int(ctx.prec*0.3) else: hmag = orig + 10 + hextra h = ctx.ldexp(ctx.one, -hmag) ctx.prec = orig2 + 10 + hmag + 10 for k in range(len(params)): params[k] += h # Heuristically ensure that the perturbations # are "independent" so that two perturbations # don't accidentally cancel each other out # in a subtraction. 
h += h/(k+1) if recompute: terms = function(*params) if discard_known_zeros: terms = [term for (i, term) in enumerate(terms) if i not in discard] if not terms: return ctx.zero evaluated_terms = [] for term_index, term_data in enumerate(terms): w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term_data if verbose: print() print(" Evaluating term %i/%i : %iF%i" % \ (term_index+1, len(terms), len(a_s), len(b_s))) print(" powers", ctx.nstr(w_s), ctx.nstr(c_s)) print(" gamma", ctx.nstr(alpha_s), ctx.nstr(beta_s)) print(" hyper", ctx.nstr(a_s), ctx.nstr(b_s)) print(" z", ctx.nstr(z)) #v = ctx.hyper(a_s, b_s, z, **kwargs) #for a in alpha_s: v *= ctx.gamma(a) #for b in beta_s: v *= ctx.rgamma(b) #for w, c in zip(w_s, c_s): v *= ctx.power(w, c) v = ctx.fprod([ctx.hyper(a_s, b_s, z, **kwargs)] + \ [ctx.gamma(a) for a in alpha_s] + \ [ctx.rgamma(b) for b in beta_s] + \ [ctx.power(w,c) for (w,c) in zip(w_s,c_s)]) if verbose: print(" Value:", v) evaluated_terms.append(v) if len(terms) == 1 and (not perturb): sumvalue = evaluated_terms[0] break if ctx._fixed_precision: sumvalue = ctx.fsum(evaluated_terms) break sumvalue = ctx.fsum(evaluated_terms) term_magnitudes = [ctx.mag(x) for x in evaluated_terms] max_magnitude = max(term_magnitudes) sum_magnitude = ctx.mag(sumvalue) cancellation = max_magnitude - sum_magnitude if verbose: print() print(" Cancellation:", cancellation, "bits") print(" Increased precision:", ctx.prec - orig, "bits") precision_ok = cancellation < ctx.prec - orig if zeroprec is None: zero_ok = False else: zero_ok = max_magnitude - ctx.prec < -zeroprec if infprec is None: inf_ok = False else: inf_ok = max_magnitude > infprec if precision_ok and (not perturb) or ctx.isnan(cancellation): break elif precision_ok: if perturbed_reference_value is None: hextra += 20 perturbed_reference_value = sumvalue continue elif ctx.mag(sumvalue - perturbed_reference_value) <= \ ctx.mag(sumvalue) - orig: break elif zero_ok: sumvalue = ctx.zero break elif inf_ok: sumvalue = ctx.inf break elif 'hmag' in kwargs: break else: hextra *= 2 perturbed_reference_value = sumvalue # Increase precision else: increment = min(max(cancellation, orig//2), max(extraprec,orig)) ctx.prec += increment if verbose: print(" Must start over with increased precision") continue finally: ctx.prec = orig return +sumvalue @defun def hyper(ctx, a_s, b_s, z, **kwargs): """ Hypergeometric function, general case. 
""" z = ctx.convert(z) p = len(a_s) q = len(b_s) a_s = [ctx._convert_param(a) for a in a_s] b_s = [ctx._convert_param(b) for b in b_s] # Reduce degree by eliminating common parameters if kwargs.get('eliminate', True): elim_nonpositive = kwargs.get('eliminate_all', False) i = 0 while i < q and a_s: b = b_s[i] if b in a_s and (elim_nonpositive or not ctx.isnpint(b[0])): a_s.remove(b) b_s.remove(b) p -= 1 q -= 1 else: i += 1 # Handle special cases if p == 0: if q == 1: return ctx._hyp0f1(b_s, z, **kwargs) elif q == 0: return ctx.exp(z) elif p == 1: if q == 1: return ctx._hyp1f1(a_s, b_s, z, **kwargs) elif q == 2: return ctx._hyp1f2(a_s, b_s, z, **kwargs) elif q == 0: return ctx._hyp1f0(a_s[0][0], z) elif p == 2: if q == 1: return ctx._hyp2f1(a_s, b_s, z, **kwargs) elif q == 2: return ctx._hyp2f2(a_s, b_s, z, **kwargs) elif q == 3: return ctx._hyp2f3(a_s, b_s, z, **kwargs) elif q == 0: return ctx._hyp2f0(a_s, b_s, z, **kwargs) elif p == q+1: return ctx._hypq1fq(p, q, a_s, b_s, z, **kwargs) elif p > q+1 and not kwargs.get('force_series'): return ctx._hyp_borel(p, q, a_s, b_s, z, **kwargs) coeffs, types = zip(*(a_s+b_s)) return ctx.hypsum(p, q, types, coeffs, z, **kwargs) @defun def hyp0f1(ctx,b,z,**kwargs): return ctx.hyper([],[b],z,**kwargs) @defun def hyp1f1(ctx,a,b,z,**kwargs): return ctx.hyper([a],[b],z,**kwargs) @defun def hyp1f2(ctx,a1,b1,b2,z,**kwargs): return ctx.hyper([a1],[b1,b2],z,**kwargs) @defun def hyp2f1(ctx,a,b,c,z,**kwargs): return ctx.hyper([a,b],[c],z,**kwargs) @defun def hyp2f2(ctx,a1,a2,b1,b2,z,**kwargs): return ctx.hyper([a1,a2],[b1,b2],z,**kwargs) @defun def hyp2f3(ctx,a1,a2,b1,b2,b3,z,**kwargs): return ctx.hyper([a1,a2],[b1,b2,b3],z,**kwargs) @defun def hyp2f0(ctx,a,b,z,**kwargs): return ctx.hyper([a,b],[],z,**kwargs) @defun def hyp3f2(ctx,a1,a2,a3,b1,b2,z,**kwargs): return ctx.hyper([a1,a2,a3],[b1,b2],z,**kwargs) @defun_wrapped def _hyp1f0(ctx, a, z): return (1-z) ** (-a) @defun def _hyp0f1(ctx, b_s, z, **kwargs): (b, btype), = b_s if z: magz = ctx.mag(z) else: magz = 0 if magz >= 8 and not kwargs.get('force_series'): try: # http://functions.wolfram.com/HypergeometricFunctions/ # Hypergeometric0F1/06/02/03/0004/ # TODO: handle the all-real case more efficiently! 
# TODO: figure out how much precision is needed (exponential growth) orig = ctx.prec try: ctx.prec += 12 + magz//2 def h(): w = ctx.sqrt(-z) jw = ctx.j*w u = 1/(4*jw) c = ctx.mpq_1_2 - b E = ctx.exp(2*jw) T1 = ([-jw,E], [c,-1], [], [], [b-ctx.mpq_1_2, ctx.mpq_3_2-b], [], -u) T2 = ([jw,E], [c,1], [], [], [b-ctx.mpq_1_2, ctx.mpq_3_2-b], [], u) return T1, T2 v = ctx.hypercomb(h, [], force_series=True) v = ctx.gamma(b)/(2*ctx.sqrt(ctx.pi))*v finally: ctx.prec = orig if ctx._is_real_type(b) and ctx._is_real_type(z): v = ctx._re(v) return +v except ctx.NoConvergence: pass return ctx.hypsum(0, 1, (btype,), [b], z, **kwargs) @defun def _hyp1f1(ctx, a_s, b_s, z, **kwargs): (a, atype), = a_s (b, btype), = b_s if not z: return ctx.one+z magz = ctx.mag(z) if magz >= 7 and not (ctx.isint(a) and ctx.re(a) <= 0): if ctx.isinf(z): if ctx.sign(a) == ctx.sign(b) == ctx.sign(z) == 1: return ctx.inf return ctx.nan * z try: try: ctx.prec += magz sector = ctx._im(z) < 0 def h(a,b): if sector: E = ctx.expjpi(ctx.fneg(a, exact=True)) else: E = ctx.expjpi(a) rz = 1/z T1 = ([E,z], [1,-a], [b], [b-a], [a, 1+a-b], [], -rz) T2 = ([ctx.exp(z),z], [1,a-b], [b], [a], [b-a, 1-a], [], rz) return T1, T2 v = ctx.hypercomb(h, [a,b], force_series=True) if ctx._is_real_type(a) and ctx._is_real_type(b) and ctx._is_real_type(z): v = ctx._re(v) return +v except ctx.NoConvergence: pass finally: ctx.prec -= magz v = ctx.hypsum(1, 1, (atype, btype), [a, b], z, **kwargs) return v def _hyp2f1_gosper(ctx,a,b,c,z,**kwargs): # Use Gosper's recurrence # See http://www.math.utexas.edu/pipermail/maxima/2006/000126.html _a,_b,_c,_z = a, b, c, z orig = ctx.prec maxprec = kwargs.get('maxprec', 100*orig) extra = 10 while 1: ctx.prec = orig + extra #a = ctx.convert(_a) #b = ctx.convert(_b) #c = ctx.convert(_c) z = ctx.convert(_z) d = ctx.mpf(0) e = ctx.mpf(1) f = ctx.mpf(0) k = 0 # Common subexpression elimination, unfortunately making # things a bit unreadable. The formula is quite messy to begin # with, though... 
abz = a*b*z ch = c * ctx.mpq_1_2 c1h = (c+1) * ctx.mpq_1_2 nz = 1-z g = z/nz abg = a*b*g cba = c-b-a z2 = z-2 tol = -ctx.prec - 10 nstr = ctx.nstr nprint = ctx.nprint mag = ctx.mag maxmag = ctx.ninf while 1: kch = k+ch kakbz = (k+a)*(k+b)*z / (4*(k+1)*kch*(k+c1h)) d1 = kakbz*(e-(k+cba)*d*g) e1 = kakbz*(d*abg+(k+c)*e) ft = d*(k*(cba*z+k*z2-c)-abz)/(2*kch*nz) f1 = f + e - ft maxmag = max(maxmag, mag(f1)) if mag(f1-f) < tol: break d, e, f = d1, e1, f1 k += 1 cancellation = maxmag - mag(f1) if cancellation < extra: break else: extra += cancellation if extra > maxprec: raise ctx.NoConvergence return f1 @defun def _hyp2f1(ctx, a_s, b_s, z, **kwargs): (a, atype), (b, btype) = a_s (c, ctype), = b_s if z == 1: # TODO: the following logic can be simplified convergent = ctx.re(c-a-b) > 0 finite = (ctx.isint(a) and a <= 0) or (ctx.isint(b) and b <= 0) zerodiv = ctx.isint(c) and c <= 0 and not \ ((ctx.isint(a) and c <= a <= 0) or (ctx.isint(b) and c <= b <= 0)) #print "bz", a, b, c, z, convergent, finite, zerodiv # Gauss's theorem gives the value if convergent if (convergent or finite) and not zerodiv: return ctx.gammaprod([c, c-a-b], [c-a, c-b], _infsign=True) # Otherwise, there is a pole and we take the # sign to be that when approaching from below # XXX: this evaluation is not necessarily correct in all cases return ctx.hyp2f1(a,b,c,1-ctx.eps*2) * ctx.inf # Equal to 1 (first term), unless there is a subsequent # division by zero if not z: # Division by zero but power of z is higher than # first order so cancels if c or a == 0 or b == 0: return 1+z # Indeterminate return ctx.nan # Hit zero denominator unless numerator goes to 0 first if ctx.isint(c) and c <= 0: if (ctx.isint(a) and c <= a <= 0) or \ (ctx.isint(b) and c <= b <= 0): pass else: # Pole in series return ctx.inf absz = abs(z) # Fast case: standard series converges rapidly, # possibly in finitely many terms if absz <= 0.8 or (ctx.isint(a) and a <= 0 and a >= -1000) or \ (ctx.isint(b) and b <= 0 and b >= -1000): return ctx.hypsum(2, 1, (atype, btype, ctype), [a, b, c], z, **kwargs) orig = ctx.prec try: ctx.prec += 10 # Use 1/z transformation if absz >= 1.3: def h(a,b): t = ctx.mpq_1-c; ab = a-b; rz = 1/z T1 = ([-z],[-a], [c,-ab],[b,c-a], [a,t+a],[ctx.mpq_1+ab], rz) T2 = ([-z],[-b], [c,ab],[a,c-b], [b,t+b],[ctx.mpq_1-ab], rz) return T1, T2 v = ctx.hypercomb(h, [a,b], **kwargs) # Use 1-z transformation elif abs(1-z) <= 0.75: def h(a,b): t = c-a-b; ca = c-a; cb = c-b; rz = 1-z T1 = [], [], [c,t], [ca,cb], [a,b], [1-t], rz T2 = [rz], [t], [c,a+b-c], [a,b], [ca,cb], [1+t], rz return T1, T2 v = ctx.hypercomb(h, [a,b], **kwargs) # Use z/(z-1) transformation elif abs(z/(z-1)) <= 0.75: v = ctx.hyp2f1(a, c-b, c, z/(z-1)) / (1-z)**a # Remaining part of unit circle else: v = _hyp2f1_gosper(ctx,a,b,c,z,**kwargs) finally: ctx.prec = orig return +v @defun def _hypq1fq(ctx, p, q, a_s, b_s, z, **kwargs): r""" Evaluates 3F2, 4F3, 5F4, ... """ a_s, a_types = zip(*a_s) b_s, b_types = zip(*b_s) a_s = list(a_s) b_s = list(b_s) absz = abs(z) ispoly = False for a in a_s: if ctx.isint(a) and a <= 0: ispoly = True break # Direct summation if absz < 1 or ispoly: try: return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs) except ctx.NoConvergence: if absz > 1.1 or ispoly: raise # Use expansion at |z-1| -> 0. # Reference: Wolfgang Buhring, "Generalized Hypergeometric Functions at # Unit Argument", Proc. Amer. Math. Soc., Vol. 114, No. 1 (Jan. 1992), # pp.145-153 # The current implementation has several problems: # 1. We only implement it for 3F2. 
The expansion coefficients are # given by extremely messy nested sums in the higher degree cases # (see reference). Is efficient sequential generation of the coefficients # possible in the > 3F2 case? # 2. Although the series converges, it may do so slowly, so we need # convergence acceleration. The acceleration implemented by # nsum does not always help, so results returned are sometimes # inaccurate! Can we do better? # 3. We should check conditions for convergence, and possibly # do a better job of cancelling out gamma poles if possible. if z == 1: # XXX: should also check for division by zero in the # denominator of the series (cf. hyp2f1) S = ctx.re(sum(b_s)-sum(a_s)) if S <= 0: #return ctx.hyper(a_s, b_s, 1-ctx.eps*2, **kwargs) * ctx.inf return ctx.hyper(a_s, b_s, 0.9, **kwargs) * ctx.inf if (p,q) == (3,2) and abs(z-1) < 0.05: # and kwargs.get('sum1') #print "Using alternate summation (experimental)" a1,a2,a3 = a_s b1,b2 = b_s u = b1+b2-a3 initial = ctx.gammaprod([b2-a3,b1-a3,a1,a2],[b2-a3,b1-a3,1,u]) def term(k, _cache={0:initial}): u = b1+b2-a3+k if k in _cache: t = _cache[k] else: t = _cache[k-1] t *= (b1+k-a3-1)*(b2+k-a3-1) t /= k*(u-1) _cache[k] = t return t * ctx.hyp2f1(a1,a2,u,z) try: S = ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'), strict=kwargs.get('strict', True)) return S * ctx.gammaprod([b1,b2],[a1,a2,a3]) except ctx.NoConvergence: pass # Try to use convergence acceleration on and close to the unit circle. # Problem: the convergence acceleration degenerates as |z-1| -> 0, # except for special cases. Everywhere else, the Shanks transformation # is very efficient. if absz < 1.1 and ctx._re(z) <= 1: def term(kk, _cache={0:ctx.one}): k = int(kk) if k != kk: t = z ** ctx.mpf(kk) / ctx.fac(kk) for a in a_s: t *= ctx.rf(a,kk) for b in b_s: t /= ctx.rf(b,kk) return t if k in _cache: return _cache[k] t = term(k-1) m = k-1 for j in xrange(p): t *= (a_s[j]+m) for j in xrange(q): t /= (b_s[j]+m) t *= z t /= k _cache[k] = t return t sum_method = kwargs.get('sum_method', 'r+s+e') try: return ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'), strict=kwargs.get('strict', True), method=sum_method.replace('e','')) except ctx.NoConvergence: if 'e' not in sum_method: raise pass if kwargs.get('verbose'): print("Attempting Euler-Maclaurin summation") """ Somewhat slower version (one diffs_exp for each factor). However, this would be faster with fast direct derivatives of the gamma function. 
def power_diffs(k0): r = 0 l = ctx.log(z) while 1: yield z**ctx.mpf(k0) * l**r r += 1 def loggamma_diffs(x, reciprocal=False): sign = (-1) ** reciprocal yield sign * ctx.loggamma(x) i = 0 while 1: yield sign * ctx.psi(i,x) i += 1 def hyper_diffs(k0): b2 = b_s + [1] A = [ctx.diffs_exp(loggamma_diffs(a+k0)) for a in a_s] B = [ctx.diffs_exp(loggamma_diffs(b+k0,True)) for b in b2] Z = [power_diffs(k0)] C = ctx.gammaprod([b for b in b2], [a for a in a_s]) for d in ctx.diffs_prod(A + B + Z): v = C * d yield v """ def log_diffs(k0): b2 = b_s + [1] yield sum(ctx.loggamma(a+k0) for a in a_s) - \ sum(ctx.loggamma(b+k0) for b in b2) + k0*ctx.log(z) i = 0 while 1: v = sum(ctx.psi(i,a+k0) for a in a_s) - \ sum(ctx.psi(i,b+k0) for b in b2) if i == 0: v += ctx.log(z) yield v i += 1 def hyper_diffs(k0): C = ctx.gammaprod([b for b in b_s], [a for a in a_s]) for d in ctx.diffs_exp(log_diffs(k0)): v = C * d yield v tol = ctx.eps / 1024 prec = ctx.prec try: trunc = 50 * ctx.dps ctx.prec += 20 for i in xrange(5): head = ctx.fsum(term(k) for k in xrange(trunc)) tail, err = ctx.sumem(term, [trunc, ctx.inf], tol=tol, adiffs=hyper_diffs(trunc), verbose=kwargs.get('verbose'), error=True, _fast_abort=True) if err < tol: v = head + tail break trunc *= 2 # Need to increase precision because calculation of # derivatives may be inaccurate ctx.prec += ctx.prec//2 if i == 4: raise ctx.NoConvergence(\ "Euler-Maclaurin summation did not converge") finally: ctx.prec = prec return +v # Use 1/z transformation # http://functions.wolfram.com/HypergeometricFunctions/ # HypergeometricPFQ/06/01/05/02/0004/ def h(*args): a_s = list(args[:p]) b_s = list(args[p:]) Ts = [] recz = ctx.one/z negz = ctx.fneg(z, exact=True) for k in range(q+1): ak = a_s[k] C = [negz] Cp = [-ak] Gn = b_s + [ak] + [a_s[j]-ak for j in range(q+1) if j != k] Gd = a_s + [b_s[j]-ak for j in range(q)] Fn = [ak] + [ak-b_s[j]+1 for j in range(q)] Fd = [1-a_s[j]+ak for j in range(q+1) if j != k] Ts.append((C, Cp, Gn, Gd, Fn, Fd, recz)) return Ts return ctx.hypercomb(h, a_s+b_s, **kwargs) @defun def _hyp_borel(ctx, p, q, a_s, b_s, z, **kwargs): if a_s: a_s, a_types = zip(*a_s) a_s = list(a_s) else: a_s, a_types = [], () if b_s: b_s, b_types = zip(*b_s) b_s = list(b_s) else: b_s, b_types = [], () kwargs['maxterms'] = kwargs.get('maxterms', ctx.prec) try: return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs) except ctx.NoConvergence: pass prec = ctx.prec try: tol = kwargs.get('asymp_tol', ctx.eps/4) ctx.prec += 10 # hypsum is has a conservative tolerance. 
So we try again: def term(k, cache={0:ctx.one}): if k in cache: return cache[k] t = term(k-1) for a in a_s: t *= (a+(k-1)) for b in b_s: t /= (b+(k-1)) t *= z t /= k cache[k] = t return t s = ctx.one for k in xrange(1, ctx.prec): t = term(k) s += t if abs(t) <= tol: return s finally: ctx.prec = prec if p <= q+3: contour = kwargs.get('contour') if not contour: if ctx.arg(z) < 0.25: u = z / max(1, abs(z)) if ctx.arg(z) >= 0: contour = [0, 2j, (2j+2)/u, 2/u, ctx.inf] else: contour = [0, -2j, (-2j+2)/u, 2/u, ctx.inf] #contour = [0, 2j/z, 2/z, ctx.inf] #contour = [0, 2j, 2/z, ctx.inf] #contour = [0, 2j, ctx.inf] else: contour = [0, ctx.inf] quad_kwargs = kwargs.get('quad_kwargs', {}) def g(t): return ctx.exp(-t)*ctx.hyper(a_s, b_s+[1], t*z) I, err = ctx.quad(g, contour, error=True, **quad_kwargs) if err <= abs(I)*ctx.eps*8: return I raise ctx.NoConvergence @defun def _hyp2f2(ctx, a_s, b_s, z, **kwargs): (a1, a1type), (a2, a2type) = a_s (b1, b1type), (b2, b2type) = b_s absz = abs(z) magz = ctx.mag(z) orig = ctx.prec # Asymptotic expansion is ~ exp(z) asymp_extraprec = magz # Asymptotic series is in terms of 3F1 can_use_asymptotic = (not kwargs.get('force_series')) and \ (ctx.mag(absz) > 3) # TODO: much of the following could be shared with 2F3 instead of # copypasted if can_use_asymptotic: #print "using asymp" try: try: ctx.prec += asymp_extraprec # http://functions.wolfram.com/HypergeometricFunctions/ # Hypergeometric2F2/06/02/02/0002/ def h(a1,a2,b1,b2): X = a1+a2-b1-b2 A2 = a1+a2 B2 = b1+b2 c = {} c[0] = ctx.one c[1] = (A2-1)*X+b1*b2-a1*a2 s1 = 0 k = 0 tprev = 0 while 1: if k not in c: uu1 = 1-B2+2*a1+a1**2+2*a2+a2**2-A2*B2+a1*a2+b1*b2+(2*B2-3*(A2+1))*k+2*k**2 uu2 = (k-A2+b1-1)*(k-A2+b2-1)*(k-X-2) c[k] = ctx.one/k * (uu1*c[k-1]-uu2*c[k-2]) t1 = c[k] * z**(-k) if abs(t1) < 0.1*ctx.eps: #print "Convergence :)" break # Quit if the series doesn't converge quickly enough if k > 5 and abs(tprev) / abs(t1) < 1.5: #print "No convergence :(" raise ctx.NoConvergence s1 += t1 tprev = t1 k += 1 S = ctx.exp(z)*s1 T1 = [z,S], [X,1], [b1,b2],[a1,a2],[],[],0 T2 = [-z],[-a1],[b1,b2,a2-a1],[a2,b1-a1,b2-a1],[a1,a1-b1+1,a1-b2+1],[a1-a2+1],-1/z T3 = [-z],[-a2],[b1,b2,a1-a2],[a1,b1-a2,b2-a2],[a2,a2-b1+1,a2-b2+1],[-a1+a2+1],-1/z return T1, T2, T3 v = ctx.hypercomb(h, [a1,a2,b1,b2], force_series=True, maxterms=4*ctx.prec) if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,z]) == 5: v = ctx.re(v) return v except ctx.NoConvergence: pass finally: ctx.prec = orig return ctx.hypsum(2, 2, (a1type, a2type, b1type, b2type), [a1, a2, b1, b2], z, **kwargs) @defun def _hyp1f2(ctx, a_s, b_s, z, **kwargs): (a1, a1type), = a_s (b1, b1type), (b2, b2type) = b_s absz = abs(z) magz = ctx.mag(z) orig = ctx.prec # Asymptotic expansion is ~ exp(sqrt(z)) asymp_extraprec = z and magz//2 # Asymptotic series is in terms of 3F0 can_use_asymptotic = (not kwargs.get('force_series')) and \ (ctx.mag(absz) > 19) and \ (ctx.sqrt(absz) > 1.5*orig) # and \ # ctx._hyp_check_convergence([a1, a1-b1+1, a1-b2+1], [], # 1/absz, orig+40+asymp_extraprec) # TODO: much of the following could be shared with 2F3 instead of # copypasted if can_use_asymptotic: #print "using asymp" try: try: ctx.prec += asymp_extraprec # http://functions.wolfram.com/HypergeometricFunctions/ # Hypergeometric1F2/06/02/03/ def h(a1,b1,b2): X = ctx.mpq_1_2*(a1-b1-b2+ctx.mpq_1_2) c = {} c[0] = ctx.one c[1] = 2*(ctx.mpq_1_4*(3*a1+b1+b2-2)*(a1-b1-b2)+b1*b2-ctx.mpq_3_16) c[2] = 2*(b1*b2+ctx.mpq_1_4*(a1-b1-b2)*(3*a1+b1+b2-2)-ctx.mpq_3_16)**2+\ ctx.mpq_1_16*(-16*(2*a1-3)*b1*b2 + \ 
4*(a1-b1-b2)*(-8*a1**2+11*a1+b1+b2-2)-3) s1 = 0 s2 = 0 k = 0 tprev = 0 while 1: if k not in c: uu1 = (3*k**2+(-6*a1+2*b1+2*b2-4)*k + 3*a1**2 - \ (b1-b2)**2 - 2*a1*(b1+b2-2) + ctx.mpq_1_4) uu2 = (k-a1+b1-b2-ctx.mpq_1_2)*(k-a1-b1+b2-ctx.mpq_1_2)*\ (k-a1+b1+b2-ctx.mpq_5_2) c[k] = ctx.one/(2*k)*(uu1*c[k-1]-uu2*c[k-2]) w = c[k] * (-z)**(-0.5*k) t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w t2 = ctx.j**k * ctx.mpf(2)**(-k) * w if abs(t1) < 0.1*ctx.eps: #print "Convergence :)" break # Quit if the series doesn't converge quickly enough if k > 5 and abs(tprev) / abs(t1) < 1.5: #print "No convergence :(" raise ctx.NoConvergence s1 += t1 s2 += t2 tprev = t1 k += 1 S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \ ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2 T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2], [a1],\ [], [], 0 T2 = [-z], [-a1], [b1,b2],[b1-a1,b2-a1], \ [a1,a1-b1+1,a1-b2+1], [], 1/z return T1, T2 v = ctx.hypercomb(h, [a1,b1,b2], force_series=True, maxterms=4*ctx.prec) if sum(ctx._is_real_type(u) for u in [a1,b1,b2,z]) == 4: v = ctx.re(v) return v except ctx.NoConvergence: pass finally: ctx.prec = orig #print "not using asymp" return ctx.hypsum(1, 2, (a1type, b1type, b2type), [a1, b1, b2], z, **kwargs) @defun def _hyp2f3(ctx, a_s, b_s, z, **kwargs): (a1, a1type), (a2, a2type) = a_s (b1, b1type), (b2, b2type), (b3, b3type) = b_s absz = abs(z) magz = ctx.mag(z) # Asymptotic expansion is ~ exp(sqrt(z)) asymp_extraprec = z and magz//2 orig = ctx.prec # Asymptotic series is in terms of 4F1 # The square root below empirically provides a plausible criterion # for the leading series to converge can_use_asymptotic = (not kwargs.get('force_series')) and \ (ctx.mag(absz) > 19) and (ctx.sqrt(absz) > 1.5*orig) if can_use_asymptotic: #print "using asymp" try: try: ctx.prec += asymp_extraprec # http://functions.wolfram.com/HypergeometricFunctions/ # Hypergeometric2F3/06/02/03/01/0002/ def h(a1,a2,b1,b2,b3): X = ctx.mpq_1_2*(a1+a2-b1-b2-b3+ctx.mpq_1_2) A2 = a1+a2 B3 = b1+b2+b3 A = a1*a2 B = b1*b2+b3*b2+b1*b3 R = b1*b2*b3 c = {} c[0] = ctx.one c[1] = 2*(B - A + ctx.mpq_1_4*(3*A2+B3-2)*(A2-B3) - ctx.mpq_3_16) c[2] = ctx.mpq_1_2*c[1]**2 + ctx.mpq_1_16*(-16*(2*A2-3)*(B-A) + 32*R +\ 4*(-8*A2**2 + 11*A2 + 8*A + B3 - 2)*(A2-B3)-3) s1 = 0 s2 = 0 k = 0 tprev = 0 while 1: if k not in c: uu1 = (k-2*X-3)*(k-2*X-2*b1-1)*(k-2*X-2*b2-1)*\ (k-2*X-2*b3-1) uu2 = (4*(k-1)**3 - 6*(4*X+B3)*(k-1)**2 + \ 2*(24*X**2+12*B3*X+4*B+B3-1)*(k-1) - 32*X**3 - \ 24*B3*X**2 - 4*B - 8*R - 4*(4*B+B3-1)*X + 2*B3-1) uu3 = (5*(k-1)**2+2*(-10*X+A2-3*B3+3)*(k-1)+2*c[1]) c[k] = ctx.one/(2*k)*(uu1*c[k-3]-uu2*c[k-2]+uu3*c[k-1]) w = c[k] * ctx.power(-z, -0.5*k) t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w t2 = ctx.j**k * ctx.mpf(2)**(-k) * w if abs(t1) < 0.1*ctx.eps: break # Quit if the series doesn't converge quickly enough if k > 5 and abs(tprev) / abs(t1) < 1.5: raise ctx.NoConvergence s1 += t1 s2 += t2 tprev = t1 k += 1 S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \ ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2 T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2, b3], [a1, a2],\ [], [], 0 T2 = [-z], [-a1], [b1,b2,b3,a2-a1],[a2,b1-a1,b2-a1,b3-a1], \ [a1,a1-b1+1,a1-b2+1,a1-b3+1], [a1-a2+1], 1/z T3 = [-z], [-a2], [b1,b2,b3,a1-a2],[a1,b1-a2,b2-a2,b3-a2], \ [a2,a2-b1+1,a2-b2+1,a2-b3+1],[-a1+a2+1], 1/z return T1, T2, T3 v = ctx.hypercomb(h, [a1,a2,b1,b2,b3], force_series=True, maxterms=4*ctx.prec) if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,b3,z]) == 6: v = ctx.re(v) return v except ctx.NoConvergence: pass finally: ctx.prec = orig return ctx.hypsum(2, 3, (a1type, a2type, 
b1type, b2type, b3type), [a1, a2, b1, b2, b3], z, **kwargs) @defun def _hyp2f0(ctx, a_s, b_s, z, **kwargs): (a, atype), (b, btype) = a_s # We want to try aggressively to use the asymptotic expansion, # and fall back only when absolutely necessary try: kwargsb = kwargs.copy() kwargsb['maxterms'] = kwargsb.get('maxterms', ctx.prec) return ctx.hypsum(2, 0, (atype,btype), [a,b], z, **kwargsb) except ctx.NoConvergence: if kwargs.get('force_series'): raise pass def h(a, b): w = ctx.sinpi(b) rz = -1/z T1 = ([ctx.pi,w,rz],[1,-1,a],[],[a-b+1,b],[a],[b],rz) T2 = ([-ctx.pi,w,rz],[1,-1,1+a-b],[],[a,2-b],[a-b+1],[2-b],rz) return T1, T2 return ctx.hypercomb(h, [a, 1+a-b], **kwargs) @defun def meijerg(ctx, a_s, b_s, z, r=1, series=None, **kwargs): an, ap = a_s bm, bq = b_s n = len(an) p = n + len(ap) m = len(bm) q = m + len(bq) a = an+ap b = bm+bq a = [ctx.convert(_) for _ in a] b = [ctx.convert(_) for _ in b] z = ctx.convert(z) if series is None: if p < q: series = 1 if p > q: series = 2 if p == q: if m+n == p and abs(z) > 1: series = 2 else: series = 1 if kwargs.get('verbose'): print("Meijer G m,n,p,q,series =", m,n,p,q,series) if series == 1: def h(*args): a = args[:p] b = args[p:] terms = [] for k in range(m): bases = [z] expts = [b[k]/r] gn = [b[j]-b[k] for j in range(m) if j != k] gn += [1-a[j]+b[k] for j in range(n)] gd = [a[j]-b[k] for j in range(n,p)] gd += [1-b[j]+b[k] for j in range(m,q)] hn = [1-a[j]+b[k] for j in range(p)] hd = [1-b[j]+b[k] for j in range(q) if j != k] hz = (-ctx.one)**(p-m-n) * z**(ctx.one/r) terms.append((bases, expts, gn, gd, hn, hd, hz)) return terms else: def h(*args): a = args[:p] b = args[p:] terms = [] for k in range(n): bases = [z] if r == 1: expts = [a[k]-1] else: expts = [(a[k]-1)/ctx.convert(r)] gn = [a[k]-a[j] for j in range(n) if j != k] gn += [1-a[k]+b[j] for j in range(m)] gd = [a[k]-b[j] for j in range(m,q)] gd += [1-a[k]+a[j] for j in range(n,p)] hn = [1-a[k]+b[j] for j in range(q)] hd = [1+a[j]-a[k] for j in range(p) if j != k] hz = (-ctx.one)**(q-m-n) / z**(ctx.one/r) terms.append((bases, expts, gn, gd, hn, hd, hz)) return terms return ctx.hypercomb(h, a+b, **kwargs) @defun_wrapped def appellf1(ctx,a,b1,b2,c,x,y,**kwargs): # Assume x smaller # We will use x for the outer loop if abs(x) > abs(y): x, y = y, x b1, b2 = b2, b1 def ok(x): return abs(x) < 0.99 # Finite cases if ctx.isnpint(a): pass elif ctx.isnpint(b1): pass elif ctx.isnpint(b2): x, y, b1, b2 = y, x, b2, b1 else: #print x, y # Note: ok if |y| > 1, because # 2F1 implements analytic continuation if not ok(x): u1 = (x-y)/(x-1) if not ok(u1): raise ValueError("Analytic continuation not implemented") #print "Using analytic continuation" return (1-x)**(-b1)*(1-y)**(c-a-b2)*\ ctx.appellf1(c-a,b1,c-b1-b2,c,u1,y,**kwargs) return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]}, {'m+n':[c]}, x,y, **kwargs) @defun def appellf2(ctx,a,b1,b2,c1,c2,x,y,**kwargs): # TODO: continuation return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]}, {'m':[c1],'n':[c2]}, x,y, **kwargs) @defun def appellf3(ctx,a1,a2,b1,b2,c,x,y,**kwargs): outer_polynomial = ctx.isnpint(a1) or ctx.isnpint(b1) inner_polynomial = ctx.isnpint(a2) or ctx.isnpint(b2) if not outer_polynomial: if inner_polynomial or abs(x) > abs(y): x, y = y, x a1,a2,b1,b2 = a2,a1,b2,b1 return ctx.hyper2d({'m':[a1,b1],'n':[a2,b2]}, {'m+n':[c]},x,y,**kwargs) @defun def appellf4(ctx,a,b,c1,c2,x,y,**kwargs): # TODO: continuation return ctx.hyper2d({'m+n':[a,b]}, {'m':[c1],'n':[c2]},x,y,**kwargs) @defun def hyper2d(ctx, a, b, x, y, **kwargs): r""" Sums the generalized 2D 
hypergeometric series .. math :: \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} \frac{P((a),m,n)}{Q((b),m,n)} \frac{x^m y^n} {m! n!} where `(a) = (a_1,\ldots,a_r)`, `(b) = (b_1,\ldots,b_s)` and where `P` and `Q` are products of rising factorials such as `(a_j)_n` or `(a_j)_{m+n}`. `P` and `Q` are specified in the form of dicts, with the `m` and `n` dependence as keys and parameter lists as values. The supported rising factorials are given in the following table (note that only a few are supported in `Q`): +------------+-------------------+--------+ | Key | Rising factorial | `Q` | +============+===================+========+ | ``'m'`` | `(a_j)_m` | Yes | +------------+-------------------+--------+ | ``'n'`` | `(a_j)_n` | Yes | +------------+-------------------+--------+ | ``'m+n'`` | `(a_j)_{m+n}` | Yes | +------------+-------------------+--------+ | ``'m-n'`` | `(a_j)_{m-n}` | No | +------------+-------------------+--------+ | ``'n-m'`` | `(a_j)_{n-m}` | No | +------------+-------------------+--------+ | ``'2m+n'`` | `(a_j)_{2m+n}` | No | +------------+-------------------+--------+ | ``'2m-n'`` | `(a_j)_{2m-n}` | No | +------------+-------------------+--------+ | ``'2n-m'`` | `(a_j)_{2n-m}` | No | +------------+-------------------+--------+ For example, the Appell F1 and F4 functions .. math :: F_1 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} \frac{(a)_{m+n} (b)_m (c)_n}{(d)_{m+n}} \frac{x^m y^n}{m! n!} F_4 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} \frac{(a)_{m+n} (b)_{m+n}}{(c)_m (d)_{n}} \frac{x^m y^n}{m! n!} can be represented respectively as ``hyper2d({'m+n':[a], 'm':[b], 'n':[c]}, {'m+n':[d]}, x, y)`` ``hyper2d({'m+n':[a,b]}, {'m':[c], 'n':[d]}, x, y)`` More generally, :func:`~mpmath.hyper2d` can evaluate any of the 34 distinct convergent second-order (generalized Gaussian) hypergeometric series enumerated by Horn, as well as the Kampe de Feriet function. The series is computed by rewriting it so that the inner series (i.e. the series containing `n` and `y`) has the form of an ordinary generalized hypergeometric series and thereby can be evaluated efficiently using :func:`~mpmath.hyper`. If possible, manually swapping `x` and `y` and the corresponding parameters can sometimes give better results. **Examples** Two separable cases: a product of two geometric series, and a product of two Gaussian hypergeometric functions:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> x, y = mpf(0.25), mpf(0.5) >>> hyper2d({'m':1,'n':1}, {}, x,y) 2.666666666666666666666667 >>> 1/(1-x)/(1-y) 2.666666666666666666666667 >>> hyper2d({'m':[1,2],'n':[3,4]}, {'m':[5],'n':[6]}, x,y) 4.164358531238938319669856 >>> hyp2f1(1,2,5,x)*hyp2f1(3,4,6,y) 4.164358531238938319669856 Some more series that can be done in closed form:: >>> hyper2d({'m':1,'n':1},{'m+n':1},x,y) 2.013417124712514809623881 >>> (exp(x)*x-exp(y)*y)/(x-y) 2.013417124712514809623881 Six of the 34 Horn functions, G1-G3 and H1-H3:: >>> from mpmath import * >>> mp.dps = 10; mp.pretty = True >>> x, y = 0.0625, 0.125 >>> a1,a2,b1,b2,c1,c2,d = 1.1,-1.2,-1.3,-1.4,1.5,-1.6,1.7 >>> hyper2d({'m+n':a1,'n-m':b1,'m-n':b2},{},x,y) # G1 1.139090746 >>> nsum(lambda m,n: rf(a1,m+n)*rf(b1,n-m)*rf(b2,m-n)*\ ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) 1.139090746 >>> hyper2d({'m':a1,'n':a2,'n-m':b1,'m-n':b2},{},x,y) # G2 0.9503682696 >>> nsum(lambda m,n: rf(a1,m)*rf(a2,n)*rf(b1,n-m)*rf(b2,m-n)*\ ... 
x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) 0.9503682696 >>> hyper2d({'2n-m':a1,'2m-n':a2},{},x,y) # G3 1.029372029 >>> nsum(lambda m,n: rf(a1,2*n-m)*rf(a2,2*m-n)*\ ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) 1.029372029 >>> hyper2d({'m-n':a1,'m+n':b1,'n':c1},{'m':d},x,y) # H1 -1.605331256 >>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m+n)*rf(c1,n)/rf(d,m)*\ ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) -1.605331256 >>> hyper2d({'m-n':a1,'m':b1,'n':[c1,c2]},{'m':d},x,y) # H2 -2.35405404 >>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m)*rf(c1,n)*rf(c2,n)/rf(d,m)*\ ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) -2.35405404 >>> hyper2d({'2m+n':a1,'n':b1},{'m+n':c1},x,y) # H3 0.974479074 >>> nsum(lambda m,n: rf(a1,2*m+n)*rf(b1,n)/rf(c1,m+n)*\ ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) 0.974479074 **References** 1. [SrivastavaKarlsson]_ 2. [Weisstein]_ http://mathworld.wolfram.com/HornFunction.html 3. [Weisstein]_ http://mathworld.wolfram.com/AppellHypergeometricFunction.html """ x = ctx.convert(x) y = ctx.convert(y) def parse(dct, key): args = dct.pop(key, []) try: args = list(args) except TypeError: args = [args] return [ctx.convert(arg) for arg in args] a_s = dict(a) b_s = dict(b) a_m = parse(a, 'm') a_n = parse(a, 'n') a_m_add_n = parse(a, 'm+n') a_m_sub_n = parse(a, 'm-n') a_n_sub_m = parse(a, 'n-m') a_2m_add_n = parse(a, '2m+n') a_2m_sub_n = parse(a, '2m-n') a_2n_sub_m = parse(a, '2n-m') b_m = parse(b, 'm') b_n = parse(b, 'n') b_m_add_n = parse(b, 'm+n') if a: raise ValueError("unsupported key: %r" % a.keys()[0]) if b: raise ValueError("unsupported key: %r" % b.keys()[0]) s = 0 outer = ctx.one m = ctx.mpf(0) ok_count = 0 prec = ctx.prec maxterms = kwargs.get('maxterms', 20*prec) try: ctx.prec += 10 tol = +ctx.eps while 1: inner_sign = 1 outer_sign = 1 inner_a = list(a_n) inner_b = list(b_n) outer_a = [a+m for a in a_m] outer_b = [b+m for b in b_m] # (a)_{m+n} = (a)_m (a+m)_n for a in a_m_add_n: a = a+m inner_a.append(a) outer_a.append(a) # (b)_{m+n} = (b)_m (b+m)_n for b in b_m_add_n: b = b+m inner_b.append(b) outer_b.append(b) # (a)_{n-m} = (a-m)_n / (a-m)_m for a in a_n_sub_m: inner_a.append(a-m) outer_b.append(a-m-1) # (a)_{m-n} = (-1)^(m+n) (1-a-m)_m / (1-a-m)_n for a in a_m_sub_n: inner_sign *= (-1) outer_sign *= (-1)**(m) inner_b.append(1-a-m) outer_a.append(-a-m) # (a)_{2m+n} = (a)_{2m} (a+2m)_n for a in a_2m_add_n: inner_a.append(a+2*m) outer_a.append((a+2*m)*(1+a+2*m)) # (a)_{2m-n} = (-1)^(2m+n) (1-a-2m)_{2m} / (1-a-2m)_n for a in a_2m_sub_n: inner_sign *= (-1) inner_b.append(1-a-2*m) outer_a.append((a+2*m)*(1+a+2*m)) # (a)_{2n-m} = 4^n ((a-m)/2)_n ((a-m+1)/2)_n / (a-m)_m for a in a_2n_sub_m: inner_sign *= 4 inner_a.append(0.5*(a-m)) inner_a.append(0.5*(a-m+1)) outer_b.append(a-m-1) inner = ctx.hyper(inner_a, inner_b, inner_sign*y, zeroprec=ctx.prec, **kwargs) term = outer * inner * outer_sign if abs(term) < tol: ok_count += 1 else: ok_count = 0 if ok_count >= 3 or not outer: break s += term for a in outer_a: outer *= a for b in outer_b: outer /= b m += 1 outer = outer * x / m if m > maxterms: raise ctx.NoConvergence("maxterms exceeded in hyper2d") finally: ctx.prec = prec return +s """ @defun def kampe_de_feriet(ctx,a,b,c,d,e,f,x,y,**kwargs): return ctx.hyper2d({'m+n':a,'m':b,'n':c}, {'m+n':d,'m':e,'n':f}, x,y, **kwargs) """ @defun def bihyper(ctx, a_s, b_s, z, **kwargs): r""" Evaluates the bilateral hypergeometric series .. 
math :: \,_AH_B(a_1, \ldots, a_k; b_1, \ldots, b_B; z) = \sum_{n=-\infty}^{\infty} \frac{(a_1)_n \ldots (a_A)_n} {(b_1)_n \ldots (b_B)_n} \, z^n where, for direct convergence, `A = B` and `|z| = 1`, although a regularized sum exists more generally by considering the bilateral series as a sum of two ordinary hypergeometric functions. In order for the series to make sense, none of the parameters may be integers. **Examples** The value of `\,_2H_2` at `z = 1` is given by Dougall's formula:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a,b,c,d = 0.5, 1.5, 2.25, 3.25 >>> bihyper([a,b],[c,d],1) -14.49118026212345786148847 >>> gammaprod([c,d,1-a,1-b,c+d-a-b-1],[c-a,d-a,c-b,d-b]) -14.49118026212345786148847 The regularized function `\,_1H_0` can be expressed as the sum of one `\,_2F_0` function and one `\,_1F_1` function:: >>> a = mpf(0.25) >>> z = mpf(0.75) >>> bihyper([a], [], z) (0.2454393389657273841385582 + 0.2454393389657273841385582j) >>> hyper([a,1],[],z) + (hyper([1],[1-a],-1/z)-1) (0.2454393389657273841385582 + 0.2454393389657273841385582j) >>> hyper([a,1],[],z) + hyper([1],[2-a],-1/z)/z/(a-1) (0.2454393389657273841385582 + 0.2454393389657273841385582j) **References** 1. [Slater]_ (chapter 6: "Bilateral Series", pp. 180-189) 2. [Wikipedia]_ http://en.wikipedia.org/wiki/Bilateral_hypergeometric_series """ z = ctx.convert(z) c_s = a_s + b_s p = len(a_s) q = len(b_s) if (p, q) == (0,0) or (p, q) == (1,1): return ctx.zero * z neg = (p-q) % 2 def h(*c_s): a_s = list(c_s[:p]) b_s = list(c_s[p:]) aa_s = [2-b for b in b_s] bb_s = [2-a for a in a_s] rp = [(-1)**neg * z] + [1-b for b in b_s] + [1-a for a in a_s] rc = [-1] + [1]*len(b_s) + [-1]*len(a_s) T1 = [], [], [], [], a_s + [1], b_s, z T2 = rp, rc, [], [], aa_s + [1], bb_s, (-1)**neg / z return T1, T2 return ctx.hypercomb(h, c_s, **kwargs)
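Before this record's metadata, a small sanity-check sketch for the 2F1 handling above (the z = 1 Gauss-theorem branch and the |z| > 1 continuation); the parameter values are arbitrary, and only the public mpmath entry points are used.

from mpmath import mp, mpf, hyper, hyp2f1, gammaprod

mp.dps = 25
a, b, c = mpf('0.25'), mpf('1.5'), mpf('3.75')

# hyp2f1 is just hyper([a, b], [c], z); both lines should print the same value
print(hyp2f1(a, b, c, 0.3))
print(hyper([a, b], [c], 0.3))

# Gauss's theorem, the z == 1 branch of _hyp2f1 (requires Re(c - a - b) > 0)
print(hyp2f1(a, b, c, 1))
print(gammaprod([c, c - a - b], [c - a, c - b]))

# For |z| >= 1.3 the code switches to the 1/z transformation internally
print(hyp2f1(a, b, c, -3.5))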
file_length: 51,570
avg_line_length: 35.471711
max_line_length: 104
extension_type: py
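The hyper2d/appellf1 docstrings above already carry doctests; the sketch below just replays two of those identities outside the doctest framework (a separable 2D series, and Appell F1 expressed as the hyper2d call used in its implementation). The parameter values are arbitrary small numbers.

from mpmath import mp, mpf, hyper2d, appellf1

mp.dps = 25
x, y = mpf('0.25'), mpf('0.5')

# Product of two geometric series (hyper2d docstring example)
print(hyper2d({'m': 1, 'n': 1}, {}, x, y))
print(1 / (1 - x) / (1 - y))

# Appell F1 is implemented above as exactly this hyper2d call
a, b1, b2, c = mpf('1.1'), mpf('0.7'), mpf('-0.4'), mpf('2.3')
print(appellf1(a, b1, b2, c, x, y))
print(hyper2d({'m+n': [a], 'm': [b1], 'n': [b2]}, {'m+n': [c]}, x, y))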

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/qfunctions.py
from .functions import defun, defun_wrapped @defun def qp(ctx, a, q=None, n=None, **kwargs): r""" Evaluates the q-Pochhammer symbol (or q-rising factorial) .. math :: (a; q)_n = \prod_{k=0}^{n-1} (1-a q^k) where `n = \infty` is permitted if `|q| < 1`. Called with two arguments, ``qp(a,q)`` computes `(a;q)_{\infty}`; with a single argument, ``qp(q)`` computes `(q;q)_{\infty}`. The special case .. math :: \phi(q) = (q; q)_{\infty} = \prod_{k=1}^{\infty} (1-q^k) = \sum_{k=-\infty}^{\infty} (-1)^k q^{(3k^2-k)/2} is also known as the Euler function, or (up to a factor `q^{-1/24}`) the Dedekind eta function. **Examples** If `n` is a positive integer, the function amounts to a finite product:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> qp(2,3,5) -725305.0 >>> fprod(1-2*3**k for k in range(5)) -725305.0 >>> qp(2,3,0) 1.0 Complex arguments are allowed:: >>> qp(2-1j, 0.75j) (0.4628842231660149089976379 + 4.481821753552703090628793j) The regular Pochhammer symbol `(a)_n` is obtained in the following limit as `q \to 1`:: >>> a, n = 4, 7 >>> limit(lambda q: qp(q**a,q,n) / (1-q)**n, 1) 604800.0 >>> rf(a,n) 604800.0 The Taylor series of the reciprocal Euler function gives the partition function `P(n)`, i.e. the number of ways of writing `n` as a sum of positive integers:: >>> taylor(lambda q: 1/qp(q), 0, 10) [1.0, 1.0, 2.0, 3.0, 5.0, 7.0, 11.0, 15.0, 22.0, 30.0, 42.0] Special values include:: >>> qp(0) 1.0 >>> findroot(diffun(qp), -0.4) # location of maximum -0.4112484791779547734440257 >>> qp(_) 1.228348867038575112586878 The q-Pochhammer symbol is related to the Jacobi theta functions. For example, the following identity holds:: >>> q = mpf(0.5) # arbitrary >>> qp(q) 0.2887880950866024212788997 >>> root(3,-2)*root(q,-24)*jtheta(2,pi/6,root(q,6)) 0.2887880950866024212788997 """ a = ctx.convert(a) if n is None: n = ctx.inf else: n = ctx.convert(n) if n < 0: raise ValueError("n cannot be negative") if q is None: q = a else: q = ctx.convert(q) if n == 0: return ctx.one + 0*(a+q) infinite = (n == ctx.inf) same = (a == q) if infinite: if abs(q) >= 1: if same and (q == -1 or q == 1): return ctx.zero * q raise ValueError("q-function only defined for |q| < 1") elif q == 0: return ctx.one - a maxterms = kwargs.get('maxterms', 50*ctx.prec) if infinite and same: # Euler's pentagonal theorem def terms(): t = 1 yield t k = 1 x1 = q x2 = q**2 while 1: yield (-1)**k * x1 yield (-1)**k * x2 x1 *= q**(3*k+1) x2 *= q**(3*k+2) k += 1 if k > maxterms: raise ctx.NoConvergence return ctx.sum_accurately(terms) # return ctx.nprod(lambda k: 1-a*q**k, [0,n-1]) def factors(): k = 0 r = ctx.one while 1: yield 1 - a*r r *= q k += 1 if k >= n: raise StopIteration if k > maxterms: raise ctx.NoConvergence return ctx.mul_accurately(factors) @defun_wrapped def qgamma(ctx, z, q, **kwargs): r""" Evaluates the q-gamma function .. math :: \Gamma_q(z) = \frac{(q; q)_{\infty}}{(q^z; q)_{\infty}} (1-q)^{1-z}. 
**Examples** Evaluation for real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> qgamma(4,0.75) 4.046875 >>> qgamma(6,6) 121226245.0 >>> qgamma(3+4j, 0.5j) (0.1663082382255199834630088 + 0.01952474576025952984418217j) The q-gamma function satisfies a functional equation similar to that of the ordinary gamma function:: >>> q = mpf(0.25) >>> z = mpf(2.5) >>> qgamma(z+1,q) 1.428277424823760954685912 >>> (1-q**z)/(1-q)*qgamma(z,q) 1.428277424823760954685912 """ if abs(q) > 1: return ctx.qgamma(z,1/q)*q**((z-2)*(z-1)*0.5) return ctx.qp(q, q, None, **kwargs) / \ ctx.qp(q**z, q, None, **kwargs) * (1-q)**(1-z) @defun_wrapped def qfac(ctx, z, q, **kwargs): r""" Evaluates the q-factorial, .. math :: [n]_q! = (1+q)(1+q+q^2)\cdots(1+q+\cdots+q^{n-1}) or more generally .. math :: [z]_q! = \frac{(q;q)_z}{(1-q)^z}. **Examples** >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> qfac(0,0) 1.0 >>> qfac(4,3) 2080.0 >>> qfac(5,6) 121226245.0 >>> qfac(1+1j, 2+1j) (0.4370556551322672478613695 + 0.2609739839216039203708921j) """ if ctx.isint(z) and ctx._re(z) > 0: n = int(ctx._re(z)) return ctx.qp(q, q, n, **kwargs) / (1-q)**n return ctx.qgamma(z+1, q, **kwargs) @defun def qhyper(ctx, a_s, b_s, q, z, **kwargs): r""" Evaluates the basic hypergeometric series or hypergeometric q-series .. math :: \,_r\phi_s \left[\begin{matrix} a_1 & a_2 & \ldots & a_r \\ b_1 & b_2 & \ldots & b_s \end{matrix} ; q,z \right] = \sum_{n=0}^\infty \frac{(a_1;q)_n, \ldots, (a_r;q)_n} {(b_1;q)_n, \ldots, (b_s;q)_n} \left((-1)^n q^{n\choose 2}\right)^{1+s-r} \frac{z^n}{(q;q)_n} where `(a;q)_n` denotes the q-Pochhammer symbol (see :func:`~mpmath.qp`). **Examples** Evaluation works for real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> qhyper([0.5], [2.25], 0.25, 4) -0.1975849091263356009534385 >>> qhyper([0.5], [2.25], 0.25-0.25j, 4) (2.806330244925716649839237 + 3.568997623337943121769938j) >>> qhyper([1+j], [2,3+0.5j], 0.25, 3+4j) (9.112885171773400017270226 - 1.272756997166375050700388j) Comparing with a summation of the defining series, using :func:`~mpmath.nsum`:: >>> b, q, z = 3, 0.25, 0.5 >>> qhyper([], [b], q, z) 0.6221136748254495583228324 >>> nsum(lambda n: z**n / qp(q,q,n)/qp(b,q,n) * q**(n*(n-1)), [0,inf]) 0.6221136748254495583228324 """ #a_s = [ctx._convert_param(a)[0] for a in a_s] #b_s = [ctx._convert_param(b)[0] for b in b_s] #q = ctx._convert_param(q)[0] a_s = [ctx.convert(a) for a in a_s] b_s = [ctx.convert(b) for b in b_s] q = ctx.convert(q) z = ctx.convert(z) r = len(a_s) s = len(b_s) d = 1+s-r maxterms = kwargs.get('maxterms', 50*ctx.prec) def terms(): t = ctx.one yield t qk = 1 k = 0 x = 1 while 1: for a in a_s: p = 1 - a*qk t *= p for b in b_s: p = 1 - b*qk if not p: raise ValueError t /= p t *= z x *= (-1)**d * qk ** d qk *= q t /= (1 - qk) k += 1 yield t * x if k > maxterms: raise ctx.NoConvergence return ctx.sum_accurately(terms)
file_length: 7,646
avg_line_length: 26.213523
max_line_length: 78
extension_type: py
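A short consistency sketch for the q-functions defined in the record above, mirroring identities quoted in their docstrings (the finite q-Pochhammer product and the q-gamma functional equation); the parameter values are the same arbitrary ones used in those docstrings.

from mpmath import mp, mpf, fprod, qp, qgamma

mp.dps = 25

# Finite q-Pochhammer symbol equals a finite product (qp docstring)
print(qp(2, 3, 5))                                # -725305.0
print(fprod(1 - 2 * 3**k for k in range(5)))

# Functional equation of the q-gamma function (qgamma docstring)
q, z = mpf('0.25'), mpf('2.5')
print(qgamma(z + 1, q))
print((1 - q**z) / (1 - q) * qgamma(z, q))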

repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/functions.py
from ..libmp.backend import xrange class SpecialFunctions(object): """ This class implements special functions using high-level code. Elementary and some other functions (e.g. gamma function, basecase hypergeometric series) are assumed to be predefined by the context as "builtins" or "low-level" functions. """ defined_functions = {} # The series for the Jacobi theta functions converge for |q| < 1; # in the current implementation they throw a ValueError for # abs(q) > THETA_Q_LIM THETA_Q_LIM = 1 - 10**-7 def __init__(self): cls = self.__class__ for name in cls.defined_functions: f, wrap = cls.defined_functions[name] cls._wrap_specfun(name, f, wrap) self.mpq_1 = self._mpq((1,1)) self.mpq_0 = self._mpq((0,1)) self.mpq_1_2 = self._mpq((1,2)) self.mpq_3_2 = self._mpq((3,2)) self.mpq_1_4 = self._mpq((1,4)) self.mpq_1_16 = self._mpq((1,16)) self.mpq_3_16 = self._mpq((3,16)) self.mpq_5_2 = self._mpq((5,2)) self.mpq_3_4 = self._mpq((3,4)) self.mpq_7_4 = self._mpq((7,4)) self.mpq_5_4 = self._mpq((5,4)) self.mpq_1_3 = self._mpq((1,3)) self.mpq_2_3 = self._mpq((2,3)) self.mpq_4_3 = self._mpq((4,3)) self.mpq_1_6 = self._mpq((1,6)) self.mpq_5_6 = self._mpq((5,6)) self.mpq_5_3 = self._mpq((5,3)) self._misc_const_cache = {} self._aliases.update({ 'phase' : 'arg', 'conjugate' : 'conj', 'nthroot' : 'root', 'polygamma' : 'psi', 'hurwitz' : 'zeta', #'digamma' : 'psi0', #'trigamma' : 'psi1', #'tetragamma' : 'psi2', #'pentagamma' : 'psi3', 'fibonacci' : 'fib', 'factorial' : 'fac', }) self.zetazero_memoized = self.memoize(self.zetazero) # Default -- do nothing @classmethod def _wrap_specfun(cls, name, f, wrap): setattr(cls, name, f) # Optional fast versions of common functions in common cases. # If not overridden, default (generic hypergeometric series) # implementations will be used def _besselj(ctx, n, z): raise NotImplementedError def _erf(ctx, z): raise NotImplementedError def _erfc(ctx, z): raise NotImplementedError def _gamma_upper_int(ctx, z, a): raise NotImplementedError def _expint_int(ctx, n, z): raise NotImplementedError def _zeta(ctx, s): raise NotImplementedError def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError def _ei(ctx, z): raise NotImplementedError def _e1(ctx, z): raise NotImplementedError def _ci(ctx, z): raise NotImplementedError def _si(ctx, z): raise NotImplementedError def _altzeta(ctx, s): raise NotImplementedError def defun_wrapped(f): SpecialFunctions.defined_functions[f.__name__] = f, True def defun(f): SpecialFunctions.defined_functions[f.__name__] = f, False def defun_static(f): setattr(SpecialFunctions, f.__name__, f) @defun_wrapped def cot(ctx, z): return ctx.one / ctx.tan(z) @defun_wrapped def sec(ctx, z): return ctx.one / ctx.cos(z) @defun_wrapped def csc(ctx, z): return ctx.one / ctx.sin(z) @defun_wrapped def coth(ctx, z): return ctx.one / ctx.tanh(z) @defun_wrapped def sech(ctx, z): return ctx.one / ctx.cosh(z) @defun_wrapped def csch(ctx, z): return ctx.one / ctx.sinh(z) @defun_wrapped def acot(ctx, z): if not z: return ctx.pi * 0.5 else: return ctx.atan(ctx.one / z) @defun_wrapped def asec(ctx, z): return ctx.acos(ctx.one / z) @defun_wrapped def acsc(ctx, z): return ctx.asin(ctx.one / z) @defun_wrapped def acoth(ctx, z): if not z: return ctx.pi * 0.5j else: return ctx.atanh(ctx.one / z) @defun_wrapped def asech(ctx, z): return ctx.acosh(ctx.one / z) @defun_wrapped def acsch(ctx, z): return ctx.asinh(ctx.one / z) @defun def sign(ctx, x): x = ctx.convert(x) if not x or ctx.isnan(x): return x if ctx._is_real_type(x): if x > 0: return ctx.one 
else: return -ctx.one return x / abs(x) @defun def agm(ctx, a, b=1): if b == 1: return ctx.agm1(a) a = ctx.convert(a) b = ctx.convert(b) return ctx._agm(a, b) @defun_wrapped def sinc(ctx, x): if ctx.isinf(x): return 1/x if not x: return x+1 return ctx.sin(x)/x @defun_wrapped def sincpi(ctx, x): if ctx.isinf(x): return 1/x if not x: return x+1 return ctx.sinpi(x)/(ctx.pi*x) # TODO: tests; improve implementation @defun_wrapped def expm1(ctx, x): if not x: return ctx.zero # exp(x) - 1 ~ x if ctx.mag(x) < -ctx.prec: return x + 0.5*x**2 # TODO: accurately eval the smaller of the real/imag parts return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1) @defun_wrapped def powm1(ctx, x, y): mag = ctx.mag one = ctx.one w = x**y - one M = mag(w) # Only moderate cancellation if M > -8: return w # Check for the only possible exact cases if not w: if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)): return w x1 = x - one magy = mag(y) lnx = ctx.ln(x) # Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2) if magy + mag(lnx) < -ctx.prec: return lnx*y + (lnx*y)**2/2 # TODO: accurately eval the smaller of the real/imag part return ctx.sum_accurately(lambda: iter([x**y, -1]), 1) @defun def _rootof1(ctx, k, n): k = int(k) n = int(n) k %= n if not k: return ctx.one elif 2*k == n: return -ctx.one elif 4*k == n: return ctx.j elif 4*k == 3*n: return -ctx.j return ctx.expjpi(2*ctx.mpf(k)/n) @defun def root(ctx, x, n, k=0): n = int(n) x = ctx.convert(x) if k: # Special case: there is an exact real root if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0): return -ctx.root(-x, n) # Multiply by root of unity prec = ctx.prec try: ctx.prec += 10 v = ctx.root(x, n, 0) * ctx._rootof1(k, n) finally: ctx.prec = prec return +v return ctx._nthroot(x, n) @defun def unitroots(ctx, n, primitive=False): gcd = ctx._gcd prec = ctx.prec try: ctx.prec += 10 if primitive: v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1] else: # TODO: this can be done *much* faster v = [ctx._rootof1(k,n) for k in range(n)] finally: ctx.prec = prec return [+x for x in v] @defun def arg(ctx, x): x = ctx.convert(x) re = ctx._re(x) im = ctx._im(x) return ctx.atan2(im, re) @defun def fabs(ctx, x): return abs(ctx.convert(x)) @defun def re(ctx, x): x = ctx.convert(x) if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers return x.real return x @defun def im(ctx, x): x = ctx.convert(x) if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers return x.imag return ctx.zero @defun def conj(ctx, x): x = ctx.convert(x) try: return x.conjugate() except AttributeError: return x @defun def polar(ctx, z): return (ctx.fabs(z), ctx.arg(z)) @defun_wrapped def rect(ctx, r, phi): return r * ctx.mpc(*ctx.cos_sin(phi)) @defun def log(ctx, x, b=None): if b is None: return ctx.ln(x) wp = ctx.prec + 20 return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp) @defun def log10(ctx, x): return ctx.log(x, 10) @defun def fmod(ctx, x, y): return ctx.convert(x) % ctx.convert(y) @defun def degrees(ctx, x): return x / ctx.degree @defun def radians(ctx, x): return x * ctx.degree def _lambertw_special(ctx, z, k): # W(0,0) = 0; all other branches are singular if not z: if not k: return z return ctx.ninf + z if z == ctx.inf: if k == 0: return z else: return z + 2*k*ctx.pi*ctx.j if z == ctx.ninf: return (-z) + (2*k+1)*ctx.pi*ctx.j # Some kind of nan or complex inf/nan? 
return ctx.ln(z) import math import cmath def _lambertw_approx_hybrid(z, k): imag_sign = 0 if hasattr(z, "imag"): x = float(z.real) y = z.imag if y: imag_sign = (-1) ** (y < 0) y = float(y) else: x = float(z) y = 0.0 imag_sign = 0 # hack to work regardless of whether Python supports -0.0 if not y: y = 0.0 z = complex(x,y) if k == 0: if -4.0 < y < 4.0 and -1.0 < x < 2.5: if imag_sign: # Taylor series in upper/lower half-plane if y > 1.00: return (0.876+0.645j) + (0.118-0.174j)*(z-(0.75+2.5j)) if y > 0.25: return (0.505+0.204j) + (0.375-0.132j)*(z-(0.75+0.5j)) if y < -1.00: return (0.876-0.645j) + (0.118+0.174j)*(z-(0.75-2.5j)) if y < -0.25: return (0.505-0.204j) + (0.375+0.132j)*(z-(0.75-0.5j)) # Taylor series near -1 if x < -0.5: if imag_sign >= 0: return (-0.318+1.34j) + (-0.697-0.593j)*(z+1) else: return (-0.318-1.34j) + (-0.697+0.593j)*(z+1) # return real type r = -0.367879441171442 if (not imag_sign) and x > r: z = x # Singularity near -1/e if x < -0.2: return -1 + 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r) # Taylor series near 0 if x < 0.5: return z # Simple linear approximation return 0.2 + 0.3*z if (not imag_sign) and x > 0.0: L1 = math.log(x); L2 = math.log(L1) else: L1 = cmath.log(z); L2 = cmath.log(L1) elif k == -1: # return real type r = -0.367879441171442 if (not imag_sign) and r < x < 0.0: z = x if (imag_sign >= 0) and y < 0.1 and -0.6 < x < -0.2: return -1 - 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r) if (not imag_sign) and -0.2 <= x < 0.0: L1 = math.log(-x) return L1 - math.log(-L1) else: if imag_sign == -1 and (not y) and x < 0.0: L1 = cmath.log(z) - 3.1415926535897932j else: L1 = cmath.log(z) - 6.2831853071795865j L2 = cmath.log(L1) return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2) def _lambertw_series(ctx, z, k, tol): """ Return rough approximation for W_k(z) from an asymptotic series, sufficiently accurate for the Halley iteration to converge to the correct value. """ magz = ctx.mag(z) if (-10 < magz < 900) and (-1000 < k < 1000): # Near the branch point at -1/e if magz < 1 and abs(z+0.36787944117144) < 0.05: if k == 0 or (k == -1 and ctx._im(z) >= 0) or \ (k == 1 and ctx._im(z) < 0): delta = ctx.sum_accurately(lambda: [z, ctx.exp(-1)]) cancellation = -ctx.mag(delta) ctx.prec += cancellation # Use series given in Corless et al. p = ctx.sqrt(2*(ctx.e*z+1)) ctx.prec -= cancellation u = {0:ctx.mpf(-1), 1:ctx.mpf(1)} a = {0:ctx.mpf(2), 1:ctx.mpf(-1)} if k != 0: p = -p s = ctx.zero # The series converges, so we could use it directly, but unless # *extremely* close, it is better to just use the first few # terms to get a good approximation for the iteration for l in xrange(max(2,cancellation)): if l not in u: a[l] = ctx.fsum(u[j]*u[l+1-j] for j in xrange(2,l)) u[l] = (l-1)*(u[l-2]/2+a[l-2]/4)/(l+1)-a[l]/2-u[l-1]/(l+1) term = u[l] * p**l s += term if ctx.mag(term) < -tol: return s, True l += 1 ctx.prec += cancellation//2 return s, False if k == 0 or k == -1: return _lambertw_approx_hybrid(z, k), False if k == 0: if magz < -1: return z*(1-z), False L1 = ctx.ln(z) L2 = ctx.ln(L1) elif k == -1 and (not ctx._im(z)) and (-0.36787944117144 < ctx._re(z) < 0): L1 = ctx.ln(-z) return L1 - ctx.ln(-L1), False else: # This holds both as z -> 0 and z -> inf. # Relative error is O(1/log(z)). 
L1 = ctx.ln(z) + 2j*ctx.pi*k L2 = ctx.ln(L1) return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2), False @defun def lambertw(ctx, z, k=0): z = ctx.convert(z) k = int(k) if not ctx.isnormal(z): return _lambertw_special(ctx, z, k) prec = ctx.prec ctx.prec += 20 + ctx.mag(k or 1) wp = ctx.prec tol = wp - 5 w, done = _lambertw_series(ctx, z, k, tol) if not done: # Use Halley iteration to solve w*exp(w) = z two = ctx.mpf(2) for i in xrange(100): ew = ctx.exp(w) wew = w*ew wewz = wew-z wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two)) if ctx.mag(wn-w) <= ctx.mag(wn) - tol: w = wn break else: w = wn if i == 100: ctx.warn("Lambert W iteration failed to converge for z = %s" % z) ctx.prec = prec return +w @defun_wrapped def bell(ctx, n, x=1): x = ctx.convert(x) if not n: if ctx.isnan(x): return x return type(x)(1) if ctx.isinf(x) or ctx.isinf(n) or ctx.isnan(x) or ctx.isnan(n): return x**n if n == 1: return x if n == 2: return x*(x+1) if x == 0: return ctx.sincpi(n) return _polyexp(ctx, n, x, True) / ctx.exp(x) def _polyexp(ctx, n, x, extra=False): def _terms(): if extra: yield ctx.sincpi(n) t = x k = 1 while 1: yield k**n * t k += 1 t = t*x/k return ctx.sum_accurately(_terms, check_step=4) @defun_wrapped def polyexp(ctx, s, z): if ctx.isinf(z) or ctx.isinf(s) or ctx.isnan(z) or ctx.isnan(s): return z**s if z == 0: return z*s if s == 0: return ctx.expm1(z) if s == 1: return ctx.exp(z)*z if s == 2: return ctx.exp(z)*z*(z+1) return _polyexp(ctx, s, z) @defun_wrapped def cyclotomic(ctx, n, z): n = int(n) if n < 0: raise ValueError("n cannot be negative") p = ctx.one if n == 0: return p if n == 1: return z - p if n == 2: return z + p # Use divisor product representation. Unfortunately, this sometimes # includes singularities for roots of unity, which we have to cancel out. # Matching zeros/poles pairwise, we have (1-z^a)/(1-z^b) ~ a/b + O(z-1). a_prod = 1 b_prod = 1 num_zeros = 0 num_poles = 0 for d in range(1,n+1): if not n % d: w = ctx.moebius(n//d) # Use powm1 because it is important that we get 0 only # if it really is exactly 0 b = -ctx.powm1(z, d) if b: p *= b**w else: if w == 1: a_prod *= d num_zeros += 1 elif w == -1: b_prod *= d num_poles += 1 #print n, num_zeros, num_poles if num_zeros: if num_zeros > num_poles: p *= 0 else: p *= a_prod p /= b_prod return p @defun def mangoldt(ctx, n): r""" Evaluates the von Mangoldt function `\Lambda(n) = \log p` if `n = p^k` a power of a prime, and `\Lambda(n) = 0` otherwise. **Examples** >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> [mangoldt(n) for n in range(-2,3)] [0.0, 0.0, 0.0, 0.0, 0.6931471805599453094172321] >>> mangoldt(6) 0.0 >>> mangoldt(7) 1.945910149055313305105353 >>> mangoldt(8) 0.6931471805599453094172321 >>> fsum(mangoldt(n) for n in range(101)) 94.04531122935739224600493 >>> fsum(mangoldt(n) for n in range(10001)) 10013.39669326311478372032 """ n = int(n) if n < 2: return ctx.zero if n % 2 == 0: # Must be a power of two if n & (n-1) == 0: return +ctx.ln2 else: return ctx.zero # TODO: the following could be generalized into a perfect # power testing function # --- # Look for a small factor for p in (3,5,7,11,13,17,19,23,29,31): if not n % p: q, r = n // p, 0 while q > 1: q, r = divmod(q, p) if r: return ctx.zero return ctx.ln(p) if ctx.isprime(n): return ctx.ln(n) # Obviously, we could use arbitrary-precision arithmetic for this... 
if n > 10**30: raise NotImplementedError k = 2 while 1: p = int(n**(1./k) + 0.5) if p < 2: return ctx.zero if p ** k == n: if ctx.isprime(p): return ctx.ln(p) k += 1 @defun def stirling1(ctx, n, k, exact=False): v = ctx._stirling1(int(n), int(k)) if exact: return int(v) else: return ctx.mpf(v) @defun def stirling2(ctx, n, k, exact=False): v = ctx._stirling2(int(n), int(k)) if exact: return int(v) else: return ctx.mpf(v)
17,877
27.154331
84
py
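The lambertw routine in the file above pairs a rough series/asymptotic first guess with a Halley iteration on w*exp(w) = z. Below is a minimal plain-float sketch of that Halley step for the principal branch and real z > -1/e; it is illustrative only and not part of the dumped library (the helper name lambertw0 and the crude starting guess are this sketch's own choices).

import math

def lambertw0(z, tol=1e-15, maxiter=50):
    # principal branch W_0 for real z >= -1/e (sketch only)
    if z < -1.0 / math.e:
        raise ValueError("W_0 needs z >= -1/e on the real line")
    # crude starting guess; the library instead uses series/asymptotic approximations
    w = math.log(1.0 + z) if z > -0.25 else -0.5
    for _ in range(maxiter):
        ew = math.exp(w)
        wew = w * ew
        err = wew - z
        # Halley update for f(w) = w*exp(w) - z, same form as in lambertw above
        wn = w - err / (wew + ew - (w + 2.0) * err / (2.0 * w + 2.0))
        if abs(wn - w) <= tol * abs(wn):
            return wn
        w = wn
    return w

print(lambertw0(1.0))   # omega constant, approximately 0.5671432904097838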
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/elliptic.py
r""" Elliptic functions historically comprise the elliptic integrals and their inverses, and originate from the problem of computing the arc length of an ellipse. From a more modern point of view, an elliptic function is defined as a doubly periodic function, i.e. a function which satisfies .. math :: f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z) for some half-periods `\omega_1, \omega_2` with `\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic functions are the Jacobi elliptic functions. More broadly, this section includes quasi-doubly periodic functions (such as the Jacobi theta functions) and other functions useful in the study of elliptic functions. Many different conventions for the arguments of elliptic functions are in use. It is even standard to use different parameterizations for different functions in the same text or software (and mpmath is no exception). The usual parameters are the elliptic nome `q`, which usually must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary complex number); the elliptic modulus `k` (an arbitrary complex number); and the half-period ratio `\tau`, which usually must satisfy `\mathrm{Im}[\tau] > 0`. These quantities can be expressed in terms of each other using the following relations: .. math :: m = k^2 .. math :: \tau = i \frac{K(1-m)}{K(m)} .. math :: q = e^{i \pi \tau} .. math :: k = \frac{\vartheta_2^4(q)}{\vartheta_3^4(q)} In addition, an alternative definition is used for the nome in number theory, which we here denote by q-bar: .. math :: \bar{q} = q^2 = e^{2 i \pi \tau} For convenience, mpmath provides functions to convert between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`, :func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`). **References** 1. [AbramowitzStegun]_ 2. 
[WhittakerWatson]_ """ from .functions import defun, defun_wrapped def nome(ctx, m): m = ctx.convert(m) if not m: return m if m == ctx.one: return m if ctx.isnan(m): return m if ctx.isinf(m): if m == ctx.ninf: return type(m)(-1) else: return ctx.mpc(-1) a = ctx.ellipk(ctx.one-m) b = ctx.ellipk(m) v = ctx.exp(-ctx.pi*a/b) if not ctx._im(m) and ctx._re(m) < 1: if ctx._is_real_type(m): return v.real else: return v.real + 0j elif m == 2: v = ctx.mpc(0, v.imag) return v @defun_wrapped def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): r""" Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> qfrom(q=0.25) 0.25 >>> qfrom(m=mfrom(q=0.25)) 0.25 >>> qfrom(k=kfrom(q=0.25)) 0.25 >>> qfrom(tau=taufrom(q=0.25)) (0.25 + 0.0j) >>> qfrom(qbar=qbarfrom(q=0.25)) 0.25 """ if q is not None: return ctx.convert(q) if m is not None: return nome(ctx, m) if k is not None: return nome(ctx, ctx.convert(k)**2) if tau is not None: return ctx.expjpi(tau) if qbar is not None: return ctx.sqrt(qbar) @defun_wrapped def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): r""" Returns the number-theoretic nome `\bar q`, given any of `q, m, k, \tau, \bar{q}`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> qbarfrom(qbar=0.25) 0.25 >>> qbarfrom(q=qfrom(qbar=0.25)) 0.25 >>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned 0.25 >>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned 0.25 >>> qbarfrom(tau=taufrom(qbar=0.25)) (0.25 + 0.0j) """ if qbar is not None: return ctx.convert(qbar) if q is not None: return ctx.convert(q) ** 2 if m is not None: return nome(ctx, m) ** 2 if k is not None: return nome(ctx, ctx.convert(k)**2) ** 2 if tau is not None: return ctx.expjpi(2*tau) @defun_wrapped def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): r""" Returns the elliptic half-period ratio `\tau`, given any of `q, m, k, \tau, \bar{q}`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> taufrom(tau=0.5j) (0.0 + 0.5j) >>> taufrom(q=qfrom(tau=0.5j)) (0.0 + 0.5j) >>> taufrom(m=mfrom(tau=0.5j)) (0.0 + 0.5j) >>> taufrom(k=kfrom(tau=0.5j)) (0.0 + 0.5j) >>> taufrom(qbar=qbarfrom(tau=0.5j)) (0.0 + 0.5j) """ if tau is not None: return ctx.convert(tau) if m is not None: m = ctx.convert(m) return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m) if k is not None: k = ctx.convert(k) return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2) if q is not None: return ctx.log(q) / (ctx.pi*ctx.j) if qbar is not None: qbar = ctx.convert(qbar) return ctx.log(qbar) / (2*ctx.pi*ctx.j) @defun_wrapped def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): r""" Returns the elliptic modulus `k`, given any of `q, m, k, \tau, \bar{q}`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> kfrom(k=0.25) 0.25 >>> kfrom(m=mfrom(k=0.25)) 0.25 >>> kfrom(q=qfrom(k=0.25)) 0.25 >>> kfrom(tau=taufrom(k=0.25)) (0.25 + 0.0j) >>> kfrom(qbar=qbarfrom(k=0.25)) 0.25 As `q \to 1` and `q \to -1`, `k` rapidly approaches `1` and `i \infty` respectively:: >>> kfrom(q=0.75) 0.9999999999999899166471767 >>> kfrom(q=-0.75) (0.0 + 7041781.096692038332790615j) >>> kfrom(q=1) 1 >>> kfrom(q=-1) (0.0 + +infj) """ if k is not None: return ctx.convert(k) if m is not None: return ctx.sqrt(m) if tau is not None: q = ctx.expjpi(tau) if qbar is not None: q = ctx.sqrt(qbar) if q == 1: return q if q == -1: return ctx.mpc(0,'inf') return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2 @defun_wrapped def mfrom(ctx, q=None, m=None, k=None, tau=None, 
qbar=None): r""" Returns the elliptic parameter `m`, given any of `q, m, k, \tau, \bar{q}`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> mfrom(m=0.25) 0.25 >>> mfrom(q=qfrom(m=0.25)) 0.25 >>> mfrom(k=kfrom(m=0.25)) 0.25 >>> mfrom(tau=taufrom(m=0.25)) (0.25 + 0.0j) >>> mfrom(qbar=qbarfrom(m=0.25)) 0.25 As `q \to 1` and `q \to -1`, `m` rapidly approaches `1` and `-\infty` respectively:: >>> mfrom(q=0.75) 0.9999999999999798332943533 >>> mfrom(q=-0.75) -49586681013729.32611558353 >>> mfrom(q=1) 1.0 >>> mfrom(q=-1) -inf The inverse nome as a function of `q` has an integer Taylor series expansion:: >>> taylor(lambda q: mfrom(q), 0, 7) [0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0] """ if m is not None: return m if k is not None: return k**2 if tau is not None: q = ctx.expjpi(tau) if qbar is not None: q = ctx.sqrt(qbar) if q == 1: return ctx.convert(q) if q == -1: return q*ctx.inf v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4 if ctx._is_real_type(q) and q < 0: v = v.real return v jacobi_spec = { 'sn' : ([3],[2],[1],[4], 'sin', 'tanh'), 'cn' : ([4],[2],[2],[4], 'cos', 'sech'), 'dn' : ([4],[3],[3],[4], '1', 'sech'), 'ns' : ([2],[3],[4],[1], 'csc', 'coth'), 'nc' : ([2],[4],[4],[2], 'sec', 'cosh'), 'nd' : ([3],[4],[4],[3], '1', 'cosh'), 'sc' : ([3],[4],[1],[2], 'tan', 'sinh'), 'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'), 'cd' : ([3],[2],[2],[3], 'cos', '1'), 'cs' : ([4],[3],[2],[1], 'cot', 'csch'), 'dc' : ([2],[3],[3],[2], 'sec', '1'), 'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'), 'cc' : None, 'ss' : None, 'nn' : None, 'dd' : None } @defun def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None): try: S = jacobi_spec[kind] except KeyError: raise ValueError("First argument must be a two-character string " "containing 's', 'c', 'd' or 'n', e.g.: 'sn'") if u is None: def f(*args, **kwargs): return ctx.ellipfun(kind, *args, **kwargs) f.__name__ = kind return f prec = ctx.prec try: ctx.prec += 10 u = ctx.convert(u) q = ctx.qfrom(m=m, q=q, k=k, tau=tau) if S is None: v = ctx.one + 0*q*u elif q == ctx.zero: if S[4] == '1': v = ctx.one else: v = getattr(ctx, S[4])(u) v += 0*q*u elif q == ctx.one: if S[5] == '1': v = ctx.one else: v = getattr(ctx, S[5])(u) v += 0*q*u else: t = u / ctx.jtheta(3, 0, q)**2 v = ctx.one for a in S[0]: v *= ctx.jtheta(a, 0, q) for b in S[1]: v /= ctx.jtheta(b, 0, q) for c in S[2]: v *= ctx.jtheta(c, t, q) for d in S[3]: v /= ctx.jtheta(d, t, q) finally: ctx.prec = prec return +v @defun_wrapped def kleinj(ctx, tau=None, **kwargs): r""" Evaluates the Klein j-invariant, which is a modular function defined for `\tau` in the upper half-plane as .. math :: J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)} where `g_2` and `g_3` are the modular invariants of the Weierstrass elliptic function, .. math :: g_2(\tau) = 60 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-4} g_3(\tau) = 140 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-6}. An alternative, common notation is that of the j-function `j(\tau) = 1728 J(\tau)`. **Plots** .. literalinclude :: /plots/kleinj.py .. image :: /plots/kleinj.png .. literalinclude :: /plots/kleinj2.py .. 
image :: /plots/kleinj2.png **Examples** Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> tau = 0.625+0.75*j >>> tau = 0.625+0.75*j >>> kleinj(tau) (-0.1507492166511182267125242 + 0.07595948379084571927228948j) >>> kleinj(tau+1) (-0.1507492166511182267125242 + 0.07595948379084571927228948j) >>> kleinj(-1/tau) (-0.1507492166511182267125242 + 0.07595948379084571927228946j) The j-function has a famous Laurent series expansion in terms of the nome `\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`:: >>> mp.dps = 15 >>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True) [1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0] The j-function admits exact evaluation at special algebraic points related to the Heegner numbers 1, 2, 3, 7, 11, 19, 43, 67, 163:: >>> @extraprec(10) ... def h(n): ... v = (1+sqrt(n)*j) ... if n > 2: ... v *= 0.5 ... return v ... >>> mp.dps = 25 >>> for n in [1,2,3,7,11,19,43,67,163]: ... n, chop(1728*kleinj(h(n))) ... (1, 1728.0) (2, 8000.0) (3, 0.0) (7, -3375.0) (11, -32768.0) (19, -884736.0) (43, -884736000.0) (67, -147197952000.0) (163, -262537412640768000.0) Also at other special points, the j-function assumes explicit algebraic values, e.g.:: >>> chop(1728*kleinj(j*sqrt(5))) 1264538.909475140509320227 >>> identify(cbrt(_)) # note: not simplified '((100+sqrt(13520))/2)' >>> (50+26*sqrt(5))**3 1264538.909475140509320227 """ q = ctx.qfrom(tau=tau, **kwargs) t2 = ctx.jtheta(2,0,q) t3 = ctx.jtheta(3,0,q) t4 = ctx.jtheta(4,0,q) P = (t2**8 + t3**8 + t4**8)**3 Q = 54*(t2*t3*t4)**8 return P/Q def RF_calc(ctx, x, y, z, r): if y == z: return RC_calc(ctx, x, y, r) if x == z: return RC_calc(ctx, y, x, r) if x == y: return RC_calc(ctx, z, x, r) if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)): if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z): return x*y*z if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z): return ctx.zero xm,ym,zm = x,y,z A0 = Am = (x+y+z)/3 Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z)) g = ctx.mpf(0.25) pow4 = ctx.one m = 0 while 1: xs = ctx.sqrt(xm) ys = ctx.sqrt(ym) zs = ctx.sqrt(zm) lm = xs*ys + xs*zs + ys*zs Am1 = (Am+lm)*g xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g if pow4 * Q < abs(Am): break Am = Am1 m += 1 pow4 *= g t = pow4/Am X = (A0-x)*t Y = (A0-y)*t Z = -X-Y E2 = X*Y-Z**2 E3 = X*Y*Z return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240 def RC_calc(ctx, x, y, r, pv=True): if not (ctx.isnormal(x) and ctx.isnormal(y)): if ctx.isinf(x) or ctx.isinf(y): return 1/(x*y) if y == 0: return ctx.inf if x == 0: return ctx.pi / ctx.sqrt(y) / 2 raise ValueError # Cauchy principal value if pv and ctx._im(y) == 0 and ctx._re(y) < 0: return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r) if x == y: return 1/ctx.sqrt(x) extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x)) ctx.prec += extraprec if ctx._is_real_type(x) and ctx._is_real_type(y): x = ctx._re(x) y = ctx._re(y) a = ctx.sqrt(x/y) if x < y: b = ctx.sqrt(y-x) v = ctx.acos(a)/b else: b = ctx.sqrt(x-y) v = ctx.acosh(a)/b else: sx = ctx.sqrt(x) sy = ctx.sqrt(y) v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy) ctx.prec -= extraprec return v def RJ_calc(ctx, x, y, z, p, r): if not (ctx.isnormal(x) and ctx.isnormal(y) and \ ctx.isnormal(z) and ctx.isnormal(p)): if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p): return x*y*z if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p): return ctx.zero if not p: return ctx.inf xm,ym,zm,pm = x,y,z,p A0 = 
Am = (x + y + z + 2*p)/5 delta = (p-x)*(p-y)*(p-z) Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p)) m = 0 g = ctx.mpf(0.25) pow4 = ctx.one S = 0 while 1: sx = ctx.sqrt(xm) sy = ctx.sqrt(ym) sz = ctx.sqrt(zm) sp = ctx.sqrt(pm) lm = sx*sy + sx*sz + sy*sz Am1 = (Am+lm)*g xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g dm = (sp+sx) * (sp+sy) * (sp+sz) em = delta * ctx.power(4, -3*m) / dm**2 if pow4 * Q < abs(Am): break T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm S += T pow4 *= g m += 1 Am = Am1 t = ctx.ldexp(1,-2*m) / Am X = (A0-x)*t Y = (A0-y)*t Z = (A0-z)*t P = (-X-Y-Z)/2 E2 = X*Y + X*Z + Y*Z - 3*P**2 E3 = X*Y*Z + 2*E2*P + 4*P**3 E4 = (2*X*Y*Z + E2*P + 3*P**3)*P E5 = X*Y*Z*P**2 P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5 Q = 24024 v1 = g**m * ctx.power(Am, -1.5) * P/Q v2 = 6*S return v1 + v2 @defun def elliprf(ctx, x, y, z): r""" Evaluates the Carlson symmetric elliptic integral of the first kind .. math :: R_F(x,y,z) = \frac{1}{2} \int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}} which is defined for `x,y,z \notin (-\infty,0)`, and with at most one of `x,y,z` being zero. For real `x,y,z \ge 0`, the principal square root is taken in the integrand. For complex `x,y,z`, the principal square root is taken as `t \to \infty` and as `t \to 0` non-principal branches are chosen as necessary so as to make the integrand continuous. **Examples** Some basic values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> elliprf(0,1,1); pi/2 1.570796326794896619231322 1.570796326794896619231322 >>> elliprf(0,1,inf) 0.0 >>> elliprf(1,1,1) 1.0 >>> elliprf(2,2,2)**2 0.5 >>> elliprf(1,0,0); elliprf(0,0,1); elliprf(0,1,0); elliprf(0,0,0) +inf +inf +inf +inf Representing complete elliptic integrals in terms of `R_F`:: >>> m = mpf(0.75) >>> ellipk(m); elliprf(0,1-m,1) 2.156515647499643235438675 2.156515647499643235438675 >>> ellipe(m); elliprf(0,1-m,1)-m*elliprd(0,1-m,1)/3 1.211056027568459524803563 1.211056027568459524803563 Some symmetries and argument transformations:: >>> x,y,z = 2,3,4 >>> elliprf(x,y,z); elliprf(y,x,z); elliprf(z,y,x) 0.5840828416771517066928492 0.5840828416771517066928492 0.5840828416771517066928492 >>> k = mpf(100000) >>> elliprf(k*x,k*y,k*z); k**(-0.5) * elliprf(x,y,z) 0.001847032121923321253219284 0.001847032121923321253219284 >>> l = sqrt(x*y) + sqrt(y*z) + sqrt(z*x) >>> elliprf(x,y,z); 2*elliprf(x+l,y+l,z+l) 0.5840828416771517066928492 0.5840828416771517066928492 >>> elliprf((x+l)/4,(y+l)/4,(z+l)/4) 0.5840828416771517066928492 Comparing with numerical integration:: >>> x,y,z = 2,3,4 >>> elliprf(x,y,z) 0.5840828416771517066928492 >>> f = lambda t: 0.5*((t+x)*(t+y)*(t+z))**(-0.5) >>> q = extradps(25)(quad) >>> q(f, [0,inf]) 0.5840828416771517066928492 With the following arguments, the square root in the integrand becomes discontinuous at `t = 1/2` if the principal branch is used. To obtain the right value, `-\sqrt{r}` must be taken instead of `\sqrt{r}` on `t \in (0, 1/2)`:: >>> x,y,z = j-1,j,0 >>> elliprf(x,y,z) (0.7961258658423391329305694 - 1.213856669836495986430094j) >>> -q(f, [0,0.5]) + q(f, [0.5,inf]) (0.7961258658423391329305694 - 1.213856669836495986430094j) The so-called *first lemniscate constant*, a transcendental number:: >>> elliprf(0,1,2) 1.31102877714605990523242 >>> extradps(25)(quad)(lambda t: 1/sqrt(1-t**4), [0,1]) 1.31102877714605990523242 >>> gamma('1/4')**2/(4*sqrt(2*pi)) 1.31102877714605990523242 **References** 1. [Carlson]_ 2. [DLMF]_ Chapter 19. 
Elliptic Integrals """ x = ctx.convert(x) y = ctx.convert(y) z = ctx.convert(z) prec = ctx.prec try: ctx.prec += 20 tol = ctx.eps * 2**10 v = RF_calc(ctx, x, y, z, tol) finally: ctx.prec = prec return +v @defun def elliprc(ctx, x, y, pv=True): r""" Evaluates the degenerate Carlson symmetric elliptic integral of the first kind .. math :: R_C(x,y) = R_F(x,y,y) = \frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}. If `y \in (-\infty,0)`, either a value defined by continuity, or with *pv=True* the Cauchy principal value, can be computed. If `x \ge 0, y > 0`, the value can be expressed in terms of elementary functions as .. math :: R_C(x,y) = \begin{cases} \dfrac{1}{\sqrt{y-x}} \cos^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x < y \\ \dfrac{1}{\sqrt{y}}, & x = y \\ \dfrac{1}{\sqrt{x-y}} \cosh^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x > y \\ \end{cases}. **Examples** Some special values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> elliprc(1,2)*4; elliprc(0,1)*2; +pi 3.141592653589793238462643 3.141592653589793238462643 3.141592653589793238462643 >>> elliprc(1,0) +inf >>> elliprc(5,5)**2 0.2 >>> elliprc(1,inf); elliprc(inf,1); elliprc(inf,inf) 0.0 0.0 0.0 Comparing with the elementary closed-form solution:: >>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3')) 2.041630778983498390751238 2.041630778983498390751238 >>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5')) 1.875180765206547065111085 1.875180765206547065111085 Comparing with numerical integration:: >>> q = extradps(25)(quad) >>> elliprc(2, -3, pv=True) 0.3333969101113672670749334 >>> elliprc(2, -3, pv=False) (0.3333969101113672670749334 + 0.7024814731040726393156375j) >>> 0.5*q(lambda t: 1/(sqrt(t+2)*(t-3)), [0,3-j,6,inf]) (0.3333969101113672670749334 + 0.7024814731040726393156375j) """ x = ctx.convert(x) y = ctx.convert(y) prec = ctx.prec try: ctx.prec += 20 tol = ctx.eps * 2**10 v = RC_calc(ctx, x, y, tol, pv) finally: ctx.prec = prec return +v @defun def elliprj(ctx, x, y, z, p): r""" Evaluates the Carlson symmetric elliptic integral of the third kind .. math :: R_J(x,y,z,p) = \frac{3}{2} \int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}. Like :func:`~mpmath.elliprf`, the branch of the square root in the integrand is defined so as to be continuous along the path of integration for complex values of the arguments. 
**Examples** Some values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> elliprj(1,1,1,1) 1.0 >>> elliprj(2,2,2,2); 1/(2*sqrt(2)) 0.3535533905932737622004222 0.3535533905932737622004222 >>> elliprj(0,1,2,2) 1.067937989667395702268688 >>> 3*(2*gamma('5/4')**2-pi**2/gamma('1/4')**2)/(sqrt(2*pi)) 1.067937989667395702268688 >>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4 1.380226776765915172432054 1.380226776765915172432054 >>> elliprj(1,3,2,0); elliprj(0,1,1,0); elliprj(0,0,0,0) +inf +inf +inf >>> elliprj(1,inf,1,0); elliprj(1,1,1,inf) 0.0 0.0 >>> chop(elliprj(1+j, 1-j, 1, 1)) 0.8505007163686739432927844 Scale transformation:: >>> x,y,z,p = 2,3,4,5 >>> k = mpf(100000) >>> elliprj(k*x,k*y,k*z,k*p); k**(-1.5)*elliprj(x,y,z,p) 4.521291677592745527851168e-9 4.521291677592745527851168e-9 Comparing with numerical integration:: >>> elliprj(1,2,3,4) 0.2398480997495677621758617 >>> f = lambda t: 1/((t+4)*sqrt((t+1)*(t+2)*(t+3))) >>> 1.5*quad(f, [0,inf]) 0.2398480997495677621758617 >>> elliprj(1,2+1j,3,4-2j) (0.216888906014633498739952 + 0.04081912627366673332369512j) >>> f = lambda t: 1/((t+4-2j)*sqrt((t+1)*(t+2+1j)*(t+3))) >>> 1.5*quad(f, [0,inf]) (0.216888906014633498739952 + 0.04081912627366673332369511j) """ x = ctx.convert(x) y = ctx.convert(y) z = ctx.convert(z) p = ctx.convert(p) prec = ctx.prec try: ctx.prec += 20 tol = ctx.eps * 2**10 v = RJ_calc(ctx, x, y, z, p, tol) finally: ctx.prec = prec return +v @defun def elliprd(ctx, x, y, z): r""" Evaluates the degenerate Carlson symmetric elliptic integral of the third kind or Carlson elliptic integral of the second kind `R_D(x,y,z) = R_J(x,y,z,z)`. See :func:`~mpmath.elliprj` for additional information. **Examples** >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> elliprd(1,2,3) 0.2904602810289906442326534 >>> elliprj(1,2,3,3) 0.2904602810289906442326534 The so-called *second lemniscate constant*, a transcendental number:: >>> elliprd(0,2,1)/3 0.5990701173677961037199612 >>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1]) 0.5990701173677961037199612 >>> gamma('3/4')**2/sqrt(2*pi) 0.5990701173677961037199612 """ return ctx.elliprj(x,y,z,z) @defun def elliprg(ctx, x, y, z): r""" Evaluates the Carlson completely symmetric elliptic integral of the second kind .. math :: R_G(x,y,z) = \frac{1}{4} \int_0^{\infty} \frac{t}{\sqrt{(t+x)(t+y)(t+z)}} \left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt. **Examples** Evaluation for real and complex arguments:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> elliprg(0,1,1)*4; +pi 3.141592653589793238462643 3.141592653589793238462643 >>> elliprg(0,0.5,1) 0.6753219405238377512600874 >>> chop(elliprg(1+j, 1-j, 2)) 1.172431327676416604532822 A double integral that can be evaluated in terms of `R_G`:: >>> x,y,z = 2,3,4 >>> def f(t,u): ... st = fp.sin(t); ct = fp.cos(t) ... su = fp.sin(u); cu = fp.cos(u) ... return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st ... 
>>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13) 1.725503028069 >>> nprint(elliprg(x,y,z), 13) 1.725503028069 """ x = ctx.convert(x) y = ctx.convert(y) z = ctx.convert(z) zeros = (not x) + (not y) + (not z) if zeros == 3: return (x+y+z)*0 if zeros == 2: if x: return 0.5*ctx.sqrt(x) if y: return 0.5*ctx.sqrt(y) return 0.5*ctx.sqrt(z) if zeros == 1: if not z: x, z = z, x def terms(): T1 = 0.5*z*ctx.elliprf(x,y,z) T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3 T3 = 0.5*ctx.sqrt(x)*ctx.sqrt(y)/ctx.sqrt(z) return T1,T2,T3 return ctx.sum_accurately(terms) @defun_wrapped def ellipf(ctx, phi, m): r""" Evaluates the Legendre incomplete elliptic integral of the first kind .. math :: F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}} or equivalently .. math :: F(\phi,m) = \int_0^{\sin \phi} \frac{dt}{\left(\sqrt{1-t^2}\right)\left(\sqrt{1-mt^2}\right)}. The function reduces to a complete elliptic integral of the first kind (see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`; that is, .. math :: F\left(\frac{\pi}{2}, m\right) = K(m). In the defining integral, it is assumed that the principal branch of the square root is taken and that the path of integration avoids crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`, the function extends quasi-periodically as .. math :: F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}. **Plots** .. literalinclude :: /plots/ellipf.py .. image :: /plots/ellipf.png **Examples** Basic values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> ellipf(0,1) 0.0 >>> ellipf(0,0) 0.0 >>> ellipf(1,0); ellipf(2+3j,0) 1.0 (2.0 + 3.0j) >>> ellipf(1,1); log(sec(1)+tan(1)) 1.226191170883517070813061 1.226191170883517070813061 >>> ellipf(pi/2, -0.5); ellipk(-0.5) 1.415737208425956198892166 1.415737208425956198892166 >>> ellipf(pi/2+eps, 1); ellipf(-pi/2-eps, 1) +inf +inf >>> ellipf(1.5, 1) 3.340677542798311003320813 Comparing with numerical integration:: >>> z,m = 0.5, 1.25 >>> ellipf(z,m) 0.5287219202206327872978255 >>> quad(lambda t: (1-m*sin(t)**2)**(-0.5), [0,z]) 0.5287219202206327872978255 The arguments may be complex numbers:: >>> ellipf(3j, 0.5) (0.0 + 1.713602407841590234804143j) >>> ellipf(3+4j, 5-6j) (1.269131241950351323305741 - 0.3561052815014558335412538j) >>> z,m = 2+3j, 1.25 >>> k = 1011 >>> ellipf(z+pi*k,m); ellipf(z,m) + 2*k*ellipk(m) (4086.184383622179764082821 - 3003.003538923749396546871j) (4086.184383622179764082821 - 3003.003538923749396546871j) For `|\Re(z)| < \pi/2`, the function can be expressed as a hypergeometric series of two variables (see :func:`~mpmath.appellf1`):: >>> z,m = 0.5, 0.25 >>> ellipf(z,m) 0.5050887275786480788831083 >>> sin(z)*appellf1(0.5,0.5,0.5,1.5,sin(z)**2,m*sin(z)**2) 0.5050887275786480788831083 """ z = phi if not (ctx.isnormal(z) and ctx.isnormal(m)): if m == 0: return z + m if z == 0: return z * m if m == ctx.inf or m == ctx.ninf: return z/m raise ValueError x = z.real ctx.prec += max(0, ctx.mag(x)) pi = +ctx.pi away = abs(x) > pi/2 if m == 1: if away: return ctx.inf if away: d = ctx.nint(x/pi) z = z-pi*d P = 2*d*ctx.ellipk(m) else: P = 0 c, s = ctx.cos_sin(z) return s * ctx.elliprf(c**2, 1-m*s**2, 1) + P @defun_wrapped def ellipe(ctx, *args): r""" Called with a single argument `m`, evaluates the Legendre complete elliptic integral of the second kind, `E(m)`, defined by .. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\, \frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right). 
Called with two arguments `\phi, m`, evaluates the incomplete elliptic integral of the second kind .. math :: E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt = \int_0^{\sin z} \frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt. The incomplete integral reduces to a complete integral when `\phi = \frac{\pi}{2}`; that is, .. math :: E\left(\frac{\pi}{2}, m\right) = E(m). In the defining integral, it is assumed that the principal branch of the square root is taken and that the path of integration avoids crossing any branch cuts. Outside `-\pi/2 \le \Re(z) \le \pi/2`, the function extends quasi-periodically as .. math :: E(\phi + n \pi, m) = 2 n E(m) + F(\phi,m), n \in \mathbb{Z}. **Plots** .. literalinclude :: /plots/ellipe.py .. image :: /plots/ellipe.png **Examples for the complete integral** Basic values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> ellipe(0) 1.570796326794896619231322 >>> ellipe(1) 1.0 >>> ellipe(-1) 1.910098894513856008952381 >>> ellipe(2) (0.5990701173677961037199612 + 0.5990701173677961037199612j) >>> ellipe(inf) (0.0 + +infj) >>> ellipe(-inf) +inf Verifying the defining integral and hypergeometric representation:: >>> ellipe(0.5) 1.350643881047675502520175 >>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2]) 1.350643881047675502520175 >>> pi/2*hyp2f1(0.5,-0.5,1,0.5) 1.350643881047675502520175 Evaluation is supported for arbitrary complex `m`:: >>> ellipe(0.5+0.25j) (1.360868682163129682716687 - 0.1238733442561786843557315j) >>> ellipe(3+4j) (1.499553520933346954333612 - 1.577879007912758274533309j) A definite integral:: >>> quad(ellipe, [0,1]) 1.333333333333333333333333 **Examples for the incomplete integral** Basic values and limits:: >>> ellipe(0,1) 0.0 >>> ellipe(0,0) 0.0 >>> ellipe(1,0) 1.0 >>> ellipe(2+3j,0) (2.0 + 3.0j) >>> ellipe(1,1); sin(1) 0.8414709848078965066525023 0.8414709848078965066525023 >>> ellipe(pi/2, -0.5); ellipe(-0.5) 1.751771275694817862026502 1.751771275694817862026502 >>> ellipe(pi/2, 1); ellipe(-pi/2, 1) 1.0 -1.0 >>> ellipe(1.5, 1) 0.9974949866040544309417234 Comparing with numerical integration:: >>> z,m = 0.5, 1.25 >>> ellipe(z,m) 0.4740152182652628394264449 >>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z]) 0.4740152182652628394264449 The arguments may be complex numbers:: >>> ellipe(3j, 0.5) (0.0 + 7.551991234890371873502105j) >>> ellipe(3+4j, 5-6j) (24.15299022574220502424466 + 75.2503670480325997418156j) >>> k = 35 >>> z,m = 2+3j, 1.25 >>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m) (48.30138799412005235090766 + 17.47255216721987688224357j) (48.30138799412005235090766 + 17.47255216721987688224357j) For `|\Re(z)| < \pi/2`, the function can be expressed as a hypergeometric series of two variables (see :func:`~mpmath.appellf1`):: >>> z,m = 0.5, 0.25 >>> ellipe(z,m) 0.4950017030164151928870375 >>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2) 0.4950017030164151928870376 """ if len(args) == 1: return ctx._ellipe(args[0]) else: phi, m = args z = phi if not (ctx.isnormal(z) and ctx.isnormal(m)): if m == 0: return z + m if z == 0: return z * m if m == ctx.inf or m == ctx.ninf: return ctx.inf raise ValueError x = z.real ctx.prec += max(0, ctx.mag(x)) pi = +ctx.pi away = abs(x) > pi/2 if away: d = ctx.nint(x/pi) z = z-pi*d P = 2*d*ctx.ellipe(m) else: P = 0 def terms(): c, s = ctx.cos_sin(z) x = c**2 y = 1-m*s**2 RF = ctx.elliprf(x, y, 1) RD = ctx.elliprd(x, y, 1) return s*RF, -m*s**3*RD/3 return ctx.sum_accurately(terms) + P @defun_wrapped def ellippi(ctx, *args): r""" Called with three arguments `n, \phi, m`, 
evaluates the Legendre incomplete elliptic integral of the third kind .. math :: \Pi(n; \phi, m) = \int_0^{\phi} \frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} = \int_0^{\sin \phi} \frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}. Called with two arguments `n, m`, evaluates the complete elliptic integral of the third kind `\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`. In the defining integral, it is assumed that the principal branch of the square root is taken and that the path of integration avoids crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`, the function extends quasi-periodically as .. math :: \Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}. **Plots** .. literalinclude :: /plots/ellippi.py .. image :: /plots/ellippi.png **Examples for the complete integral** Some basic values and limits:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> ellippi(0,-5); ellipk(-5) 0.9555039270640439337379334 0.9555039270640439337379334 >>> ellippi(inf,2) 0.0 >>> ellippi(2,inf) 0.0 >>> abs(ellippi(1,5)) +inf >>> abs(ellippi(0.25,1)) +inf Evaluation in terms of simpler functions:: >>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25) 1.956616279119236207279727 1.956616279119236207279727 >>> ellippi(3,0); pi/(2*sqrt(-2)) (0.0 - 1.11072073453959156175397j) (0.0 - 1.11072073453959156175397j) >>> ellippi(-3,0); pi/(2*sqrt(4)) 0.7853981633974483096156609 0.7853981633974483096156609 **Examples for the incomplete integral** Basic values and limits:: >>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5) 1.622944760954741603710555 1.622944760954741603710555 >>> ellippi(1,0,1) 0.0 >>> ellippi(inf,0,1) 0.0 >>> ellippi(0,0.25,0.5); ellipf(0.25,0.5) 0.2513040086544925794134591 0.2513040086544925794134591 >>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2 2.054332933256248668692452 2.054332933256248668692452 >>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75) 135.240868757890840755058 135.240868757890840755058 >>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3) 0.9190227391656969903987269 0.9190227391656969903987269 Complex arguments are supported:: >>> ellippi(0.5, 5+6j-2*pi, -7-8j) (-0.3612856620076747660410167 + 0.5217735339984807829755815j) Some degenerate cases:: >>> ellippi(1,1) +inf >>> ellippi(1,0) +inf >>> ellippi(1,2,0) +inf >>> ellippi(1,2,1) +inf >>> ellippi(1,0,1) 0.0 """ if len(args) == 2: n, m = args complete = True z = phi = ctx.pi/2 else: n, phi, m = args complete = False z = phi if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)): if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m): raise ValueError if complete: if m == 0: if n == 1: return ctx.inf return ctx.pi/(2*ctx.sqrt(1-n)) if n == 0: return ctx.ellipk(m) if ctx.isinf(n) or ctx.isinf(m): return ctx.zero else: if z == 0: return z if ctx.isinf(n): return ctx.zero if ctx.isinf(m): return ctx.zero if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m): raise ValueError if complete: if m == 1: if n == 1: return ctx.inf return -ctx.inf/ctx.sign(n-1) away = False else: x = z.real ctx.prec += max(0, ctx.mag(x)) pi = +ctx.pi away = abs(x) > pi/2 if away: d = ctx.nint(x/pi) z = z-pi*d P = 2*d*ctx.ellippi(n,m) if ctx.isinf(P): return ctx.inf else: P = 0 def terms(): if complete: c, s = ctx.zero, ctx.one else: c, s = ctx.cos_sin(z) x = c**2 y = 1-m*s**2 RF = ctx.elliprf(x, y, 1) RJ = ctx.elliprj(x, y, 1, 1-n*s**2) return s*RF, n*s**3*RJ/3 return ctx.sum_accurately(terms) + P
39,030
27.699265
86
py
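The elliptic module above evaluates Legendre's integrals by reducing them to the Carlson symmetric forms (ellipf, for instance, returns sin(phi)*R_F(cos(phi)**2, 1 - m*sin(phi)**2, 1) plus a quasi-period term) and supplies the q/m/k/tau parameter conversions. A short usage sketch follows, assuming mpmath is importable as bundled in this venv; the sample values are arbitrary.

from mpmath import mp, mpf, qfrom, mfrom, ellipf, ellipk, elliprf, cos, sin

mp.dps = 25

# parameter conversions round-trip (agreement to working precision)
q = mpf('0.25')
print(q, qfrom(m=mfrom(q=q)))

# incomplete case: F(phi, m) = sin(phi) * R_F(cos(phi)**2, 1 - m*sin(phi)**2, 1)
phi, m = mpf('0.7'), mpf('0.3')
s, c = sin(phi), cos(phi)
print(ellipf(phi, m), s * elliprf(c**2, 1 - m*s**2, 1))

# complete case: K(m) = R_F(0, 1 - m, 1)
print(ellipk(m), elliprf(0, 1 - m, 1))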
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/bessel.py
from .functions import defun, defun_wrapped @defun def j0(ctx, x): """Computes the Bessel function `J_0(x)`. See :func:`~mpmath.besselj`.""" return ctx.besselj(0, x) @defun def j1(ctx, x): """Computes the Bessel function `J_1(x)`. See :func:`~mpmath.besselj`.""" return ctx.besselj(1, x) @defun def besselj(ctx, n, z, derivative=0, **kwargs): if type(n) is int: n_isint = True else: n = ctx.convert(n) n_isint = ctx.isint(n) if n_isint: n = int(ctx._re(n)) if n_isint and n < 0: return (-1)**n * ctx.besselj(-n, z, derivative, **kwargs) z = ctx.convert(z) M = ctx.mag(z) if derivative: d = ctx.convert(derivative) # TODO: the integer special-casing shouldn't be necessary. # However, the hypergeometric series gets inaccurate for large d # because of inaccurate pole cancellation at a pole far from # zero (needs to be fixed in hypercomb or hypsum) if ctx.isint(d) and d >= 0: d = int(d) orig = ctx.prec try: ctx.prec += 15 v = ctx.fsum((-1)**k * ctx.binomial(d,k) * ctx.besselj(2*k+n-d,z) for k in range(d+1)) finally: ctx.prec = orig v *= ctx.mpf(2)**(-d) else: def h(n,d): r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), -0.25, exact=True) B = [0.5*(n-d+1), 0.5*(n-d+2)] T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[],B,[(n+1)*0.5,(n+2)*0.5],B+[n+1],r)] return T v = ctx.hypercomb(h, [n,d], **kwargs) else: # Fast case: J_n(x), n int, appropriate magnitude for fixed-point calculation if (not derivative) and n_isint and abs(M) < 10 and abs(n) < 20: try: return ctx._besselj(n, z) except NotImplementedError: pass if not z: if not n: v = ctx.one + n+z elif ctx.re(n) > 0: v = n*z else: v = ctx.inf + z + n else: #v = 0 orig = ctx.prec try: # XXX: workaround for accuracy in low level hypergeometric series # when alternating, large arguments ctx.prec += min(3*abs(M), ctx.prec) w = ctx.fmul(z, 0.5, exact=True) def h(n): r = ctx.fneg(ctx.fmul(w, w, prec=max(0,ctx.prec+M)), exact=True) return [([w], [n], [], [n+1], [], [n+1], r)] v = ctx.hypercomb(h, [n], **kwargs) finally: ctx.prec = orig v = +v return v @defun def besseli(ctx, n, z, derivative=0, **kwargs): n = ctx.convert(n) z = ctx.convert(z) if not z: if derivative: raise ValueError if not n: # I(0,0) = 1 return 1+n+z if ctx.isint(n): return 0*(n+z) r = ctx.re(n) if r == 0: return ctx.nan*(n+z) elif r > 0: return 0*(n+z) else: return ctx.inf+(n+z) M = ctx.mag(z) if derivative: d = ctx.convert(derivative) def h(n,d): r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), 0.25, exact=True) B = [0.5*(n-d+1), 0.5*(n-d+2), n+1] T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[n+1],B,[(n+1)*0.5,(n+2)*0.5],B,r)] return T v = ctx.hypercomb(h, [n,d], **kwargs) else: def h(n): w = ctx.fmul(z, 0.5, exact=True) r = ctx.fmul(w, w, prec=max(0,ctx.prec+M)) return [([w], [n], [], [n+1], [], [n+1], r)] v = ctx.hypercomb(h, [n], **kwargs) return v @defun_wrapped def bessely(ctx, n, z, derivative=0, **kwargs): if not z: if derivative: # Not implemented raise ValueError if not n: # ~ log(z/2) return -ctx.inf + (n+z) if ctx.im(n): return ctx.nan * (n+z) r = ctx.re(n) q = n+0.5 if ctx.isint(q): if n > 0: return -ctx.inf + (n+z) else: return 0 * (n+z) if r < 0 and int(ctx.floor(q)) % 2: return ctx.inf + (n+z) else: return ctx.ninf + (n+z) # XXX: use hypercomb ctx.prec += 10 m, d = ctx.nint_distance(n) if d < -ctx.prec: h = +ctx.eps ctx.prec *= 2 n += h elif d < 0: ctx.prec -= d # TODO: avoid cancellation for imaginary arguments cos, sin = ctx.cospi_sinpi(n) return (ctx.besselj(n,z,derivative,**kwargs)*cos - \ ctx.besselj(-n,z,derivative,**kwargs))/sin @defun_wrapped def besselk(ctx, n, z, **kwargs): if not z: return 
ctx.inf M = ctx.mag(z) if M < 1: # Represent as limit definition def h(n): r = (z/2)**2 T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r return T1, T2 # We could use the limit definition always, but it leads # to very bad cancellation (of exponentially large terms) # for large real z # Instead represent in terms of 2F0 else: ctx.prec += M def h(n): return [([ctx.pi/2, z, ctx.exp(-z)], [0.5,-0.5,1], [], [], \ [n+0.5, 0.5-n], [], -1/(2*z))] return ctx.hypercomb(h, [n], **kwargs) @defun_wrapped def hankel1(ctx,n,x,**kwargs): return ctx.besselj(n,x,**kwargs) + ctx.j*ctx.bessely(n,x,**kwargs) @defun_wrapped def hankel2(ctx,n,x,**kwargs): return ctx.besselj(n,x,**kwargs) - ctx.j*ctx.bessely(n,x,**kwargs) @defun_wrapped def whitm(ctx,k,m,z,**kwargs): if z == 0: # M(k,m,z) = 0^(1/2+m) if ctx.re(m) > -0.5: return z elif ctx.re(m) < -0.5: return ctx.inf + z else: return ctx.nan * z x = ctx.fmul(-0.5, z, exact=True) y = 0.5+m return ctx.exp(x) * z**y * ctx.hyp1f1(y-k, 1+2*m, z, **kwargs) @defun_wrapped def whitw(ctx,k,m,z,**kwargs): if z == 0: g = abs(ctx.re(m)) if g < 0.5: return z elif g > 0.5: return ctx.inf + z else: return ctx.nan * z x = ctx.fmul(-0.5, z, exact=True) y = 0.5+m return ctx.exp(x) * z**y * ctx.hyperu(y-k, 1+2*m, z, **kwargs) @defun def hyperu(ctx, a, b, z, **kwargs): a, atype = ctx._convert_param(a) b, btype = ctx._convert_param(b) z = ctx.convert(z) if not z: if ctx.re(b) <= 1: return ctx.gammaprod([1-b],[a-b+1]) else: return ctx.inf + z bb = 1+a-b bb, bbtype = ctx._convert_param(bb) try: orig = ctx.prec try: ctx.prec += 10 v = ctx.hypsum(2, 0, (atype, bbtype), [a, bb], -1/z, maxterms=ctx.prec) return v / z**a finally: ctx.prec = orig except ctx.NoConvergence: pass def h(a,b): w = ctx.sinpi(b) T1 = ([ctx.pi,w],[1,-1],[],[a-b+1,b],[a],[b],z) T2 = ([-ctx.pi,w,z],[1,-1,1-b],[],[a,2-b],[a-b+1],[2-b],z) return T1, T2 return ctx.hypercomb(h, [a,b], **kwargs) @defun def struveh(ctx,n,z, **kwargs): n = ctx.convert(n) z = ctx.convert(z) # http://functions.wolfram.com/Bessel-TypeFunctions/StruveH/26/01/02/ def h(n): return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], -(z/2)**2)] return ctx.hypercomb(h, [n], **kwargs) @defun def struvel(ctx,n,z, **kwargs): n = ctx.convert(n) z = ctx.convert(z) # http://functions.wolfram.com/Bessel-TypeFunctions/StruveL/26/01/02/ def h(n): return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], (z/2)**2)] return ctx.hypercomb(h, [n], **kwargs) def _anger(ctx,which,v,z,**kwargs): v = ctx._convert_param(v)[0] z = ctx.convert(z) def h(v): b = ctx.mpq_1_2 u = v*b m = b*3 a1,a2,b1,b2 = m-u, m+u, 1-u, 1+u c, s = ctx.cospi_sinpi(u) if which == 0: A, B = [b*z, s], [c] if which == 1: A, B = [b*z, -c], [s] w = ctx.square_exp_arg(z, mult=-0.25) T1 = A, [1, 1], [], [a1,a2], [1], [a1,a2], w T2 = B, [1], [], [b1,b2], [1], [b1,b2], w return T1, T2 return ctx.hypercomb(h, [v], **kwargs) @defun def angerj(ctx, v, z, **kwargs): return _anger(ctx, 0, v, z, **kwargs) @defun def webere(ctx, v, z, **kwargs): return _anger(ctx, 1, v, z, **kwargs) @defun def lommels1(ctx, u, v, z, **kwargs): u = ctx._convert_param(u)[0] v = ctx._convert_param(v)[0] z = ctx.convert(z) def h(u,v): b = ctx.mpq_1_2 w = ctx.square_exp_arg(z, mult=-0.25) return ([u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], \ [b*(u-v+3),b*(u+v+3)], w), return ctx.hypercomb(h, [u,v], **kwargs) @defun def lommels2(ctx, u, v, z, **kwargs): u = ctx._convert_param(u)[0] v = ctx._convert_param(v)[0] z = ctx.convert(z) # 
Asymptotic expansion (GR p. 947) -- need to be careful # not to use for small arguments # def h(u,v): # b = ctx.mpq_1_2 # w = -(z/2)**(-2) # return ([z], [u-1], [], [], [b*(1-u+v)], [b*(1-u-v)], w), def h(u,v): b = ctx.mpq_1_2 w = ctx.square_exp_arg(z, mult=-0.25) T1 = [u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], [b*(u-v+3),b*(u+v+3)], w T2 = [2, z], [u+v-1, -v], [v, b*(u+v+1)], [b*(v-u+1)], [], [1-v], w T3 = [2, z], [u-v-1, v], [-v, b*(u-v+1)], [b*(1-u-v)], [], [1+v], w #c1 = ctx.cospi((u-v)*b) #c2 = ctx.cospi((u+v)*b) #s = ctx.sinpi(v) #r1 = (u-v+1)*b #r2 = (u+v+1)*b #T2 = [c1, s, z, 2], [1, -1, -v, v], [], [-v+1], [], [-v+1], w #T3 = [-c2, s, z, 2], [1, -1, v, -v], [], [v+1], [], [v+1], w #T2 = [c1, s, z, 2], [1, -1, -v, v+u-1], [r1, r2], [-v+1], [], [-v+1], w #T3 = [-c2, s, z, 2], [1, -1, v, -v+u-1], [r1, r2], [v+1], [], [v+1], w return T1, T2, T3 return ctx.hypercomb(h, [u,v], **kwargs) @defun def ber(ctx, n, z, **kwargs): n = ctx.convert(n) z = ctx.convert(z) # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBer2/26/01/02/0001/ def h(n): r = -(z/4)**4 cos, sin = ctx.cospi_sinpi(-0.75*n) T1 = [cos, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r T2 = [sin, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r return T1, T2 return ctx.hypercomb(h, [n], **kwargs) @defun def bei(ctx, n, z, **kwargs): n = ctx.convert(n) z = ctx.convert(z) # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBei2/26/01/02/0001/ def h(n): r = -(z/4)**4 cos, sin = ctx.cospi_sinpi(0.75*n) T1 = [cos, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r T2 = [sin, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r return T1, T2 return ctx.hypercomb(h, [n], **kwargs) @defun def ker(ctx, n, z, **kwargs): n = ctx.convert(n) z = ctx.convert(z) # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKer2/26/01/02/0001/ def h(n): r = -(z/4)**4 cos1, sin1 = ctx.cospi_sinpi(0.25*n) cos2, sin2 = ctx.cospi_sinpi(0.75*n) T1 = [2, z, 4*cos1], [-n-3, n, 1], [-n], [], [], [0.5, 0.5*(1+n), 0.5*(n+2)], r T2 = [2, z, -sin1], [-n-3, 2+n, 1], [-n-1], [], [], [1.5, 0.5*(3+n), 0.5*(n+2)], r T3 = [2, z, 4*cos2], [n-3, -n, 1], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r T4 = [2, z, -sin2], [n-3, 2-n, 1], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r return T1, T2, T3, T4 return ctx.hypercomb(h, [n], **kwargs) @defun def kei(ctx, n, z, **kwargs): n = ctx.convert(n) z = ctx.convert(z) # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKei2/26/01/02/0001/ def h(n): r = -(z/4)**4 cos1, sin1 = ctx.cospi_sinpi(0.75*n) cos2, sin2 = ctx.cospi_sinpi(0.25*n) T1 = [-cos1, 2, z], [1, n-3, 2-n], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r T2 = [-sin1, 2, z], [1, n-1, -n], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r T3 = [-sin2, 2, z], [1, -n-1, n], [-n], [], [], [0.5, 0.5*(n+1), 0.5*(n+2)], r T4 = [-cos2, 2, z], [1, -n-3, n+2], [-n-1], [], [], [1.5, 0.5*(n+3), 0.5*(n+2)], r return T1, T2, T3, T4 return ctx.hypercomb(h, [n], **kwargs) # TODO: do this more generically? 
def c_memo(f): name = f.__name__ def f_wrapped(ctx): cache = ctx._misc_const_cache prec = ctx.prec p,v = cache.get(name, (-1,0)) if p >= prec: return +v else: cache[name] = (prec, f(ctx)) return cache[name][1] return f_wrapped @c_memo def _airyai_C1(ctx): return 1 / (ctx.cbrt(9) * ctx.gamma(ctx.mpf(2)/3)) @c_memo def _airyai_C2(ctx): return -1 / (ctx.cbrt(3) * ctx.gamma(ctx.mpf(1)/3)) @c_memo def _airybi_C1(ctx): return 1 / (ctx.nthroot(3,6) * ctx.gamma(ctx.mpf(2)/3)) @c_memo def _airybi_C2(ctx): return ctx.nthroot(3,6) / ctx.gamma(ctx.mpf(1)/3) def _airybi_n2_inf(ctx): prec = ctx.prec try: v = ctx.power(3,'2/3')*ctx.gamma('2/3')/(2*ctx.pi) finally: ctx.prec = prec return +v # Derivatives at z = 0 # TODO: could be expressed more elegantly using triple factorials def _airyderiv_0(ctx, z, n, ntype, which): if ntype == 'Z': if n < 0: return z r = ctx.mpq_1_3 prec = ctx.prec try: ctx.prec += 10 v = ctx.gamma((n+1)*r) * ctx.power(3,n*r) / ctx.pi if which == 0: v *= ctx.sinpi(2*(n+1)*r) v /= ctx.power(3,'2/3') else: v *= abs(ctx.sinpi(2*(n+1)*r)) v /= ctx.power(3,'1/6') finally: ctx.prec = prec return +v + z else: # singular (does the limit exist?) raise NotImplementedError @defun def airyai(ctx, z, derivative=0, **kwargs): z = ctx.convert(z) if derivative: n, ntype = ctx._convert_param(derivative) else: n = 0 # Values at infinities if not ctx.isnormal(z) and z: if n and ntype == 'Z': if n == -1: if z == ctx.inf: return ctx.mpf(1)/3 + 1/z if z == ctx.ninf: return ctx.mpf(-2)/3 + 1/z if n < -1: if z == ctx.inf: return z if z == ctx.ninf: return (-1)**n * (-z) if (not n) and z == ctx.inf or z == ctx.ninf: return 1/z # TODO: limits raise ValueError("essential singularity of Ai(z)") # Account for exponential scaling if z: extraprec = max(0, int(1.5*ctx.mag(z))) else: extraprec = 0 if n: if n == 1: def h(): # http://functions.wolfram.com/03.07.06.0005.01 if ctx._re(z) > 4: ctx.prec += extraprec w = z**1.5; r = -0.75/w; u = -2*w/3 ctx.prec -= extraprec C = -ctx.exp(u)/(2*ctx.sqrt(ctx.pi))*ctx.nthroot(z,4) return ([C],[1],[],[],[(-1,6),(7,6)],[],r), # http://functions.wolfram.com/03.07.26.0001.01 else: ctx.prec += extraprec w = z**3 / 9 ctx.prec -= extraprec C1 = _airyai_C1(ctx) * 0.5 C2 = _airyai_C2(ctx) T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w return T1, T2 return ctx.hypercomb(h, [], **kwargs) else: if z == 0: return _airyderiv_0(ctx, z, n, ntype, 0) # http://functions.wolfram.com/03.05.20.0004.01 def h(n): ctx.prec += extraprec w = z**3/9 ctx.prec -= extraprec q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3 a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13 T1 = [3, z], [n-q23, -n], [a1], [b1,b2,b3], \ [a1,a2], [b1,b2,b3], w a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13 T2 = [3, z, -z], [n-q43, -n, 1], [a1], [b1,b2,b3], \ [a1,a2], [b1,b2,b3], w return T1, T2 v = ctx.hypercomb(h, [n], **kwargs) if ctx._is_real_type(z) and ctx.isint(n): v = ctx._re(v) return v else: def h(): if ctx._re(z) > 4: # We could use 1F1, but it results in huge cancellation; # the following expansion is better. 
# TODO: asymptotic series for derivatives ctx.prec += extraprec w = z**1.5; r = -0.75/w; u = -2*w/3 ctx.prec -= extraprec C = ctx.exp(u)/(2*ctx.sqrt(ctx.pi)*ctx.nthroot(z,4)) return ([C],[1],[],[],[(1,6),(5,6)],[],r), else: ctx.prec += extraprec w = z**3 / 9 ctx.prec -= extraprec C1 = _airyai_C1(ctx) C2 = _airyai_C2(ctx) T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w return T1, T2 return ctx.hypercomb(h, [], **kwargs) @defun def airybi(ctx, z, derivative=0, **kwargs): z = ctx.convert(z) if derivative: n, ntype = ctx._convert_param(derivative) else: n = 0 # Values at infinities if not ctx.isnormal(z) and z: if n and ntype == 'Z': if z == ctx.inf: return z if z == ctx.ninf: if n == -1: return 1/z if n == -2: return _airybi_n2_inf(ctx) if n < -2: return (-1)**n * (-z) if not n: if z == ctx.inf: return z if z == ctx.ninf: return 1/z # TODO: limits raise ValueError("essential singularity of Bi(z)") if z: extraprec = max(0, int(1.5*ctx.mag(z))) else: extraprec = 0 if n: if n == 1: # http://functions.wolfram.com/03.08.26.0001.01 def h(): ctx.prec += extraprec w = z**3 / 9 ctx.prec -= extraprec C1 = _airybi_C1(ctx)*0.5 C2 = _airybi_C2(ctx) T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w return T1, T2 return ctx.hypercomb(h, [], **kwargs) else: if z == 0: return _airyderiv_0(ctx, z, n, ntype, 1) def h(n): ctx.prec += extraprec w = z**3/9 ctx.prec -= extraprec q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3 q16 = ctx.mpq_1_6 q56 = ctx.mpq_5_6 a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13 T1 = [3, z], [n-q16, -n], [a1], [b1,b2,b3], \ [a1,a2], [b1,b2,b3], w a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13 T2 = [3, z], [n-q56, 1-n], [a1], [b1,b2,b3], \ [a1,a2], [b1,b2,b3], w return T1, T2 v = ctx.hypercomb(h, [n], **kwargs) if ctx._is_real_type(z) and ctx.isint(n): v = ctx._re(v) return v else: def h(): ctx.prec += extraprec w = z**3 / 9 ctx.prec -= extraprec C1 = _airybi_C1(ctx) C2 = _airybi_C2(ctx) T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w return T1, T2 return ctx.hypercomb(h, [], **kwargs) def _airy_zero(ctx, which, k, derivative, complex=False): # Asymptotic formulas are given in DLMF section 9.9 def U(t): return t**(2/3.)*(1-7/(t**2*48)) def T(t): return t**(2/3.)*(1+5/(t**2*48)) k = int(k) if k < 1: raise ValueError("k cannot be less than 1") if not derivative in (0,1): raise ValueError("Derivative should lie between 0 and 1") if which == 0: if derivative: return ctx.findroot(lambda z: ctx.airyai(z,1), -U(3*ctx.pi*(4*k-3)/8)) return ctx.findroot(ctx.airyai, -T(3*ctx.pi*(4*k-1)/8)) if which == 1 and complex == False: if derivative: return ctx.findroot(lambda z: ctx.airybi(z,1), -U(3*ctx.pi*(4*k-1)/8)) return ctx.findroot(ctx.airybi, -T(3*ctx.pi*(4*k-3)/8)) if which == 1 and complex == True: if derivative: t = 3*ctx.pi*(4*k-3)/8 + 0.75j*ctx.ln2 s = ctx.expjpi(ctx.mpf(1)/3) * T(t) return ctx.findroot(lambda z: ctx.airybi(z,1), s) t = 3*ctx.pi*(4*k-1)/8 + 0.75j*ctx.ln2 s = ctx.expjpi(ctx.mpf(1)/3) * U(t) return ctx.findroot(ctx.airybi, s) @defun def airyaizero(ctx, k, derivative=0): return _airy_zero(ctx, 0, k, derivative, False) @defun def airybizero(ctx, k, derivative=0, complex=False): return _airy_zero(ctx, 1, k, derivative, complex) def _scorer(ctx, z, which, kwargs): z = ctx.convert(z) if ctx.isinf(z): if z == ctx.inf: if which == 0: return 1/z if which == 1: return z if z == ctx.ninf: return 1/z raise ValueError("essential singularity") if z: extraprec = max(0, int(1.5*ctx.mag(z))) 
else: extraprec = 0 if kwargs.get('derivative'): raise NotImplementedError # Direct asymptotic expansions, to avoid # exponentially large cancellation try: if ctx.mag(z) > 3: if which == 0 and abs(ctx.arg(z)) < ctx.pi/3 * 0.999: def h(): return (([ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),) return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True) if which == 1 and abs(ctx.arg(-z)) < 2*ctx.pi/3 * 0.999: def h(): return (([-ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),) return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True) except ctx.NoConvergence: pass def h(): A = ctx.airybi(z, **kwargs)/3 B = -2*ctx.pi if which == 1: A *= 2 B *= -1 ctx.prec += extraprec w = z**3/9 ctx.prec -= extraprec T1 = [A], [1], [], [], [], [], 0 T2 = [B,z], [-1,2], [], [], [1], [ctx.mpq_4_3,ctx.mpq_5_3], w return T1, T2 return ctx.hypercomb(h, [], **kwargs) @defun def scorergi(ctx, z, **kwargs): return _scorer(ctx, z, 0, kwargs) @defun def scorerhi(ctx, z, **kwargs): return _scorer(ctx, z, 1, kwargs) @defun_wrapped def coulombc(ctx, l, eta, _cache={}): if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec: return +_cache[l,eta][1] G3 = ctx.loggamma(2*l+2) G1 = ctx.loggamma(1+l+ctx.j*eta) G2 = ctx.loggamma(1+l-ctx.j*eta) v = 2**l * ctx.exp((-ctx.pi*eta+G1+G2)/2 - G3) if not (ctx.im(l) or ctx.im(eta)): v = ctx.re(v) _cache[l,eta] = (ctx.prec, v) return v @defun_wrapped def coulombf(ctx, l, eta, z, w=1, chop=True, **kwargs): # Regular Coulomb wave function # Note: w can be either 1 or -1; the other may be better in some cases # TODO: check that chop=True chops when and only when it should #ctx.prec += 10 def h(l, eta): try: jw = ctx.j*w jwz = ctx.fmul(jw, z, exact=True) jwz2 = ctx.fmul(jwz, -2, exact=True) C = ctx.coulombc(l, eta) T1 = [C, z, ctx.exp(jwz)], [1, l+1, 1], [], [], [1+l+jw*eta], \ [2*l+2], jwz2 except ValueError: T1 = [0], [-1], [], [], [], [], 0 return (T1,) v = ctx.hypercomb(h, [l,eta], **kwargs) if chop and (not ctx.im(l)) and (not ctx.im(eta)) and (not ctx.im(z)) and \ (ctx.re(z) >= 0): v = ctx.re(v) return v @defun_wrapped def _coulomb_chi(ctx, l, eta, _cache={}): if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec: return _cache[l,eta][1] def terms(): l2 = -l-1 jeta = ctx.j*eta return [ctx.loggamma(1+l+jeta) * (-0.5j), ctx.loggamma(1+l-jeta) * (0.5j), ctx.loggamma(1+l2+jeta) * (0.5j), ctx.loggamma(1+l2-jeta) * (-0.5j), -(l+0.5)*ctx.pi] v = ctx.sum_accurately(terms, 1) _cache[l,eta] = (ctx.prec, v) return v @defun_wrapped def coulombg(ctx, l, eta, z, w=1, chop=True, **kwargs): # Irregular Coulomb wave function # Note: w can be either 1 or -1; the other may be better in some cases # TODO: check that chop=True chops when and only when it should if not ctx._im(l): l = ctx._re(l) # XXX: for isint def h(l, eta): # Force perturbation for integers and half-integers if ctx.isint(l*2): T1 = [0], [-1], [], [], [], [], 0 return (T1,) l2 = -l-1 try: chi = ctx._coulomb_chi(l, eta) jw = ctx.j*w s = ctx.sin(chi); c = ctx.cos(chi) C1 = ctx.coulombc(l,eta) C2 = ctx.coulombc(l2,eta) u = ctx.exp(jw*z) x = -2*jw*z T1 = [s, C1, z, u, c], [-1, 1, l+1, 1, 1], [], [], \ [1+l+jw*eta], [2*l+2], x T2 = [-s, C2, z, u], [-1, 1, l2+1, 1], [], [], \ [1+l2+jw*eta], [2*l2+2], x return T1, T2 except ValueError: T1 = [0], [-1], [], [], [], [], 0 return (T1,) v = ctx.hypercomb(h, [l,eta], **kwargs) if chop and (not ctx._im(l)) and (not ctx._im(eta)) and (not ctx._im(z)) and \ (ctx._re(z) >= 0): v = ctx._re(v) return v def mcmahon(ctx,kind,prime,v,m): """ Computes an estimate for the location of 
the Bessel function zero j_{v,m}, y_{v,m}, j'_{v,m} or y'_{v,m} using McMahon's asymptotic expansion (Abramowitz & Stegun 9.5.12-13, DLMF 20.21(vi)). Returns (r,err) where r is the estimated location of the root and err is a positive number estimating the error of the asymptotic expansion. """ u = 4*v**2 if kind == 1 and not prime: b = (4*m+2*v-1)*ctx.pi/4 if kind == 2 and not prime: b = (4*m+2*v-3)*ctx.pi/4 if kind == 1 and prime: b = (4*m+2*v-3)*ctx.pi/4 if kind == 2 and prime: b = (4*m+2*v-1)*ctx.pi/4 if not prime: s1 = b s2 = -(u-1)/(8*b) s3 = -4*(u-1)*(7*u-31)/(3*(8*b)**3) s4 = -32*(u-1)*(83*u**2-982*u+3779)/(15*(8*b)**5) s5 = -64*(u-1)*(6949*u**3-153855*u**2+1585743*u-6277237)/(105*(8*b)**7) if prime: s1 = b s2 = -(u+3)/(8*b) s3 = -4*(7*u**2+82*u-9)/(3*(8*b)**3) s4 = -32*(83*u**3+2075*u**2-3039*u+3537)/(15*(8*b)**5) s5 = -64*(6949*u**4+296492*u**3-1248002*u**2+7414380*u-5853627)/(105*(8*b)**7) terms = [s1,s2,s3,s4,s5] s = s1 err = 0.0 for i in range(1,len(terms)): if abs(terms[i]) < abs(terms[i-1]): s += terms[i] else: err = abs(terms[i]) if i == len(terms)-1: err = abs(terms[-1]) return s, err def generalized_bisection(ctx,f,a,b,n): """ Given f known to have exactly n simple roots within [a,b], return a list of n intervals isolating the roots and having opposite signs at the endpoints. TODO: this can be optimized, e.g. by reusing evaluation points. """ if n < 1: raise ValueError("n cannot be less than 1") N = n+1 points = [] signs = [] while 1: points = ctx.linspace(a,b,N) signs = [ctx.sign(f(x)) for x in points] ok_intervals = [(points[i],points[i+1]) for i in range(N-1) \ if signs[i]*signs[i+1] == -1] if len(ok_intervals) == n: return ok_intervals N = N*2 def find_in_interval(ctx, f, ab): return ctx.findroot(f, ab, solver='illinois', verify=False) def bessel_zero(ctx, kind, prime, v, m, isoltol=0.01, _interval_cache={}): prec = ctx.prec workprec = max(prec, ctx.mag(v), ctx.mag(m))+10 try: ctx.prec = workprec v = ctx.mpf(v) m = int(m) prime = int(prime) if v < 0: raise ValueError("v cannot be negative") if m < 1: raise ValueError("m cannot be less than 1") if not prime in (0,1): raise ValueError("prime should lie between 0 and 1") if kind == 1: if prime: f = lambda x: ctx.besselj(v,x,derivative=1) else: f = lambda x: ctx.besselj(v,x) if kind == 2: if prime: f = lambda x: ctx.bessely(v,x,derivative=1) else: f = lambda x: ctx.bessely(v,x) # The first root of J' is very close to 0 for small # orders, and this needs to be special-cased if kind == 1 and prime and m == 1: if v == 0: return ctx.zero if v <= 1: # TODO: use v <= j'_{v,1} < y_{v,1}? 
r = 2*ctx.sqrt(v*(1+v)/(v+2)) return find_in_interval(ctx, f, (r/10, 2*r)) if (kind,prime,v,m) in _interval_cache: return find_in_interval(ctx, f, _interval_cache[kind,prime,v,m]) r, err = mcmahon(ctx, kind, prime, v, m) if err < isoltol: return find_in_interval(ctx, f, (r-isoltol, r+isoltol)) # An x such that 0 < x < r_{v,1} if kind == 1 and not prime: low = 2.4 if kind == 1 and prime: low = 1.8 if kind == 2 and not prime: low = 0.8 if kind == 2 and prime: low = 2.0 n = m+1 while 1: r1, err = mcmahon(ctx, kind, prime, v, n) if err < isoltol: r2, err2 = mcmahon(ctx, kind, prime, v, n+1) intervals = generalized_bisection(ctx, f, low, 0.5*(r1+r2), n) for k, ab in enumerate(intervals): _interval_cache[kind,prime,v,k+1] = ab return find_in_interval(ctx, f, intervals[m-1]) else: n = n*2 finally: ctx.prec = prec @defun def besseljzero(ctx, v, m, derivative=0): r""" For a real order `\nu \ge 0` and a positive integer `m`, returns `j_{\nu,m}`, the `m`-th positive zero of the Bessel function of the first kind `J_{\nu}(z)` (see :func:`~mpmath.besselj`). Alternatively, with *derivative=1*, gives the first nonnegative simple zero `j'_{\nu,m}` of `J'_{\nu}(z)`. The indexing convention is that used by Abramowitz & Stegun and the DLMF. Note the special case `j'_{0,1} = 0`, while all other zeros are positive. In effect, only simple zeros are counted (all zeros of Bessel functions are simple except possibly `z = 0`) and `j_{\nu,m}` becomes a monotonic function of both `\nu` and `m`. The zeros are interlaced according to the inequalities .. math :: j'_{\nu,k} < j_{\nu,k} < j'_{\nu,k+1} j_{\nu,1} < j_{\nu+1,2} < j_{\nu,2} < j_{\nu+1,2} < j_{\nu,3} < \cdots **Examples** Initial zeros of the Bessel functions `J_0(z), J_1(z), J_2(z)`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> besseljzero(0,1); besseljzero(0,2); besseljzero(0,3) 2.404825557695772768621632 5.520078110286310649596604 8.653727912911012216954199 >>> besseljzero(1,1); besseljzero(1,2); besseljzero(1,3) 3.831705970207512315614436 7.01558666981561875353705 10.17346813506272207718571 >>> besseljzero(2,1); besseljzero(2,2); besseljzero(2,3) 5.135622301840682556301402 8.417244140399864857783614 11.61984117214905942709415 Initial zeros of `J'_0(z), J'_1(z), J'_2(z)`:: 0.0 3.831705970207512315614436 7.01558666981561875353705 >>> besseljzero(1,1,1); besseljzero(1,2,1); besseljzero(1,3,1) 1.84118378134065930264363 5.331442773525032636884016 8.536316366346285834358961 >>> besseljzero(2,1,1); besseljzero(2,2,1); besseljzero(2,3,1) 3.054236928227140322755932 6.706133194158459146634394 9.969467823087595793179143 Zeros with large index:: >>> besseljzero(0,100); besseljzero(0,1000); besseljzero(0,10000) 313.3742660775278447196902 3140.807295225078628895545 31415.14114171350798533666 >>> besseljzero(5,100); besseljzero(5,1000); besseljzero(5,10000) 321.1893195676003157339222 3148.657306813047523500494 31422.9947255486291798943 >>> besseljzero(0,100,1); besseljzero(0,1000,1); besseljzero(0,10000,1) 311.8018681873704508125112 3139.236339643802482833973 31413.57032947022399485808 Zeros of functions with large order:: >>> besseljzero(50,1) 57.11689916011917411936228 >>> besseljzero(50,2) 62.80769876483536093435393 >>> besseljzero(50,100) 388.6936600656058834640981 >>> besseljzero(50,1,1) 52.99764038731665010944037 >>> besseljzero(50,2,1) 60.02631933279942589882363 >>> besseljzero(50,100,1) 387.1083151608726181086283 Zeros of functions with fractional order:: >>> besseljzero(0.5,1); besseljzero(1.5,1); besseljzero(2.25,4) 3.141592653589793238462643 
4.493409457909064175307881 15.15657692957458622921634 Both `J_{\nu}(z)` and `J'_{\nu}(z)` can be expressed as infinite products over their zeros:: >>> v,z = 2, mpf(1) >>> (z/2)**v/gamma(v+1) * \ ... nprod(lambda k: 1-(z/besseljzero(v,k))**2, [1,inf]) ... 0.1149034849319004804696469 >>> besselj(v,z) 0.1149034849319004804696469 >>> (z/2)**(v-1)/2/gamma(v) * \ ... nprod(lambda k: 1-(z/besseljzero(v,k,1))**2, [1,inf]) ... 0.2102436158811325550203884 >>> besselj(v,z,1) 0.2102436158811325550203884 """ return +bessel_zero(ctx, 1, derivative, v, m) @defun def besselyzero(ctx, v, m, derivative=0): r""" For a real order `\nu \ge 0` and a positive integer `m`, returns `y_{\nu,m}`, the `m`-th positive zero of the Bessel function of the second kind `Y_{\nu}(z)` (see :func:`~mpmath.bessely`). Alternatively, with *derivative=1*, gives the first positive zero `y'_{\nu,m}` of `Y'_{\nu}(z)`. The zeros are interlaced according to the inequalities .. math :: y_{\nu,k} < y'_{\nu,k} < y_{\nu,k+1} y_{\nu,1} < y_{\nu+1,2} < y_{\nu,2} < y_{\nu+1,2} < y_{\nu,3} < \cdots **Examples** Initial zeros of the Bessel functions `Y_0(z), Y_1(z), Y_2(z)`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> besselyzero(0,1); besselyzero(0,2); besselyzero(0,3) 0.8935769662791675215848871 3.957678419314857868375677 7.086051060301772697623625 >>> besselyzero(1,1); besselyzero(1,2); besselyzero(1,3) 2.197141326031017035149034 5.429681040794135132772005 8.596005868331168926429606 >>> besselyzero(2,1); besselyzero(2,2); besselyzero(2,3) 3.384241767149593472701426 6.793807513268267538291167 10.02347797936003797850539 Initial zeros of `Y'_0(z), Y'_1(z), Y'_2(z)`:: >>> besselyzero(0,1,1); besselyzero(0,2,1); besselyzero(0,3,1) 2.197141326031017035149034 5.429681040794135132772005 8.596005868331168926429606 >>> besselyzero(1,1,1); besselyzero(1,2,1); besselyzero(1,3,1) 3.683022856585177699898967 6.941499953654175655751944 10.12340465543661307978775 >>> besselyzero(2,1,1); besselyzero(2,2,1); besselyzero(2,3,1) 5.002582931446063945200176 8.350724701413079526349714 11.57419546521764654624265 Zeros with large index:: >>> besselyzero(0,100); besselyzero(0,1000); besselyzero(0,10000) 311.8034717601871549333419 3139.236498918198006794026 31413.57034538691205229188 >>> besselyzero(5,100); besselyzero(5,1000); besselyzero(5,10000) 319.6183338562782156235062 3147.086508524556404473186 31421.42392920214673402828 >>> besselyzero(0,100,1); besselyzero(0,1000,1); besselyzero(0,10000,1) 313.3726705426359345050449 3140.807136030340213610065 31415.14112579761578220175 Zeros of functions with large order:: >>> besselyzero(50,1) 53.50285882040036394680237 >>> besselyzero(50,2) 60.11244442774058114686022 >>> besselyzero(50,100) 387.1096509824943957706835 >>> besselyzero(50,1,1) 56.96290427516751320063605 >>> besselyzero(50,2,1) 62.74888166945933944036623 >>> besselyzero(50,100,1) 388.6923300548309258355475 Zeros of functions with fractional order:: >>> besselyzero(0.5,1); besselyzero(1.5,1); besselyzero(2.25,4) 1.570796326794896619231322 2.798386045783887136720249 13.56721208770735123376018 """ return +bessel_zero(ctx, 2, derivative, v, m)
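# ----------------------------------------------------------------------
# Illustrative smoke test (added in this edited dump; not part of the
# upstream module).  A minimal sketch, assuming an installed `mpmath`
# package exposing besselj/bessely/besseljzero/besselyzero with the
# signatures documented above, checking that the returned zeros really
# annihilate the corresponding Bessel functions (and their first
# derivatives) to well below the working precision.  The tolerance
# 1e-15 is an assumed, deliberately loose bound for mp.dps = 25.
# Because of the relative imports at the top of this file, run it as
# `python -m mpmath.functions.bessel` rather than as a plain script.
if __name__ == "__main__":
    import mpmath

    mpmath.mp.dps = 25
    tol = mpmath.mpf('1e-15')
    v = 2
    for m in range(1, 4):
        assert abs(mpmath.besselj(v, mpmath.besseljzero(v, m))) < tol
        assert abs(mpmath.bessely(v, mpmath.besselyzero(v, m))) < tol
        # zeros of the derivatives: J_v'(j'_{v,m}) ~ 0 and Y_v'(y'_{v,m}) ~ 0
        assert abs(mpmath.besselj(v, mpmath.besseljzero(v, m, derivative=1),
                                  derivative=1)) < tol
        assert abs(mpmath.bessely(v, mpmath.besselyzero(v, m, derivative=1),
                                  derivative=1)) < tol
    print("bessel zero smoke test passed")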
37,938
33.210099
100
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/rszeta.py
""" --------------------------------------------------------------------- .. sectionauthor:: Juan Arias de Reyna <arias@us.es> This module implements zeta-related functions using the Riemann-Siegel expansion: zeta_offline(s,k=0) * coef(J, eps): Need in the computation of Rzeta(s,k) * Rzeta_simul(s, der=0) computes Rzeta^(k)(s) and Rzeta^(k)(1-s) simultaneously for 0 <= k <= der. Used by zeta_offline and z_offline * Rzeta_set(s, derivatives) computes Rzeta^(k)(s) for given derivatives, used by z_half(t,k) and zeta_half * z_offline(w,k): Z(w) and its derivatives of order k <= 4 * z_half(t,k): Z(t) (Riemann Siegel function) and its derivatives of order k <= 4 * zeta_offline(s): zeta(s) and its derivatives of order k<= 4 * zeta_half(1/2+it,k): zeta(s) and its derivatives of order k<= 4 * rs_zeta(s,k=0) Computes zeta^(k)(s) Unifies zeta_half and zeta_offline * rs_z(w,k=0) Computes Z^(k)(w) Unifies z_offline and z_half ---------------------------------------------------------------------- This program uses Riemann-Siegel expansion even to compute zeta(s) on points s = sigma + i t with sigma arbitrary not necessarily equal to 1/2. It is founded on a new deduction of the formula, with rigorous and sharp bounds for the terms and rest of this expansion. More information on the papers: J. Arias de Reyna, High Precision Computation of Riemann's Zeta Function by the Riemann-Siegel Formula I, II We refer to them as I, II. In them we shall find detailed explanation of all the procedure. The program uses Riemann-Siegel expansion. This is useful when t is big, ( say t > 10000 ). The precision is limited, roughly it can compute zeta(sigma+it) with an error less than exp(-c t) for some constant c depending on sigma. The program gives an error when the Riemann-Siegel formula can not compute to the wanted precision. """ import math class RSCache(object): def __init__(ctx): ctx._rs_cache = [0, 10, {}, {}] from .functions import defun #-------------------------------------------------------------------------------# # # # coef(ctx, J, eps, _cache=[0, 10, {} ] ) # # # #-------------------------------------------------------------------------------# # This function computes the coefficients c[n] defined on (I, equation (47)) # but see also (II, section 3.14). # # Since these coefficients are very difficult to compute we save the values # in a cache. So if we compute several values of the functions Rzeta(s) for # near values of s, we do not recompute these coefficients. # # c[n] are the Taylor coefficients of the function: # # F(z):= (exp(pi*j*(z*z/2+3/8))-j* sqrt(2) cos(pi*z/2))/(2*cos(pi *z)) # # def _coef(ctx, J, eps): r""" Computes the coefficients `c_n` for `0\le n\le 2J` with error less than eps **Definition** The coefficients c_n are defined by .. math :: \begin{equation} F(z)=\frac{e^{\pi i \bigl(\frac{z^2}{2}+\frac38\bigr)}-i\sqrt{2}\cos\frac{\pi}{2}z}{2\cos\pi z}=\sum_{n=0}^\infty c_{2n} z^{2n} \end{equation} they are computed applying the relation .. math :: \begin{multline} c_{2n}=-\frac{i}{\sqrt{2}}\Bigl(\frac{\pi}{2}\Bigr)^{2n} \sum_{k=0}^n\frac{(-1)^k}{(2k)!} 2^{2n-2k}\frac{(-1)^{n-k}E_{2n-2k}}{(2n-2k)!}+\\ +e^{3\pi i/8}\sum_{j=0}^n(-1)^j\frac{ E_{2j}}{(2j)!}\frac{i^{n-j}\pi^{n+j}}{(n-j)!2^{n-j+1}}. \end{multline} """ newJ = J+2 # compute more coefficients that are needed neweps6 = eps/2. 
# compute with a slight more precision that are needed # PREPARATION FOR THE COMPUTATION OF V(N) AND W(N) # See II Section 3.16 # # Computing the exponent wpvw of the error II equation (81) wpvw = max(ctx.mag(10*(newJ+3)), 4*newJ+5-ctx.mag(neweps6)) # Preparation of Euler numbers (we need until the 2*RS_NEWJ) E = ctx._eulernum(2*newJ) # Now we have in the cache all the needed Euler numbers. # # Computing the powers of pi # # We need to compute the powers pi**n for 1<= n <= 2*J # with relative error less than 2**(-wpvw) # it is easy to show that this is obtained # taking wppi as the least d with # 2**d>40*J and 2**d> 4.24 *newJ + 2**wpvw # In II Section 3.9 we need also that # wppi > wptcoef[0], and that the powers # here computed 0<= k <= 2*newJ are more # than those needed there that are 2*L-2. # so we need J >= L this will be checked # before computing tcoef[] wppi = max(ctx.mag(40*newJ), ctx.mag(newJ)+3 +wpvw) ctx.prec = wppi pipower = {} pipower[0] = ctx.one pipower[1] = ctx.pi for n in range(2,2*newJ+1): pipower[n] = pipower[n-1]*ctx.pi # COMPUTING THE COEFFICIENTS v(n) AND w(n) # see II equation (61) and equations (81) and (82) ctx.prec = wpvw+2 v={} w={} for n in range(0,newJ+1): va = (-1)**n * ctx._eulernum(2*n) va = ctx.mpf(va)/ctx.fac(2*n) v[n]=va*pipower[2*n] for n in range(0,2*newJ+1): wa = ctx.one/ctx.fac(n) wa=wa/(2**n) w[n]=wa*pipower[n] # COMPUTATION OF THE CONVOLUTIONS RS_P1 AND RS_P2 # See II Section 3.16 ctx.prec = 15 wpp1a = 9 - ctx.mag(neweps6) P1 = {} for n in range(0,newJ+1): ctx.prec = 15 wpp1 = max(ctx.mag(10*(n+4)),4*n+wpp1a) ctx.prec = wpp1 sump = 0 for k in range(0,n+1): sump += ((-1)**k) * v[k]*w[2*n-2*k] P1[n]=((-1)**(n+1))*ctx.j*sump P2={} for n in range(0,newJ+1): ctx.prec = 15 wpp2 = max(ctx.mag(10*(n+4)),4*n+wpp1a) ctx.prec = wpp2 sump = 0 for k in range(0,n+1): sump += (ctx.j**(n-k)) * v[k]*w[n-k] P2[n]=sump # COMPUTING THE COEFFICIENTS c[2n] # See II Section 3.14 ctx.prec = 15 wpc0 = 5 - ctx.mag(neweps6) wpc = max(6,4*newJ+wpc0) ctx.prec = wpc mu = ctx.sqrt(ctx.mpf('2'))/2 nu = ctx.expjpi(3./8)/2 c={} for n in range(0,newJ): ctx.prec = 15 wpc = max(6,4*n+wpc0) ctx.prec = wpc c[2*n] = mu*P1[n]+nu*P2[n] for n in range(1,2*newJ,2): c[n] = 0 return [newJ, neweps6, c, pipower] def coef(ctx, J, eps): _cache = ctx._rs_cache if J <= _cache[0] and eps >= _cache[1]: return _cache[2], _cache[3] orig = ctx._mp.prec try: data = _coef(ctx._mp, J, eps) finally: ctx._mp.prec = orig if ctx is not ctx._mp: data[2] = dict((k,ctx.convert(v)) for (k,v) in data[2].items()) data[3] = dict((k,ctx.convert(v)) for (k,v) in data[3].items()) ctx._rs_cache[:] = data return ctx._rs_cache[2], ctx._rs_cache[3] #-------------------------------------------------------------------------------# # # # Rzeta_simul(s,k=0) # # # #-------------------------------------------------------------------------------# # This function return a list with the values: # Rzeta(sigma+it), conj(Rzeta(1-sigma+it)),Rzeta'(sigma+it), conj(Rzeta'(1-sigma+it)), # .... , Rzeta^{(k)}(sigma+it), conj(Rzeta^{(k)}(1-sigma+it)) # # Useful to compute the function zeta(s) and Z(w) or its derivatives. # def aux_M_Fp(ctx, xA, xeps4, a, xB1, xL): # COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE # See II Section 3.11 equations (47) and (48) aux1 = 126.0657606*xA/xeps4 # 126.06.. = 316/sqrt(2*pi) aux1 = ctx.ln(aux1) aux2 = (2*ctx.ln(ctx.pi)+ctx.ln(xB1)+ctx.ln(a))/3 -ctx.ln(2*ctx.pi)/2 m = 3*xL-3 aux3= (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.) 
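    # (descriptive note) The loop below lowers m, starting from 3*xL-3, while
    # the inequality aux1 < m*aux2 + aux3 (and m > 1) continues to hold; the
    # final value xM is the number of derivatives Fp[m] that will be computed.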
while((aux1 < m*aux2+ aux3)and (m>1)): m = m - 1 aux3 = (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.) xM = m return xM def aux_J_needed(ctx, xA, xeps4, a, xB1, xM): # DETERMINATION OF J THE NUMBER OF TERMS NEEDED # IN THE TAYLOR SERIES OF F. # See II Section 3.11 equation (49)) # Only determine one h1 = xeps4/(632*xA) h2 = xB1*a * 126.31337419529260248 # = pi^2*e^2*sqrt(3) h2 = h1 * ctx.power((h2/xM**2),(xM-1)/3) / xM h3 = min(h1,h2) return h3 def Rzeta_simul(ctx, s, der=0): # First we take the value of ctx.prec wpinitial = ctx.prec # INITIALIZATION # Take the real and imaginary part of s t = ctx._im(s) xsigma = ctx._re(s) ysigma = 1 - xsigma # Now compute several parameter that appear on the program ctx.prec = 15 a = ctx.sqrt(t/(2*ctx.pi)) xasigma = a ** xsigma yasigma = a ** ysigma # We need a simple bound A1 < asigma (see II Section 3.1 and 3.3) xA1=ctx.power(2, ctx.mag(xasigma)-1) yA1=ctx.power(2, ctx.mag(yasigma)-1) # We compute various epsilon's (see II end of Section 3.1) eps = ctx.power(2, -wpinitial) eps1 = eps/6. xeps2 = eps * xA1/3. yeps2 = eps * yA1/3. # COMPUTING SOME COEFFICIENTS THAT DEPENDS # ON sigma # constant b and c (see I Theorem 2 formula (26) ) # coefficients A and B1 (see I Section 6.1 equation (50)) # # here we not need high precision ctx.prec = 15 if xsigma > 0: xb = 2. xc = math.pow(9,xsigma)/4.44288 # 4.44288 =(math.sqrt(2)*math.pi) xA = math.pow(9,xsigma) xB1 = 1 else: xb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi ) xc = math.pow(2,-xsigma)/4.44288 xA = math.pow(2,-xsigma) xB1 = 1.10789 # = 2*sqrt(1-log(2)) if(ysigma > 0): yb = 2. yc = math.pow(9,ysigma)/4.44288 # 4.44288 =(math.sqrt(2)*math.pi) yA = math.pow(9,ysigma) yB1 = 1 else: yb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi ) yc = math.pow(2,-ysigma)/4.44288 yA = math.pow(2,-ysigma) yB1 = 1.10789 # = 2*sqrt(1-log(2)) # COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL # CORRECTION # See II Section 3.2 ctx.prec = 15 xL = 1 while 3*xc*ctx.gamma(xL*0.5) * ctx.power(xb*a,-xL) >= xeps2: xL = xL+1 xL = max(2,xL) yL = 1 while 3*yc*ctx.gamma(yL*0.5) * ctx.power(yb*a,-yL) >= yeps2: yL = yL+1 yL = max(2,yL) # The number L has to satify some conditions. # If not RS can not compute Rzeta(s) with the prescribed precision # (see II, Section 3.2 condition (20) ) and # (II, Section 3.3 condition (22) ). Also we have added # an additional technical condition in Section 3.17 Proposition 17 if ((3*xL >= 2*a*a/25.) or (3*xL+2+xsigma<0) or (abs(xsigma) > a/2.) or \ (3*yL >= 2*a*a/25.) 
or (3*yL+2+ysigma<0) or (abs(ysigma) > a/2.)): ctx.prec = wpinitial raise NotImplementedError("Riemann-Siegel can not compute with such precision") # We take the maximum of the two values L = max(xL, yL) # INITIALIZATION (CONTINUATION) # # eps3 is the constant defined on (II, Section 3.5 equation (27) ) # each term of the RS correction must be computed with error <= eps3 xeps3 = xeps2/(4*xL) yeps3 = yeps2/(4*yL) # eps4 is defined on (II Section 3.6 equation (30) ) # each component of the formula (II Section 3.6 equation (29) ) # must be computed with error <= eps4 xeps4 = xeps3/(3*xL) yeps4 = yeps3/(3*yL) # COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE xM = aux_M_Fp(ctx, xA, xeps4, a, xB1, xL) yM = aux_M_Fp(ctx, yA, yeps4, a, yB1, yL) M = max(xM, yM) # COMPUTING NUMBER OF TERMS J NEEDED h3 = aux_J_needed(ctx, xA, xeps4, a, xB1, xM) h4 = aux_J_needed(ctx, yA, yeps4, a, yB1, yM) h3 = min(h3,h4) J = 12 jvalue = (2*ctx.pi)**J / ctx.gamma(J+1) while jvalue > h3: J = J+1 jvalue = (2*ctx.pi)*jvalue/J # COMPUTING eps5[m] for 1 <= m <= 21 # See II Section 10 equation (43) # We choose the minimum of the two possibilities eps5={} xforeps5 = math.pi*math.pi*xB1*a yforeps5 = math.pi*math.pi*yB1*a for m in range(0,22): xaux1 = math.pow(xforeps5, m/3)/(316.*xA) yaux1 = math.pow(yforeps5, m/3)/(316.*yA) aux1 = min(xaux1, yaux1) aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5) aux2 = math.sqrt(aux2) eps5[m] = (aux1*aux2*min(xeps4,yeps4)) # COMPUTING wpfp # See II Section 3.13 equation (59) twenty = min(3*L-3, 21)+1 aux = 6812*J wpfp = ctx.mag(44*J) for m in range(0,twenty): wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m])) # COMPUTING N AND p # See II Section ctx.prec = wpfp + ctx.mag(t)+20 a = ctx.sqrt(t/(2*ctx.pi)) N = ctx.floor(a) p = 1-2*(a-N) # now we get a rounded version of p # to the precision wpfp # this possibly is not necessary num=ctx.floor(p*(ctx.mpf('2')**wpfp)) difference = p * (ctx.mpf('2')**wpfp)-num if (difference < 0.5): num = num else: num = num+1 p = ctx.convert(num * (ctx.mpf('2')**(-wpfp))) # COMPUTING THE COEFFICIENTS c[n] = cc[n] # We shall use the notation cc[n], since there is # a constant that is called c # See II Section 3.14 # We compute the coefficients and also save then in a # cache. The bulk of the computation is passed to # the function coef() # # eps6 is defined in II Section 3.13 equation (58) eps6 = ctx.power(ctx.convert(2*ctx.pi), J)/(ctx.gamma(J+1)*3*J) # Now we compute the coefficients cc = {} cont = {} cont, pipowers = coef(ctx, J, eps6) cc=cont.copy() # we need a copy since we have to change his values. 
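    # (descriptive note) Below, the derivatives F^(m)(p) for 0 <= m <= M are
    # evaluated from the Taylor coefficients cc[k] by Horner's rule; after each
    # order m the coefficients are shifted as cc[k] <- (k+1)*cc[k+1] (formal
    # differentiation of the Taylor polynomial), which is why a copy of the
    # cached coefficients is modified rather than the cache itself.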
Fp={} # this is the adequate locus of this for n in range(M, 3*L-2): Fp[n] = 0 Fp={} ctx.prec = wpfp for m in range(0,M+1): sumP = 0 for k in range(2*J-m-1,-1,-1): sumP = (sumP * p)+ cc[k] Fp[m] = sumP # preparation of the new coefficients for k in range(0,2*J-m-1): cc[k] = (k+1)* cc[k+1] # COMPUTING THE NUMBERS xd[u,n,k], yd[u,n,k] # See II Section 3.17 # # First we compute the working precisions xwpd[k] # Se II equation (92) xwpd={} d1 = max(6,ctx.mag(40*L*L)) xd2 = 13+ctx.mag((1+abs(xsigma))*xA)-ctx.mag(xeps4)-1 xconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*xB1*xB1)) /2 for n in range(0,L): xd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*xconst)+xd2 xwpd[n]=max(xd3,d1) # procedure of II Section 3.17 ctx.prec = xwpd[1]+10 xpsigma = 1-(2*xsigma) xd = {} xd[0,0,-2]=0; xd[0,0,-1]=0; xd[0,0,0]=1; xd[0,0,1]=0 xd[0,-1,-2]=0; xd[0,-1,-1]=0; xd[0,-1,0]=1; xd[0,-1,1]=0 for n in range(1,L): ctx.prec = xwpd[n]+10 for k in range(0,3*n//2+1): m = 3*n-2*k if(m!=0): m1 = ctx.one/m c1= m1/4 c2=(xpsigma*m1)/2 c3=-(m+1) xd[0,n,k]=c3*xd[0,n-1,k-2]+c1*xd[0,n-1,k]+c2*xd[0,n-1,k-1] else: xd[0,n,k]=0 for r in range(0,k): add=xd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r)) xd[0,n,k] -= ((-1)**(k-r))*add xd[0,n,-2]=0; xd[0,n,-1]=0; xd[0,n,3*n//2+1]=0 for mu in range(-2,der+1): for n in range(-2,L): for k in range(-3,max(1,3*n//2+2)): if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)): xd[mu,n,k] = 0 for mu in range(1,der+1): for n in range(0,L): ctx.prec = xwpd[n]+10 for k in range(0,3*n//2+1): aux=(2*mu-2)*xd[mu-2,n-2,k-3]+2*(xsigma+n-2)*xd[mu-1,n-2,k-3] xd[mu,n,k] = aux - xd[mu-1,n-1,k-1] # Now we compute the working precisions ywpd[k] # Se II equation (92) ywpd={} d1 = max(6,ctx.mag(40*L*L)) yd2 = 13+ctx.mag((1+abs(ysigma))*yA)-ctx.mag(yeps4)-1 yconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*yB1*yB1)) /2 for n in range(0,L): yd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*yconst)+yd2 ywpd[n]=max(yd3,d1) # procedure of II Section 3.17 ctx.prec = ywpd[1]+10 ypsigma = 1-(2*ysigma) yd = {} yd[0,0,-2]=0; yd[0,0,-1]=0; yd[0,0,0]=1; yd[0,0,1]=0 yd[0,-1,-2]=0; yd[0,-1,-1]=0; yd[0,-1,0]=1; yd[0,-1,1]=0 for n in range(1,L): ctx.prec = ywpd[n]+10 for k in range(0,3*n//2+1): m = 3*n-2*k if(m!=0): m1 = ctx.one/m c1= m1/4 c2=(ypsigma*m1)/2 c3=-(m+1) yd[0,n,k]=c3*yd[0,n-1,k-2]+c1*yd[0,n-1,k]+c2*yd[0,n-1,k-1] else: yd[0,n,k]=0 for r in range(0,k): add=yd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r)) yd[0,n,k] -= ((-1)**(k-r))*add yd[0,n,-2]=0; yd[0,n,-1]=0; yd[0,n,3*n//2+1]=0 for mu in range(-2,der+1): for n in range(-2,L): for k in range(-3,max(1,3*n//2+2)): if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)): yd[mu,n,k] = 0 for mu in range(1,der+1): for n in range(0,L): ctx.prec = ywpd[n]+10 for k in range(0,3*n//2+1): aux=(2*mu-2)*yd[mu-2,n-2,k-3]+2*(ysigma+n-2)*yd[mu-1,n-2,k-3] yd[mu,n,k] = aux - yd[mu-1,n-1,k-1] # COMPUTING THE COEFFICIENTS xtcoef[k,l] # See II Section 3.9 # # computing the needed wp xwptcoef={} xwpterm={} ctx.prec = 15 c1 = ctx.mag(40*(L+2)) xc2 = ctx.mag(68*(L+2)*xA) xc4 = ctx.mag(xB1*a*math.sqrt(ctx.pi))-1 for k in range(0,L): xc3 = xc2 - k*xc4+ctx.mag(ctx.fac(k+0.5))/2. xwptcoef[k] = (max(c1,xc3-ctx.mag(xeps4)+1)+1 +20)*1.5 xwpterm[k] = (max(c1,ctx.mag(L+2)+xc3-ctx.mag(xeps3)+1)+1 +20) ywptcoef={} ywpterm={} ctx.prec = 15 c1 = ctx.mag(40*(L+2)) yc2 = ctx.mag(68*(L+2)*yA) yc4 = ctx.mag(yB1*a*math.sqrt(ctx.pi))-1 for k in range(0,L): yc3 = yc2 - k*yc4+ctx.mag(ctx.fac(k+0.5))/2. 
ywptcoef[k] = ((max(c1,yc3-ctx.mag(yeps4)+1))+10)*1.5 ywpterm[k] = (max(c1,ctx.mag(L+2)+yc3-ctx.mag(yeps3)+1)+1)+10 # check of power of pi # computing the fortcoef[mu,k,ell] xfortcoef={} for mu in range(0,der+1): for k in range(0,L): for ell in range(-2,3*k//2+1): xfortcoef[mu,k,ell]=0 for mu in range(0,der+1): for k in range(0,L): ctx.prec = xwptcoef[k] for ell in range(0,3*k//2+1): xfortcoef[mu,k,ell]=xd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] xfortcoef[mu,k,ell]=xfortcoef[mu,k,ell]/((2*ctx.j)**ell) def trunc_a(t): wp = ctx.prec ctx.prec = wp + 2 aa = ctx.sqrt(t/(2*ctx.pi)) ctx.prec = wp return aa # computing the tcoef[k,ell] xtcoef={} for mu in range(0,der+1): for k in range(0,L): for ell in range(-2,3*k//2+1): xtcoef[mu,k,ell]=0 ctx.prec = max(xwptcoef[0],ywptcoef[0])+3 aa= trunc_a(t) la = -ctx.ln(aa) for chi in range(0,der+1): for k in range(0,L): ctx.prec = xwptcoef[k] for ell in range(0,3*k//2+1): xtcoef[chi,k,ell] =0 for mu in range(0, chi+1): tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*xfortcoef[chi-mu,k,ell] xtcoef[chi,k,ell] += tcoefter # COMPUTING THE COEFFICIENTS ytcoef[k,l] # See II Section 3.9 # # computing the needed wp # check of power of pi # computing the fortcoef[mu,k,ell] yfortcoef={} for mu in range(0,der+1): for k in range(0,L): for ell in range(-2,3*k//2+1): yfortcoef[mu,k,ell]=0 for mu in range(0,der+1): for k in range(0,L): ctx.prec = ywptcoef[k] for ell in range(0,3*k//2+1): yfortcoef[mu,k,ell]=yd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] yfortcoef[mu,k,ell]=yfortcoef[mu,k,ell]/((2*ctx.j)**ell) # computing the tcoef[k,ell] ytcoef={} for chi in range(0,der+1): for k in range(0,L): for ell in range(-2,3*k//2+1): ytcoef[chi,k,ell]=0 for chi in range(0,der+1): for k in range(0,L): ctx.prec = ywptcoef[k] for ell in range(0,3*k//2+1): ytcoef[chi,k,ell] =0 for mu in range(0, chi+1): tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*yfortcoef[chi-mu,k,ell] ytcoef[chi,k,ell] += tcoefter # COMPUTING tv[k,ell] # See II Section 3.8 # # a has a good value ctx.prec = max(xwptcoef[0], ywptcoef[0])+2 av = {} av[0] = 1 av[1] = av[0]/a ctx.prec = max(xwptcoef[0],ywptcoef[0]) for k in range(2,L): av[k] = av[k-1] * av[1] # Computing the quotients xtv = {} for chi in range(0,der+1): for k in range(0,L): ctx.prec = xwptcoef[k] for ell in range(0,3*k//2+1): xtv[chi,k,ell] = xtcoef[chi,k,ell]* av[k] # Computing the quotients ytv = {} for chi in range(0,der+1): for k in range(0,L): ctx.prec = ywptcoef[k] for ell in range(0,3*k//2+1): ytv[chi,k,ell] = ytcoef[chi,k,ell]* av[k] # COMPUTING THE TERMS xterm[k] # See II Section 3.6 xterm = {} for chi in range(0,der+1): for n in range(0,L): ctx.prec = xwpterm[n] te = 0 for k in range(0, 3*n//2+1): te += xtv[chi,n,k] xterm[chi,n] = te # COMPUTING THE TERMS yterm[k] # See II Section 3.6 yterm = {} for chi in range(0,der+1): for n in range(0,L): ctx.prec = ywpterm[n] te = 0 for k in range(0, 3*n//2+1): te += ytv[chi,n,k] yterm[chi,n] = te # COMPUTING rssum # See II Section 3.5 xrssum={} ctx.prec=15 xrsbound = math.sqrt(ctx.pi) * xc /(xb*a) ctx.prec=15 xwprssum = ctx.mag(4.4*((L+3)**2)*xrsbound / xeps2) xwprssum = max(xwprssum, ctx.mag(10*(L+1))) ctx.prec = xwprssum for chi in range(0,der+1): xrssum[chi] = 0 for k in range(1,L+1): xrssum[chi] += xterm[chi,L-k] yrssum={} ctx.prec=15 yrsbound = math.sqrt(ctx.pi) * yc /(yb*a) ctx.prec=15 ywprssum = ctx.mag(4.4*((L+3)**2)*yrsbound / yeps2) ywprssum = max(ywprssum, ctx.mag(10*(L+1))) ctx.prec = ywprssum for chi in range(0,der+1): yrssum[chi] = 0 for k in range(1,L+1): yrssum[chi] += yterm[chi,L-k] # 
COMPUTING S3 # See II Section 3.19 ctx.prec = 15 A2 = 2**(max(ctx.mag(abs(xrssum[0])), ctx.mag(abs(yrssum[0])))) eps8 = eps/(3*A2) T = t *ctx.ln(t/(2*ctx.pi)) xwps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-xsigma))*T) ywps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-ysigma))*T) ctx.prec = max(xwps3, ywps3) tpi = t/(2*ctx.pi) arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8 U = ctx.expj(-arg) a = trunc_a(t) xasigma = ctx.power(a, -xsigma) yasigma = ctx.power(a, -ysigma) xS3 = ((-1)**(N-1)) * xasigma * U yS3 = ((-1)**(N-1)) * yasigma * U # COMPUTING S1 the zetasum # See II Section 3.18 ctx.prec = 15 xwpsum = 4+ ctx.mag((N+ctx.power(N,1-xsigma))*ctx.ln(N) /eps1) ywpsum = 4+ ctx.mag((N+ctx.power(N,1-ysigma))*ctx.ln(N) /eps1) wpsum = max(xwpsum, ywpsum) ctx.prec = wpsum +10 ''' # This can be improved xS1={} yS1={} for chi in range(0,der+1): xS1[chi] = 0 yS1[chi] = 0 for n in range(1,int(N)+1): ln = ctx.ln(n) xexpn = ctx.exp(-ln*(xsigma+ctx.j*t)) yexpn = ctx.conj(1/(n*xexpn)) for chi in range(0,der+1): pown = ctx.power(-ln, chi) xterm = pown*xexpn yterm = pown*yexpn xS1[chi] += xterm yS1[chi] += yterm ''' xS1, yS1 = ctx._zetasum(s, 1, int(N)-1, range(0,der+1), True) # END OF COMPUTATION of xrz, yrz # See II Section 3.1 ctx.prec = 15 xabsS1 = abs(xS1[der]) xabsS2 = abs(xrssum[der] * xS3) xwpend = max(6, wpinitial+ctx.mag(6*(3*xabsS1+7*xabsS2) ) ) ctx.prec = xwpend xrz={} for chi in range(0,der+1): xrz[chi] = xS1[chi]+xrssum[chi]*xS3 ctx.prec = 15 yabsS1 = abs(yS1[der]) yabsS2 = abs(yrssum[der] * yS3) ywpend = max(6, wpinitial+ctx.mag(6*(3*yabsS1+7*yabsS2) ) ) ctx.prec = ywpend yrz={} for chi in range(0,der+1): yrz[chi] = yS1[chi]+yrssum[chi]*yS3 yrz[chi] = ctx.conj(yrz[chi]) ctx.prec = wpinitial return xrz, yrz def Rzeta_set(ctx, s, derivatives=[0]): r""" Computes several derivatives of the auxiliary function of Riemann `R(s)`. **Definition** The function is defined by .. math :: \begin{equation} {\mathop{\mathcal R }\nolimits}(s)= \int_{0\swarrow1}\frac{x^{-s} e^{\pi i x^2}}{e^{\pi i x}- e^{-\pi i x}}\,dx \end{equation} To this function we apply the Riemann-Siegel expansion. """ der = max(derivatives) # First we take the value of ctx.prec # During the computation we will change ctx.prec, and finally we will # restaurate the initial value wpinitial = ctx.prec # Take the real and imaginary part of s t = ctx._im(s) sigma = ctx._re(s) # Now compute several parameter that appear on the program ctx.prec = 15 a = ctx.sqrt(t/(2*ctx.pi)) # Careful asigma = ctx.power(a, sigma) # Careful # We need a simple bound A1 < asigma (see II Section 3.1 and 3.3) A1 = ctx.power(2, ctx.mag(asigma)-1) # We compute various epsilon's (see II end of Section 3.1) eps = ctx.power(2, -wpinitial) eps1 = eps/6. eps2 = eps * A1/3. # COMPUTING SOME COEFFICIENTS THAT DEPENDS # ON sigma # constant b and c (see I Theorem 2 formula (26) ) # coefficients A and B1 (see I Section 6.1 equation (50)) # here we not need high precision ctx.prec = 15 if sigma > 0: b = 2. c = math.pow(9,sigma)/4.44288 # 4.44288 =(math.sqrt(2)*math.pi) A = math.pow(9,sigma) B1 = 1 else: b = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi ) c = math.pow(2,-sigma)/4.44288 A = math.pow(2,-sigma) B1 = 1.10789 # = 2*sqrt(1-log(2)) # COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL # CORRECTION # See II Section 3.2 ctx.prec = 15 L = 1 while 3*c*ctx.gamma(L*0.5) * ctx.power(b*a,-L) >= eps2: L = L+1 L = max(2,L) # The number L has to satify some conditions. 
# If not RS can not compute Rzeta(s) with the prescribed precision # (see II, Section 3.2 condition (20) ) and # (II, Section 3.3 condition (22) ). Also we have added # an additional technical condition in Section 3.17 Proposition 17 if ((3*L >= 2*a*a/25.) or (3*L+2+sigma<0) or (abs(sigma)> a/2.)): #print 'Error Riemann-Siegel can not compute with such precision' ctx.prec = wpinitial raise NotImplementedError("Riemann-Siegel can not compute with such precision") # INITIALIZATION (CONTINUATION) # # eps3 is the constant defined on (II, Section 3.5 equation (27) ) # each term of the RS correction must be computed with error <= eps3 eps3 = eps2/(4*L) # eps4 is defined on (II Section 3.6 equation (30) ) # each component of the formula (II Section 3.6 equation (29) ) # must be computed with error <= eps4 eps4 = eps3/(3*L) # COMPUTING M. NUMBER OF DERIVATIVES Fp[m] TO COMPUTE M = aux_M_Fp(ctx, A, eps4, a, B1, L) Fp = {} for n in range(M, 3*L-2): Fp[n] = 0 # But I have not seen an instance of M != 3*L-3 # # DETERMINATION OF J THE NUMBER OF TERMS NEEDED # IN THE TAYLOR SERIES OF F. # See II Section 3.11 equation (49)) h1 = eps4/(632*A) h2 = ctx.pi*ctx.pi*B1*a *ctx.sqrt(3)*math.e*math.e h2 = h1 * ctx.power((h2/M**2),(M-1)/3) / M h3 = min(h1,h2) J=12 jvalue = (2*ctx.pi)**J / ctx.gamma(J+1) while jvalue > h3: J = J+1 jvalue = (2*ctx.pi)*jvalue/J # COMPUTING eps5[m] for 1 <= m <= 21 # See II Section 10 equation (43) eps5={} foreps5 = math.pi*math.pi*B1*a for m in range(0,22): aux1 = math.pow(foreps5, m/3)/(316.*A) aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5) aux2 = math.sqrt(aux2) eps5[m] = aux1*aux2*eps4 # COMPUTING wpfp # See II Section 3.13 equation (59) twenty = min(3*L-3, 21)+1 aux = 6812*J wpfp = ctx.mag(44*J) for m in range(0, twenty): wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m])) # COMPUTING N AND p # See II Section ctx.prec = wpfp + ctx.mag(t) + 20 a = ctx.sqrt(t/(2*ctx.pi)) N = ctx.floor(a) p = 1-2*(a-N) # now we get a rounded version of p to the precision wpfp # this possibly is not necessary num = ctx.floor(p*(ctx.mpf(2)**wpfp)) difference = p * (ctx.mpf(2)**wpfp)-num if difference < 0.5: num = num else: num = num+1 p = ctx.convert(num * (ctx.mpf(2)**(-wpfp))) # COMPUTING THE COEFFICIENTS c[n] = cc[n] # We shall use the notation cc[n], since there is # a constant that is called c # See II Section 3.14 # We compute the coefficients and also save then in a # cache. 
The bulk of the computation is passed to # the function coef() # # eps6 is defined in II Section 3.13 equation (58) eps6 = ctx.power(2*ctx.pi, J)/(ctx.gamma(J+1)*3*J) # Now we compute the coefficients cc={} cont={} cont, pipowers = coef(ctx, J, eps6) cc = cont.copy() # we need a copy since we have Fp={} for n in range(M, 3*L-2): Fp[n] = 0 ctx.prec = wpfp for m in range(0,M+1): sumP = 0 for k in range(2*J-m-1,-1,-1): sumP = (sumP * p) + cc[k] Fp[m] = sumP # preparation of the new coefficients for k in range(0, 2*J-m-1): cc[k] = (k+1) * cc[k+1] # COMPUTING THE NUMBERS d[n,k] # See II Section 3.17 # First we compute the working precisions wpd[k] # Se II equation (92) wpd = {} d1 = max(6, ctx.mag(40*L*L)) d2 = 13+ctx.mag((1+abs(sigma))*A)-ctx.mag(eps4)-1 const = ctx.ln(8/(ctx.pi*ctx.pi*a*a*B1*B1)) /2 for n in range(0,L): d3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*const)+d2 wpd[n] = max(d3,d1) # procedure of II Section 3.17 ctx.prec = wpd[1]+10 psigma = 1-(2*sigma) d = {} d[0,0,-2]=0; d[0,0,-1]=0; d[0,0,0]=1; d[0,0,1]=0 d[0,-1,-2]=0; d[0,-1,-1]=0; d[0,-1,0]=1; d[0,-1,1]=0 for n in range(1,L): ctx.prec = wpd[n]+10 for k in range(0,3*n//2+1): m = 3*n-2*k if (m!=0): m1 = ctx.one/m c1 = m1/4 c2 = (psigma*m1)/2 c3 = -(m+1) d[0,n,k] = c3*d[0,n-1,k-2]+c1*d[0,n-1,k]+c2*d[0,n-1,k-1] else: d[0,n,k]=0 for r in range(0,k): add = d[0,n,r]*(ctx.one*ctx.fac(2*k-2*r)/ctx.fac(k-r)) d[0,n,k] -= ((-1)**(k-r))*add d[0,n,-2]=0; d[0,n,-1]=0; d[0,n,3*n//2+1]=0 for mu in range(-2,der+1): for n in range(-2,L): for k in range(-3,max(1,3*n//2+2)): if ((mu<0)or (n<0) or(k<0)or (k>3*n//2)): d[mu,n,k] = 0 for mu in range(1,der+1): for n in range(0,L): ctx.prec = wpd[n]+10 for k in range(0,3*n//2+1): aux=(2*mu-2)*d[mu-2,n-2,k-3]+2*(sigma+n-2)*d[mu-1,n-2,k-3] d[mu,n,k] = aux - d[mu-1,n-1,k-1] # COMPUTING THE COEFFICIENTS t[k,l] # See II Section 3.9 # # computing the needed wp wptcoef = {} wpterm = {} ctx.prec = 15 c1 = ctx.mag(40*(L+2)) c2 = ctx.mag(68*(L+2)*A) c4 = ctx.mag(B1*a*math.sqrt(ctx.pi))-1 for k in range(0,L): c3 = c2 - k*c4+ctx.mag(ctx.fac(k+0.5))/2. wptcoef[k] = max(c1,c3-ctx.mag(eps4)+1)+1 +10 wpterm[k] = max(c1,ctx.mag(L+2)+c3-ctx.mag(eps3)+1)+1 +10 # check of power of pi # computing the fortcoef[mu,k,ell] fortcoef={} for mu in derivatives: for k in range(0,L): for ell in range(-2,3*k//2+1): fortcoef[mu,k,ell]=0 for mu in derivatives: for k in range(0,L): ctx.prec = wptcoef[k] for ell in range(0,3*k//2+1): fortcoef[mu,k,ell]=d[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] fortcoef[mu,k,ell]=fortcoef[mu,k,ell]/((2*ctx.j)**ell) def trunc_a(t): wp = ctx.prec ctx.prec = wp + 2 aa = ctx.sqrt(t/(2*ctx.pi)) ctx.prec = wp return aa # computing the tcoef[chi,k,ell] tcoef={} for chi in derivatives: for k in range(0,L): for ell in range(-2,3*k//2+1): tcoef[chi,k,ell]=0 ctx.prec = wptcoef[0]+3 aa = trunc_a(t) la = -ctx.ln(aa) for chi in derivatives: for k in range(0,L): ctx.prec = wptcoef[k] for ell in range(0,3*k//2+1): tcoef[chi,k,ell] = 0 for mu in range(0, chi+1): tcoefter = ctx.binomial(chi,mu) * la**mu * \ fortcoef[chi-mu,k,ell] tcoef[chi,k,ell] += tcoefter # COMPUTING tv[k,ell] # See II Section 3.8 # Computing the powers av[k] = a**(-k) ctx.prec = wptcoef[0] + 2 # a has a good value of a. 
# See II Section 3.6 av = {} av[0] = 1 av[1] = av[0]/a ctx.prec = wptcoef[0] for k in range(2,L): av[k] = av[k-1] * av[1] # Computing the quotients tv = {} for chi in derivatives: for k in range(0,L): ctx.prec = wptcoef[k] for ell in range(0,3*k//2+1): tv[chi,k,ell] = tcoef[chi,k,ell]* av[k] # COMPUTING THE TERMS term[k] # See II Section 3.6 term = {} for chi in derivatives: for n in range(0,L): ctx.prec = wpterm[n] te = 0 for k in range(0, 3*n//2+1): te += tv[chi,n,k] term[chi,n] = te # COMPUTING rssum # See II Section 3.5 rssum={} ctx.prec=15 rsbound = math.sqrt(ctx.pi) * c /(b*a) ctx.prec=15 wprssum = ctx.mag(4.4*((L+3)**2)*rsbound / eps2) wprssum = max(wprssum, ctx.mag(10*(L+1))) ctx.prec = wprssum for chi in derivatives: rssum[chi] = 0 for k in range(1,L+1): rssum[chi] += term[chi,L-k] # COMPUTING S3 # See II Section 3.19 ctx.prec = 15 A2 = 2**(ctx.mag(rssum[0])) eps8 = eps/(3* A2) T = t * ctx.ln(t/(2*ctx.pi)) wps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-sigma))*T) ctx.prec = wps3 tpi = t/(2*ctx.pi) arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8 U = ctx.expj(-arg) a = trunc_a(t) asigma = ctx.power(a, -sigma) S3 = ((-1)**(N-1)) * asigma * U # COMPUTING S1 the zetasum # See II Section 3.18 ctx.prec = 15 wpsum = 4 + ctx.mag((N+ctx.power(N,1-sigma))*ctx.ln(N)/eps1) ctx.prec = wpsum + 10 ''' # This can be improved S1 = {} for chi in derivatives: S1[chi] = 0 for n in range(1,int(N)+1): ln = ctx.ln(n) expn = ctx.exp(-ln*(sigma+ctx.j*t)) for chi in derivatives: term = ctx.power(-ln, chi)*expn S1[chi] += term ''' S1 = ctx._zetasum(s, 1, int(N)-1, derivatives)[0] # END OF COMPUTATION # See II Section 3.1 ctx.prec = 15 absS1 = abs(S1[der]) absS2 = abs(rssum[der] * S3) wpend = max(6, wpinitial + ctx.mag(6*(3*absS1+7*absS2))) ctx.prec = wpend rz = {} for chi in derivatives: rz[chi] = S1[chi]+rssum[chi]*S3 ctx.prec = wpinitial return rz def z_half(ctx,t,der=0): r""" z_half(t,der=0) Computes Z^(der)(t) """ s=ctx.mpf('0.5')+ctx.j*t wpinitial = ctx.prec ctx.prec = 15 tt = t/(2*ctx.pi) wptheta = wpinitial +1 + ctx.mag(3*(tt**1.5)*ctx.ln(tt)) wpz = wpinitial + 1 + ctx.mag(12*tt*ctx.ln(tt)) ctx.prec = wptheta theta = ctx.siegeltheta(t) ctx.prec = wpz rz = Rzeta_set(ctx,s, range(der+1)) if der > 0: ps1 = ctx._re(ctx.psi(0,s/2)/2 - ctx.ln(ctx.pi)/2) if der > 1: ps2 = ctx._re(ctx.j*ctx.psi(1,s/2)/4) if der > 2: ps3 = ctx._re(-ctx.psi(2,s/2)/8) if der > 3: ps4 = ctx._re(-ctx.j*ctx.psi(3,s/2)/16) exptheta = ctx.expj(theta) if der == 0: z = 2*exptheta*rz[0] if der == 1: zf = 2j*exptheta z = zf*(ps1*rz[0]+rz[1]) if der == 2: zf = 2 * exptheta z = -zf*(2*rz[1]*ps1+rz[0]*ps1**2+rz[2]-ctx.j*rz[0]*ps2) if der == 3: zf = -2j*exptheta z = 3*rz[1]*ps1**2+rz[0]*ps1**3+3*ps1*rz[2] z = zf*(z-3j*rz[1]*ps2-3j*rz[0]*ps1*ps2+rz[3]-rz[0]*ps3) if der == 4: zf = 2*exptheta z = 4*rz[1]*ps1**3+rz[0]*ps1**4+6*ps1**2*rz[2] z = z-12j*rz[1]*ps1*ps2-6j*rz[0]*ps1**2*ps2-6j*rz[2]*ps2-3*rz[0]*ps2*ps2 z = z + 4*ps1*rz[3]-4*rz[1]*ps3-4*rz[0]*ps1*ps3+rz[4]+ctx.j*rz[0]*ps4 z = zf*z ctx.prec = wpinitial return ctx._re(z) def zeta_half(ctx, s, k=0): """ zeta_half(s,k=0) Computes zeta^(k)(s) when Re s = 0.5 """ wpinitial = ctx.prec sigma = ctx._re(s) t = ctx._im(s) #--- compute wptheta, wpR, wpbasic --- ctx.prec = 53 # X see II Section 3.21 (109) and (110) if sigma > 0: X = ctx.sqrt(abs(s)) else: X = (2*ctx.pi)**(sigma-1) * abs(1-s)**(0.5-sigma) # M1 see II Section 3.21 (111) and (112) if sigma > 0: M1 = 2*ctx.sqrt(t/(2*ctx.pi)) else: M1 = 4 * t * X # T see II Section 3.21 (113) abst = abs(0.5-s) T = 2* abst*math.log(abst) # computing wpbasic, wptheta, 
wpR see II Section 3.21 wpbasic = max(6,3+ctx.mag(t)) wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M1*X+1.3*M1*X*T)+wpinitial+1 wpbasic = max(wpbasic, wpbasic2) wptheta = max(4, 3+ctx.mag(2.7*M1*X)+wpinitial+1) wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1 ctx.prec = wptheta theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5'))) if k > 0: ps1 = (ctx._re(ctx.psi(0,s/2)))/2 - ctx.ln(ctx.pi)/2 if k > 1: ps2 = -(ctx._im(ctx.psi(1,s/2)))/4 if k > 2: ps3 = -(ctx._re(ctx.psi(2,s/2)))/8 if k > 3: ps4 = (ctx._im(ctx.psi(3,s/2)))/16 ctx.prec = wpR xrz = Rzeta_set(ctx,s,range(k+1)) yrz={} for chi in range(0,k+1): yrz[chi] = ctx.conj(xrz[chi]) ctx.prec = wpbasic exptheta = ctx.expj(-2*theta) if k==0: zv = xrz[0]+exptheta*yrz[0] if k==1: zv1 = -yrz[1] - 2*yrz[0]*ps1 zv = xrz[1] + exptheta*zv1 if k==2: zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2)+yrz[2]+2j*yrz[0]*ps2 zv = xrz[2]+exptheta*zv1 if k==3: zv1 = -12*yrz[1]*ps1**2-8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2 zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3 zv = xrz[3]+exptheta*zv1 if k == 4: zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2 zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2 zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3 zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4 zv = xrz[4]+exptheta*zv1 ctx.prec = wpinitial return zv def zeta_offline(ctx, s, k=0): """ Computes zeta^(k)(s) off the line """ wpinitial = ctx.prec sigma = ctx._re(s) t = ctx._im(s) #--- compute wptheta, wpR, wpbasic --- ctx.prec = 53 # X see II Section 3.21 (109) and (110) if sigma > 0: X = ctx.power(abs(s), 0.5) else: X = ctx.power(2*ctx.pi, sigma-1)*ctx.power(abs(1-s),0.5-sigma) # M1 see II Section 3.21 (111) and (112) if (sigma > 0): M1 = 2*ctx.sqrt(t/(2*ctx.pi)) else: M1 = 4 * t * X # M2 see II Section 3.21 (111) and (112) if (1-sigma > 0): M2 = 2*ctx.sqrt(t/(2*ctx.pi)) else: M2 = 4*t*ctx.power(2*ctx.pi, -sigma)*ctx.power(abs(s),sigma-0.5) # T see II Section 3.21 (113) abst = abs(0.5-s) T = 2* abst*math.log(abst) # computing wpbasic, wptheta, wpR see II Section 3.21 wpbasic = max(6,3+ctx.mag(t)) wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M2*X+1.3*M2*X*T)+wpinitial+1 wpbasic = max(wpbasic, wpbasic2) wptheta = max(4, 3+ctx.mag(2.7*M2*X)+wpinitial+1) wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1 ctx.prec = wptheta theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5'))) s1 = s s2 = ctx.conj(1-s1) ctx.prec = wpR xrz, yrz = Rzeta_simul(ctx, s, k) if k > 0: ps1 = (ctx.psi(0,s1/2)+ctx.psi(0,(1-s1)/2))/4 - ctx.ln(ctx.pi)/2 if k > 1: ps2 = ctx.j*(ctx.psi(1,s1/2)-ctx.psi(1,(1-s1)/2))/8 if k > 2: ps3 = -(ctx.psi(2,s1/2)+ctx.psi(2,(1-s1)/2))/16 if k > 3: ps4 = -ctx.j*(ctx.psi(3,s1/2)-ctx.psi(3,(1-s1)/2))/32 ctx.prec = wpbasic exptheta = ctx.expj(-2*theta) if k == 0: zv = xrz[0]+exptheta*yrz[0] if k == 1: zv1 = -yrz[1]-2*yrz[0]*ps1 zv = xrz[1]+exptheta*zv1 if k == 2: zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2) +yrz[2]+2j*yrz[0]*ps2 zv = xrz[2]+exptheta*zv1 if k == 3: zv1 = -12*yrz[1]*ps1**2 -8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2 zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3 zv = xrz[3]+exptheta*zv1 if k == 4: zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2 zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2 zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3 zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4 zv = xrz[4]+exptheta*zv1 ctx.prec = wpinitial return zv def z_offline(ctx, w, k=0): r""" Computes Z(w) and its derivatives off the line """ s = ctx.mpf('0.5')+ctx.j*w s1 = s s2 = ctx.conj(1-s1) wpinitial = ctx.prec ctx.prec = 35 # X see II Section 
3.21 (109) and (110) # M1 see II Section 3.21 (111) and (112) if (ctx._re(s1) >= 0): M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi)) X = ctx.sqrt(abs(s1)) else: X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1)) M1 = 4 * ctx._im(s1)*X # M2 see II Section 3.21 (111) and (112) if (ctx._re(s2) >= 0): M2 = 2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi)) else: M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2)) # T see II Section 3.21 Prop. 27 T = 2*abs(ctx.siegeltheta(w)) # defining some precisions # see II Section 3.22 (115), (116), (117) aux1 = ctx.sqrt(X) aux2 = aux1*(M1+M2) aux3 = 3 +wpinitial wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3) wptheta = max(4,ctx.mag(2.04*aux2)+aux3) wpR = ctx.mag(4*aux1)+aux3 # now the computations ctx.prec = wptheta theta = ctx.siegeltheta(w) ctx.prec = wpR xrz, yrz = Rzeta_simul(ctx,s,k) pta = 0.25 + 0.5j*w ptb = 0.25 - 0.5j*w if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2 if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb)) if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb)) if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb)) ctx.prec = wpbasic exptheta = ctx.expj(theta) if k == 0: zv = exptheta*xrz[0]+yrz[0]/exptheta j = ctx.j if k == 1: zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta if k == 2: zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2) zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta if k == 3: zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2 zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2 zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta zv = zv1+zv2 if k == 4: zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2 zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2 zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3 zv1 = zv1+xrz[4]+j*xrz[0]*ps4 zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2 zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2 zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3 zv2 = zv2+yrz[4]-j*yrz[0]*ps4 zv = exptheta*zv1+zv2/exptheta ctx.prec = wpinitial return zv @defun def rs_zeta(ctx, s, derivative=0, **kwargs): if derivative > 4: raise NotImplementedError s = ctx.convert(s) re = ctx._re(s); im = ctx._im(s) if im < 0: z = ctx.conj(ctx.rs_zeta(ctx.conj(s), derivative)) return z critical_line = (re == 0.5) if critical_line: return zeta_half(ctx, s, derivative) else: return zeta_offline(ctx, s, derivative) @defun def rs_z(ctx, w, derivative=0): w = ctx.convert(w) re = ctx._re(w); im = ctx._im(w) if re < 0: return rs_z(ctx, -w, derivative) critical_line = (im == 0) if critical_line : return z_half(ctx, w, derivative) else: return z_offline(ctx, w, derivative)
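# ----------------------------------------------------------------------
# Illustrative cross-check (added in this edited dump; not part of the
# upstream module).  A minimal sketch, assuming an installed `mpmath`
# whose working context exposes rs_zeta, rs_z, zeta and siegelz as in
# this file, comparing the Riemann-Siegel evaluation against the default
# algorithms at a height where both apply.  The height t = 5000 and the
# tolerance 1e-10 are assumed choices, picked so that siegelz still uses
# the direct exp(i*theta(t))*zeta(1/2+it) path while rs_z/rs_zeta use the
# Riemann-Siegel expansion.  Run with `python -m mpmath.functions.rszeta`.
if __name__ == "__main__":
    from mpmath import mp

    mp.dps = 15
    t = 5000
    s = mp.mpc(0.5, t)
    # zeta on the critical line: Riemann-Siegel vs. the default evaluation
    assert abs(mp.rs_zeta(s) - mp.zeta(s)) < 1e-10
    # Z(t): rs_z vs. siegelz (two independent code paths at this height)
    assert abs(mp.rs_z(t) - mp.siegelz(t)) < 1e-10
    print("Riemann-Siegel cross-check passed")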
46,184
31.895299
90
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/zeta.py
from ..libmp.backend import xrange, print_ from .functions import defun, defun_wrapped, defun_static @defun def stieltjes(ctx, n, a=1): n = ctx.convert(n) a = ctx.convert(a) if n < 0: return ctx.bad_domain("Stieltjes constants defined for n >= 0") if hasattr(ctx, "stieltjes_cache"): stieltjes_cache = ctx.stieltjes_cache else: stieltjes_cache = ctx.stieltjes_cache = {} if a == 1: if n == 0: return +ctx.euler if n in stieltjes_cache: prec, s = stieltjes_cache[n] if prec >= ctx.prec: return +s mag = 1 def f(x): xa = x/a v = (xa-ctx.j)*ctx.ln(a-ctx.j*x)**n/(1+xa**2)/(ctx.exp(2*ctx.pi*x)-1) return ctx._re(v) / mag orig = ctx.prec try: # Normalize integrand by approx. magnitude to # speed up quadrature (which uses absolute error) if n > 50: ctx.prec = 20 mag = ctx.quad(f, [0,ctx.inf], maxdegree=3) ctx.prec = orig + 10 + int(n**0.5) s = ctx.quad(f, [0,ctx.inf], maxdegree=20) v = ctx.ln(a)**n/(2*a) - ctx.ln(a)**(n+1)/(n+1) + 2*s/a*mag finally: ctx.prec = orig if a == 1 and ctx.isint(n): stieltjes_cache[n] = (ctx.prec, v) return +v @defun_wrapped def siegeltheta(ctx, t, derivative=0): d = int(derivative) if (t == ctx.inf or t == ctx.ninf): if d < 2: if t == ctx.ninf and d == 0: return ctx.ninf return ctx.inf else: return ctx.zero if d == 0: if ctx._im(t): # XXX: cancellation occurs a = ctx.loggamma(0.25+0.5j*t) b = ctx.loggamma(0.25-0.5j*t) return -ctx.ln(ctx.pi)/2*t - 0.5j*(a-b) else: if ctx.isinf(t): return t return ctx._im(ctx.loggamma(0.25+0.5j*t)) - ctx.ln(ctx.pi)/2*t if d > 0: a = (-0.5j)**(d-1)*ctx.polygamma(d-1, 0.25-0.5j*t) b = (0.5j)**(d-1)*ctx.polygamma(d-1, 0.25+0.5j*t) if ctx._im(t): if d == 1: return -0.5*ctx.log(ctx.pi)+0.25*(a+b) else: return 0.25*(a+b) else: if d == 1: return ctx._re(-0.5*ctx.log(ctx.pi)+0.25*(a+b)) else: return ctx._re(0.25*(a+b)) @defun_wrapped def grampoint(ctx, n): # asymptotic expansion, from # http://mathworld.wolfram.com/GramPoint.html g = 2*ctx.pi*ctx.exp(1+ctx.lambertw((8*n+1)/(8*ctx.e))) return ctx.findroot(lambda t: ctx.siegeltheta(t)-ctx.pi*n, g) @defun_wrapped def siegelz(ctx, t, **kwargs): d = int(kwargs.get("derivative", 0)) t = ctx.convert(t) t1 = ctx._re(t) t2 = ctx._im(t) prec = ctx.prec try: if abs(t1) > 500*prec and t2**2 < t1: v = ctx.rs_z(t, d) if ctx._is_real_type(t): return ctx._re(v) return v except NotImplementedError: pass ctx.prec += 21 e1 = ctx.expj(ctx.siegeltheta(t)) z = ctx.zeta(0.5+ctx.j*t) if d == 0: v = e1*z ctx.prec=prec if ctx._is_real_type(t): return ctx._re(v) return +v z1 = ctx.zeta(0.5+ctx.j*t, derivative=1) theta1 = ctx.siegeltheta(t, derivative=1) if d == 1: v = ctx.j*e1*(z1+z*theta1) ctx.prec=prec if ctx._is_real_type(t): return ctx._re(v) return +v z2 = ctx.zeta(0.5+ctx.j*t, derivative=2) theta2 = ctx.siegeltheta(t, derivative=2) comb1 = theta1**2-ctx.j*theta2 if d == 2: def terms(): return [2*z1*theta1, z2, z*comb1] v = ctx.sum_accurately(terms, 1) v = -e1*v ctx.prec = prec if ctx._is_real_type(t): return ctx._re(v) return +v ctx.prec += 10 z3 = ctx.zeta(0.5+ctx.j*t, derivative=3) theta3 = ctx.siegeltheta(t, derivative=3) comb2 = theta1**3-3*ctx.j*theta1*theta2-theta3 if d == 3: def terms(): return [3*theta1*z2, 3*z1*comb1, z3+z*comb2] v = ctx.sum_accurately(terms, 1) v = -ctx.j*e1*v ctx.prec = prec if ctx._is_real_type(t): return ctx._re(v) return +v z4 = ctx.zeta(0.5+ctx.j*t, derivative=4) theta4 = ctx.siegeltheta(t, derivative=4) def terms(): return [theta1**4, -6*ctx.j*theta1**2*theta2, -3*theta2**2, -4*theta1*theta3, ctx.j*theta4] comb3 = ctx.sum_accurately(terms, 1) if d == 4: def terms(): return [6*theta1**2*z2, 
-6*ctx.j*z2*theta2, 4*theta1*z3, 4*z1*comb2, z4, z*comb3] v = ctx.sum_accurately(terms, 1) v = e1*v ctx.prec = prec if ctx._is_real_type(t): return ctx._re(v) return +v if d > 4: h = lambda x: ctx.siegelz(x, derivative=4) return ctx.diff(h, t, n=d-4) _zeta_zeros = [ 14.134725142,21.022039639,25.010857580,30.424876126,32.935061588, 37.586178159,40.918719012,43.327073281,48.005150881,49.773832478, 52.970321478,56.446247697,59.347044003,60.831778525,65.112544048, 67.079810529,69.546401711,72.067157674,75.704690699,77.144840069, 79.337375020,82.910380854,84.735492981,87.425274613,88.809111208, 92.491899271,94.651344041,95.870634228,98.831194218,101.317851006, 103.725538040,105.446623052,107.168611184,111.029535543,111.874659177, 114.320220915,116.226680321,118.790782866,121.370125002,122.946829294, 124.256818554,127.516683880,129.578704200,131.087688531,133.497737203, 134.756509753,138.116042055,139.736208952,141.123707404,143.111845808, 146.000982487,147.422765343,150.053520421,150.925257612,153.024693811, 156.112909294,157.597591818,158.849988171,161.188964138,163.030709687, 165.537069188,167.184439978,169.094515416,169.911976479,173.411536520, 174.754191523,176.441434298,178.377407776,179.916484020,182.207078484, 184.874467848,185.598783678,187.228922584,189.416158656,192.026656361, 193.079726604,195.265396680,196.876481841,198.015309676,201.264751944, 202.493594514,204.189671803,205.394697202,207.906258888,209.576509717, 211.690862595,213.347919360,214.547044783,216.169538508,219.067596349, 220.714918839,221.430705555,224.007000255,224.983324670,227.421444280, 229.337413306,231.250188700,231.987235253,233.693404179,236.524229666, ] def _load_zeta_zeros(url): import urllib d = urllib.urlopen(url) L = [float(x) for x in d.readlines()] # Sanity check assert round(L[0]) == 14 _zeta_zeros[:] = L @defun def oldzetazero(ctx, n, url='http://www.dtc.umn.edu/~odlyzko/zeta_tables/zeros1'): n = int(n) if n < 0: return ctx.zetazero(-n).conjugate() if n == 0: raise ValueError("n must be nonzero") if n > len(_zeta_zeros) and n <= 100000: _load_zeta_zeros(url) if n > len(_zeta_zeros): raise NotImplementedError("n too large for zetazeros") return ctx.mpc(0.5, ctx.findroot(ctx.siegelz, _zeta_zeros[n-1])) @defun_wrapped def riemannr(ctx, x): if x == 0: return ctx.zero # Check if a simple asymptotic estimate is accurate enough if abs(x) > 1000: a = ctx.li(x) b = 0.5*ctx.li(ctx.sqrt(x)) if abs(b) < abs(a)*ctx.eps: return a if abs(x) < 0.01: # XXX ctx.prec += int(-ctx.log(abs(x),2)) # Sum Gram's series s = t = ctx.one u = ctx.ln(x) k = 1 while abs(t) > abs(s)*ctx.eps: t = t * u / k s += t / (k * ctx._zeta_int(k+1)) k += 1 return s @defun_static def primepi(ctx, x): x = int(x) if x < 2: return 0 return len(ctx.list_primes(x)) # TODO: fix the interface wrt contexts @defun_wrapped def primepi2(ctx, x): x = int(x) if x < 2: return ctx._iv.zero if x < 2657: return ctx._iv.mpf(ctx.primepi(x)) mid = ctx.li(x) # Schoenfeld's estimate for x >= 2657, assuming RH err = ctx.sqrt(x,rounding='u')*ctx.ln(x,rounding='u')/8/ctx.pi(rounding='d') a = ctx.floor((ctx._iv.mpf(mid)-err).a, rounding='d') b = ctx.ceil((ctx._iv.mpf(mid)+err).b, rounding='u') return ctx._iv.mpf([a,b]) @defun_wrapped def primezeta(ctx, s): if ctx.isnan(s): return s if ctx.re(s) <= 0: raise ValueError("prime zeta function defined only for re(s) > 0") if s == 1: return ctx.inf if s == 0.5: return ctx.mpc(ctx.ninf, ctx.pi) r = ctx.re(s) if r > ctx.prec: return 0.5**s else: wp = ctx.prec + int(r) def terms(): orig = ctx.prec # zeta ~ 1+eps; need to set 
precision # to get logarithm accurately k = 0 while 1: k += 1 u = ctx.moebius(k) if not u: continue ctx.prec = wp t = u*ctx.ln(ctx.zeta(k*s))/k if not t: return #print ctx.prec, ctx.nstr(t) ctx.prec = orig yield t return ctx.sum_accurately(terms) # TODO: for bernpoly and eulerpoly, ensure that all exact zeros are covered @defun_wrapped def bernpoly(ctx, n, z): # Slow implementation: #return sum(ctx.binomial(n,k)*ctx.bernoulli(k)*z**(n-k) for k in xrange(0,n+1)) n = int(n) if n < 0: raise ValueError("Bernoulli polynomials only defined for n >= 0") if z == 0 or (z == 1 and n > 1): return ctx.bernoulli(n) if z == 0.5: return (ctx.ldexp(1,1-n)-1)*ctx.bernoulli(n) if n <= 3: if n == 0: return z ** 0 if n == 1: return z - 0.5 if n == 2: return (6*z*(z-1)+1)/6 if n == 3: return z*(z*(z-1.5)+0.5) if ctx.isinf(z): return z ** n if ctx.isnan(z): return z if abs(z) > 2: def terms(): t = ctx.one yield t r = ctx.one/z k = 1 while k <= n: t = t*(n+1-k)/k*r if not (k > 2 and k & 1): yield t*ctx.bernoulli(k) k += 1 return ctx.sum_accurately(terms) * z**n else: def terms(): yield ctx.bernoulli(n) t = ctx.one k = 1 while k <= n: t = t*(n+1-k)/k * z m = n-k if not (m > 2 and m & 1): yield t*ctx.bernoulli(m) k += 1 return ctx.sum_accurately(terms) @defun_wrapped def eulerpoly(ctx, n, z): n = int(n) if n < 0: raise ValueError("Euler polynomials only defined for n >= 0") if n <= 2: if n == 0: return z ** 0 if n == 1: return z - 0.5 if n == 2: return z*(z-1) if ctx.isinf(z): return z**n if ctx.isnan(z): return z m = n+1 if z == 0: return -2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0 if z == 1: return 2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0 if z == 0.5: if n % 2: return ctx.zero # Use exact code for Euler numbers if n < 100 or n*ctx.mag(0.46839865*n) < ctx.prec*0.25: return ctx.ldexp(ctx._eulernum(n), -n) # http://functions.wolfram.com/Polynomials/EulerE2/06/01/02/01/0002/ def terms(): t = ctx.one k = 0 w = ctx.ldexp(1,n+2) while 1: v = n-k+1 if not (v > 2 and v & 1): yield (2-w)*ctx.bernoulli(v)*t k += 1 if k > n: break t = t*z*(n-k+2)/k w *= 0.5 return ctx.sum_accurately(terms) / m @defun def eulernum(ctx, n, exact=False): n = int(n) if exact: return int(ctx._eulernum(n)) if n < 100: return ctx.mpf(ctx._eulernum(n)) if n % 2: return ctx.zero return ctx.ldexp(ctx.eulerpoly(n,0.5), n) # TODO: this should be implemented low-level def polylog_series(ctx, s, z): tol = +ctx.eps l = ctx.zero k = 1 zk = z while 1: term = zk / k**s l += term if abs(term) < tol: break zk *= z k += 1 return l def polylog_continuation(ctx, n, z): if n < 0: return z*0 twopij = 2j * ctx.pi a = -twopij**n/ctx.fac(n) * ctx.bernpoly(n, ctx.ln(z)/twopij) if ctx._is_real_type(z) and z < 0: a = ctx._re(a) if ctx._im(z) < 0 or (ctx._im(z) == 0 and ctx._re(z) >= 1): a -= twopij*ctx.ln(z)**(n-1)/ctx.fac(n-1) return a def polylog_unitcircle(ctx, n, z): tol = +ctx.eps if n > 1: l = ctx.zero logz = ctx.ln(z) logmz = ctx.one m = 0 while 1: if (n-m) != 1: term = ctx.zeta(n-m) * logmz / ctx.fac(m) if term and abs(term) < tol: break l += term logmz *= logz m += 1 l += ctx.ln(z)**(n-1)/ctx.fac(n-1)*(ctx.harmonic(n-1)-ctx.ln(-ctx.ln(z))) elif n < 1: # else l = ctx.fac(-n)*(-ctx.ln(z))**(n-1) logz = ctx.ln(z) logkz = ctx.one k = 0 while 1: b = ctx.bernoulli(k-n+1) if b: term = b*logkz/(ctx.fac(k)*(k-n+1)) if abs(term) < tol: break l -= term logkz *= logz k += 1 else: raise ValueError if ctx._is_real_type(z) and z < 0: l = ctx._re(l) return l def polylog_general(ctx, s, z): v = ctx.zero u = ctx.ln(z) if not abs(u) < 5: # theoretically |u| < 2*pi raise 
NotImplementedError("polylog for arbitrary s and z") t = 1 k = 0 while 1: term = ctx.zeta(s-k) * t if abs(term) < ctx.eps: break v += term k += 1 t *= u t /= k return ctx.gamma(1-s)*(-u)**(s-1) + v @defun_wrapped def polylog(ctx, s, z): s = ctx.convert(s) z = ctx.convert(z) if z == 1: return ctx.zeta(s) if z == -1: return -ctx.altzeta(s) if s == 0: return z/(1-z) if s == 1: return -ctx.ln(1-z) if s == -1: return z/(1-z)**2 if abs(z) <= 0.75 or (not ctx.isint(s) and abs(z) < 0.9): return polylog_series(ctx, s, z) if abs(z) >= 1.4 and ctx.isint(s): return (-1)**(s+1)*polylog_series(ctx, s, 1/z) + polylog_continuation(ctx, s, z) if ctx.isint(s): return polylog_unitcircle(ctx, int(s), z) return polylog_general(ctx, s, z) #raise NotImplementedError("polylog for arbitrary s and z") # This could perhaps be used in some cases #from quadrature import quad #return quad(lambda t: t**(s-1)/(exp(t)/z-1),[0,inf])/gamma(s) @defun_wrapped def clsin(ctx, s, z, pi=False): if ctx.isint(s) and s < 0 and int(s) % 2 == 1: return z*0 if pi: a = ctx.expjpi(z) else: a = ctx.expj(z) if ctx._is_real_type(z) and ctx._is_real_type(s): return ctx.im(ctx.polylog(s,a)) b = 1/a return (-0.5j)*(ctx.polylog(s,a) - ctx.polylog(s,b)) @defun_wrapped def clcos(ctx, s, z, pi=False): if ctx.isint(s) and s < 0 and int(s) % 2 == 0: return z*0 if pi: a = ctx.expjpi(z) else: a = ctx.expj(z) if ctx._is_real_type(z) and ctx._is_real_type(s): return ctx.re(ctx.polylog(s,a)) b = 1/a return 0.5*(ctx.polylog(s,a) + ctx.polylog(s,b)) @defun def altzeta(ctx, s, **kwargs): try: return ctx._altzeta(s, **kwargs) except NotImplementedError: return ctx._altzeta_generic(s) @defun_wrapped def _altzeta_generic(ctx, s): if s == 1: return ctx.ln2 + 0*s return -ctx.powm1(2, 1-s) * ctx.zeta(s) @defun def zeta(ctx, s, a=1, derivative=0, method=None, **kwargs): d = int(derivative) if a == 1 and not (d or method): try: return ctx._zeta(s, **kwargs) except NotImplementedError: pass s = ctx.convert(s) prec = ctx.prec method = kwargs.get('method') verbose = kwargs.get('verbose') if (not s) and (not derivative): return ctx.mpf(0.5) - ctx._convert_param(a)[0] if a == 1 and method != 'euler-maclaurin': im = abs(ctx._im(s)) re = abs(ctx._re(s)) #if (im < prec or method == 'borwein') and not derivative: # try: # if verbose: # print "zeta: Attempting to use the Borwein algorithm" # return ctx._zeta(s, **kwargs) # except NotImplementedError: # if verbose: # print "zeta: Could not use the Borwein algorithm" # pass if abs(im) > 500*prec and 10*re < prec and derivative <= 4 or \ method == 'riemann-siegel': try: # py2.4 compatible try block try: if verbose: print("zeta: Attempting to use the Riemann-Siegel algorithm") return ctx.rs_zeta(s, derivative, **kwargs) except NotImplementedError: if verbose: print("zeta: Could not use the Riemann-Siegel algorithm") pass finally: ctx.prec = prec if s == 1: return ctx.inf abss = abs(s) if abss == ctx.inf: if ctx.re(s) == ctx.inf: if d == 0: return ctx.one return ctx.zero return s*0 elif ctx.isnan(abss): return 1/s if ctx.re(s) > 2*ctx.prec and a == 1 and not derivative: return ctx.one + ctx.power(2, -s) return +ctx._hurwitz(s, a, d, **kwargs) @defun def _hurwitz(ctx, s, a=1, d=0, **kwargs): prec = ctx.prec verbose = kwargs.get('verbose') try: extraprec = 10 ctx.prec += extraprec # We strongly want to special-case rational a a, atype = ctx._convert_param(a) if ctx.re(s) < 0: if verbose: print("zeta: Attempting reflection formula") try: return _hurwitz_reflection(ctx, s, a, d, atype) except NotImplementedError: pass if verbose: 
print("zeta: Reflection formula failed") if verbose: print("zeta: Using the Euler-Maclaurin algorithm") while 1: ctx.prec = prec + extraprec T1, T2 = _hurwitz_em(ctx, s, a, d, prec+10, verbose) cancellation = ctx.mag(T1) - ctx.mag(T1+T2) if verbose: print_("Term 1:", T1) print_("Term 2:", T2) print_("Cancellation:", cancellation, "bits") if cancellation < extraprec: return T1 + T2 else: extraprec = max(2*extraprec, min(cancellation + 5, 100*prec)) if extraprec > kwargs.get('maxprec', 100*prec): raise ctx.NoConvergence("zeta: too much cancellation") finally: ctx.prec = prec def _hurwitz_reflection(ctx, s, a, d, atype): # TODO: implement for derivatives if d != 0: raise NotImplementedError res = ctx.re(s) negs = -s # Integer reflection formula if ctx.isnpint(s): n = int(res) if n <= 0: return ctx.bernpoly(1-n, a) / (n-1) t = 1-s # We now require a to be standardized v = 0 shift = 0 b = a while ctx.re(b) > 1: b -= 1 v -= b**negs shift -= 1 while ctx.re(b) <= 0: v += b**negs b += 1 shift += 1 # Rational reflection formula if atype == 'Q' or atype == 'Z': try: p, q = a._mpq_ except: assert a == int(a) p = int(a) q = 1 p += shift*q assert 1 <= p <= q g = ctx.fsum(ctx.cospi(t/2-2*k*b)*ctx._hurwitz(t,(k,q)) \ for k in range(1,q+1)) g *= 2*ctx.gamma(t)/(2*ctx.pi*q)**t v += g return v # General reflection formula # Note: clcos/clsin can raise NotImplementedError else: C1, C2 = ctx.cospi_sinpi(0.5*t) # Clausen functions; could maybe use polylog directly if C1: C1 *= ctx.clcos(t, 2*a, pi=True) if C2: C2 *= ctx.clsin(t, 2*a, pi=True) v += 2*ctx.gamma(t)/(2*ctx.pi)**t*(C1+C2) return v def _hurwitz_em(ctx, s, a, d, prec, verbose): # May not be converted at this point a = ctx.convert(a) tol = -prec # Estimate number of terms for Euler-Maclaurin summation; could be improved M1 = 0 M2 = prec // 3 N = M2 lsum = 0 # This speeds up the recurrence for derivatives if ctx.isint(s): s = int(ctx._re(s)) s1 = s-1 while 1: # Truncated L-series l = ctx._zetasum(s, M1+a, M2-M1-1, [d])[0][0] #if d: # l = ctx.fsum((-ctx.ln(n+a))**d * (n+a)**negs for n in range(M1,M2)) #else: # l = ctx.fsum((n+a)**negs for n in range(M1,M2)) lsum += l M2a = M2+a logM2a = ctx.ln(M2a) logM2ad = logM2a**d logs = [logM2ad] logr = 1/logM2a rM2a = 1/M2a M2as = rM2a**s if d: tailsum = ctx.gammainc(d+1, s1*logM2a) / s1**(d+1) else: tailsum = 1/((s1)*(M2a)**s1) tailsum += 0.5 * logM2ad * M2as U = [1] r = M2as fact = 2 for j in range(1, N+1): # TODO: the following could perhaps be tidied a bit j2 = 2*j if j == 1: upds = [1] else: upds = [j2-2, j2-1] for m in upds: D = min(m,d+1) if m <= d: logs.append(logs[-1] * logr) Un = [0]*(D+1) for i in xrange(D): Un[i] = (1-m-s)*U[i] for i in xrange(1,D+1): Un[i] += (d-(i-1))*U[i-1] U = Un r *= rM2a t = ctx.fdot(U, logs) * r * ctx.bernoulli(j2)/(-fact) tailsum += t if ctx.mag(t) < tol: return lsum, (-1)**d * tailsum fact *= (j2+1)*(j2+2) if verbose: print_("Sum range:", M1, M2, "term magnitude", ctx.mag(t), "tolerance", tol) M1, M2 = M2, M2*2 if ctx.re(s) < 0: N += N//2 @defun def _zetasum(ctx, s, a, n, derivatives=[0], reflect=False): """ Returns [xd0,xd1,...,xdr], [yd0,yd1,...ydr] where xdk = D^k ( 1/a^s + 1/(a+1)^s + ... + 1/(a+n)^s ) ydk = D^k conj( 1/a^(1-s) + 1/(a+1)^(1-s) + ... + 1/(a+n)^(1-s) ) D^k = kth derivative with respect to s, k ranges over the given list of derivatives (which should consist of either a single element or a range 0,1,...r). If reflect=False, the ydks are not computed. 
""" #print "zetasum", s, a, n # don't use the fixed-point code if there are large exponentials if abs(ctx.re(s)) < 0.5 * ctx.prec: try: return ctx._zetasum_fast(s, a, n, derivatives, reflect) except NotImplementedError: pass negs = ctx.fneg(s, exact=True) have_derivatives = derivatives != [0] have_one_derivative = len(derivatives) == 1 if not reflect: if not have_derivatives: return [ctx.fsum((a+k)**negs for k in xrange(n+1))], [] if have_one_derivative: d = derivatives[0] x = ctx.fsum(ctx.ln(a+k)**d * (a+k)**negs for k in xrange(n+1)) return [(-1)**d * x], [] maxd = max(derivatives) if not have_one_derivative: derivatives = range(maxd+1) xs = [ctx.zero for d in derivatives] if reflect: ys = [ctx.zero for d in derivatives] else: ys = [] for k in xrange(n+1): w = a + k xterm = w ** negs if reflect: yterm = ctx.conj(ctx.one / (w * xterm)) if have_derivatives: logw = -ctx.ln(w) if have_one_derivative: logw = logw ** maxd xs[0] += xterm * logw if reflect: ys[0] += yterm * logw else: t = ctx.one for d in derivatives: xs[d] += xterm * t if reflect: ys[d] += yterm * t t *= logw else: xs[0] += xterm if reflect: ys[0] += yterm return xs, ys @defun def dirichlet(ctx, s, chi=[1], derivative=0): s = ctx.convert(s) q = len(chi) d = int(derivative) if d > 2: raise NotImplementedError("arbitrary order derivatives") prec = ctx.prec try: ctx.prec += 10 if s == 1: have_pole = True for x in chi: if x and x != 1: have_pole = False h = +ctx.eps ctx.prec *= 2*(d+1) s += h if have_pole: return +ctx.inf z = ctx.zero for p in range(1,q+1): if chi[p%q]: if d == 1: z += chi[p%q] * (ctx.zeta(s, (p,q), 1) - \ ctx.zeta(s, (p,q))*ctx.log(q)) else: z += chi[p%q] * ctx.zeta(s, (p,q)) z /= q**s finally: ctx.prec = prec return +z def secondzeta_main_term(ctx, s, a, **kwargs): tol = ctx.eps f = lambda n: ctx.gammainc(0.5*s, a*gamm**2, regularized=True)*gamm**(-s) totsum = term = ctx.zero mg = ctx.inf n = 0 while mg > tol: totsum += term n += 1 gamm = ctx.im(ctx.zetazero_memoized(n)) term = f(n) mg = abs(term) err = 0 if kwargs.get("error"): sg = ctx.re(s) err = 0.5*ctx.pi**(-1)*max(1,sg)*a**(sg-0.5)*ctx.log(gamm/(2*ctx.pi))*\ ctx.gammainc(-0.5, a*gamm**2)/abs(ctx.gamma(s/2)) err = abs(err) return +totsum, err, n def secondzeta_prime_term(ctx, s, a, **kwargs): tol = ctx.eps f = lambda n: ctx.gammainc(0.5*(1-s),0.25*ctx.log(n)**2 * a**(-1))*\ ((0.5*ctx.log(n))**(s-1))*ctx.mangoldt(n)/ctx.sqrt(n)/\ (2*ctx.gamma(0.5*s)*ctx.sqrt(ctx.pi)) totsum = term = ctx.zero mg = ctx.inf n = 1 while mg > tol or n < 9: totsum += term n += 1 term = f(n) if term == 0: mg = ctx.inf else: mg = abs(term) if kwargs.get("error"): err = mg return +totsum, err, n def secondzeta_exp_term(ctx, s, a): if ctx.isint(s) and ctx.re(s) <= 0: m = int(round(ctx.re(s))) if not m & 1: return ctx.mpf('-0.25')**(-m//2) tol = ctx.eps f = lambda n: (0.25*a)**n/((n+0.5*s)*ctx.fac(n)) totsum = ctx.zero term = f(0) mg = ctx.inf n = 0 while mg > tol: totsum += term n += 1 term = f(n) mg = abs(term) v = a**(0.5*s)*totsum/ctx.gamma(0.5*s) return v def secondzeta_singular_term(ctx, s, a, **kwargs): factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s)) extraprec = ctx.mag(factor) ctx.prec += extraprec factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s)) tol = ctx.eps f = lambda n: ctx.bernpoly(n,0.75)*(4*ctx.sqrt(a))**n*\ ctx.gamma(0.5*n)/((s+n-1)*ctx.fac(n)) totsum = ctx.zero mg1 = ctx.inf n = 1 term = f(n) mg2 = abs(term) while mg2 > tol and mg2 <= mg1: totsum += term n += 1 term = f(n) totsum += term n +=1 term = f(n) mg1 = mg2 mg2 = abs(term) totsum += 
term pole = -2*(s-1)**(-2)+(ctx.euler+ctx.log(16*ctx.pi**2*a))*(s-1)**(-1) st = factor*(pole+totsum) err = 0 if kwargs.get("error"): if not ((mg2 > tol) and (mg2 <= mg1)): if mg2 <= tol: err = ctx.mpf(10)**int(ctx.log(abs(factor*tol),10)) if mg2 > mg1: err = ctx.mpf(10)**int(ctx.log(abs(factor*mg1),10)) err = max(err, ctx.eps*1.) ctx.prec -= extraprec return +st, err @defun def secondzeta(ctx, s, a = 0.015, **kwargs): r""" Evaluates the secondary zeta function `Z(s)`, defined for `\mathrm{Re}(s)>1` by .. math :: Z(s) = \sum_{n=1}^{\infty} \frac{1}{\tau_n^s} where `\frac12+i\tau_n` runs through the zeros of `\zeta(s)` with imaginary part positive. `Z(s)` extends to a meromorphic function on `\mathbb{C}` with a double pole at `s=1` and simple poles at the points `-2n` for `n=0`, 1, 2, ... **Examples** >>> from mpmath import * >>> mp.pretty = True; mp.dps = 15 >>> secondzeta(2) 0.023104993115419 >>> xi = lambda s: 0.5*s*(s-1)*pi**(-0.5*s)*gamma(0.5*s)*zeta(s) >>> Xi = lambda t: xi(0.5+t*j) >>> -0.5*diff(Xi,0,n=2)/Xi(0) (0.023104993115419 + 0.0j) We may ask for an approximate error value:: >>> secondzeta(0.5+100j, error=True) ((-0.216272011276718 - 0.844952708937228j), 2.22044604925031e-16) The function has poles at the negative odd integers, and dyadic rational values at the negative even integers:: >>> mp.dps = 30 >>> secondzeta(-8) -0.67236328125 >>> secondzeta(-7) +inf **Implementation notes** The function is computed as sum of four terms `Z(s)=A(s)-P(s)+E(s)-S(s)` respectively main, prime, exponential and singular terms. The main term `A(s)` is computed from the zeros of zeta. The prime term depends on the von Mangoldt function. The singular term is responsible for the poles of the function. The four terms depends on a small parameter `a`. We may change the value of `a`. Theoretically this has no effect on the sum of the four terms, but in practice may be important. A smaller value of the parameter `a` makes `A(s)` depend on a smaller number of zeros of zeta, but `P(s)` uses more values of von Mangoldt function. We may also add a verbose option to obtain data about the values of the four terms. >>> mp.dps = 10 >>> secondzeta(0.5 + 40j, error=True, verbose=True) main term = (-30190318549.138656312556 - 13964804384.624622876523j) computed using 19 zeros of zeta prime term = (132717176.89212754625045 + 188980555.17563978290601j) computed using 9 values of the von Mangoldt function exponential term = (542447428666.07179812536 + 362434922978.80192435203j) singular term = (512124392939.98154322355 + 348281138038.65531023921j) ((0.059471043 + 0.3463514534j), 1.455191523e-11) >>> secondzeta(0.5 + 40j, a=0.04, error=True, verbose=True) main term = (-151962888.19606243907725 - 217930683.90210294051982j) computed using 9 zeros of zeta prime term = (2476659342.3038722372461 + 28711581821.921627163136j) computed using 37 values of the von Mangoldt function exponential term = (178506047114.7838188264 + 819674143244.45677330576j) singular term = (175877424884.22441310708 + 790744630738.28669174871j) ((0.059471043 + 0.3463514534j), 1.455191523e-11) Notice the great cancellation between the four terms. Changing `a`, the four terms are very different numbers but the cancellation gives the good value of Z(s). **References** A. Voros, Zeta functions for the Riemann zeros, Ann. Institute Fourier, 53, (2003) 665--699. A. Voros, Zeta functions over Zeros of Zeta Functions, Lecture Notes of the Unione Matematica Italiana, Springer, 2009. 
""" s = ctx.convert(s) a = ctx.convert(a) tol = ctx.eps if ctx.isint(s) and ctx.re(s) <= 1: if abs(s-1) < tol*1000: return ctx.inf m = int(round(ctx.re(s))) if m & 1: return ctx.inf else: return ((-1)**(-m//2)*\ ctx.fraction(8-ctx.eulernum(-m,exact=True),2**(-m+3))) prec = ctx.prec try: t3 = secondzeta_exp_term(ctx, s, a) extraprec = max(ctx.mag(t3),0) ctx.prec += extraprec + 3 t1, r1, gt = secondzeta_main_term(ctx,s,a,error='True', verbose='True') t2, r2, pt = secondzeta_prime_term(ctx,s,a,error='True', verbose='True') t4, r4 = secondzeta_singular_term(ctx,s,a,error='True') t3 = secondzeta_exp_term(ctx, s, a) err = r1+r2+r4 t = t1-t2+t3-t4 if kwargs.get("verbose"): print_('main term =', t1) print_(' computed using', gt, 'zeros of zeta') print_('prime term =', t2) print_(' computed using', pt, 'values of the von Mangoldt function') print_('exponential term =', t3) print_('singular term =', t4) finally: ctx.prec = prec if kwargs.get("error"): w = max(ctx.mag(abs(t)),0) err = max(err*2**w, ctx.eps*1.*2**w) return +t, err return +t @defun_wrapped def lerchphi(ctx, z, s, a): r""" Gives the Lerch transcendent, defined for `|z| < 1` and `\Re{a} > 0` by .. math :: \Phi(z,s,a) = \sum_{k=0}^{\infty} \frac{z^k}{(a+k)^s} and generally by the recurrence `\Phi(z,s,a) = z \Phi(z,s,a+1) + a^{-s}` along with the integral representation valid for `\Re{a} > 0` .. math :: \Phi(z,s,a) = \frac{1}{2 a^s} + \int_0^{\infty} \frac{z^t}{(a+t)^s} dt - 2 \int_0^{\infty} \frac{\sin(t \log z - s \operatorname{arctan}(t/a)}{(a^2 + t^2)^{s/2} (e^{2 \pi t}-1)} dt. The Lerch transcendent generalizes the Hurwitz zeta function :func:`zeta` (`z = 1`) and the polylogarithm :func:`polylog` (`a = 1`). **Examples** Several evaluations in terms of simpler functions:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> lerchphi(-1,2,0.5); 4*catalan 3.663862376708876060218414 3.663862376708876060218414 >>> diff(lerchphi, (-1,-2,1), (0,1,0)); 7*zeta(3)/(4*pi**2) 0.2131391994087528954617607 0.2131391994087528954617607 >>> lerchphi(-4,1,1); log(5)/4 0.4023594781085250936501898 0.4023594781085250936501898 >>> lerchphi(-3+2j,1,0.5); 2*atanh(sqrt(-3+2j))/sqrt(-3+2j) (1.142423447120257137774002 + 0.2118232380980201350495795j) (1.142423447120257137774002 + 0.2118232380980201350495795j) Evaluation works for complex arguments and `|z| \ge 1`:: >>> lerchphi(1+2j, 3-j, 4+2j) (0.002025009957009908600539469 + 0.003327897536813558807438089j) >>> lerchphi(-2,2,-2.5) -12.28676272353094275265944 >>> lerchphi(10,10,10) (-4.462130727102185701817349e-11 - 1.575172198981096218823481e-12j) >>> lerchphi(10,10,-10.5) (112658784011940.5605789002 - 498113185.5756221777743631j) Some degenerate cases:: >>> lerchphi(0,1,2) 0.5 >>> lerchphi(0,1,-2) -0.5 Reduction to simpler functions:: >>> lerchphi(1, 4.25+1j, 1) (1.044674457556746668033975 - 0.04674508654012658932271226j) >>> zeta(4.25+1j) (1.044674457556746668033975 - 0.04674508654012658932271226j) >>> lerchphi(1 - 0.5**10, 4.25+1j, 1) (1.044629338021507546737197 - 0.04667768813963388181708101j) >>> lerchphi(3, 4, 1) (1.249503297023366545192592 - 0.2314252413375664776474462j) >>> polylog(4, 3) / 3 (1.249503297023366545192592 - 0.2314252413375664776474462j) >>> lerchphi(3, 4, 1 - 0.5**10) (1.253978063946663945672674 - 0.2316736622836535468765376j) **References** 1. 
[DLMF]_ section 25.14

    """
    if z == 0:
        return a ** (-s)
    # Faster, but these cases are useful for testing right now
    if z == 1:
        return ctx.zeta(s, a)
    if a == 1:
        return ctx.polylog(s, z) / z
    if ctx.re(a) < 1:
        if ctx.isnpint(a):
            raise ValueError("Lerch transcendent complex infinity")
        m = int(ctx.ceil(1-ctx.re(a)))
        v = ctx.zero
        zpow = ctx.one
        for n in xrange(m):
            v += zpow / (a+n)**s
            zpow *= z
        return zpow * ctx.lerchphi(z,s, a+m) + v
    g = ctx.ln(z)
    v = 1/(2*a**s) + ctx.gammainc(1-s, -a*g) * (-g)**(s-1) / z**a
    h = s / 2
    r = 2*ctx.pi
    f = lambda t: ctx.sin(s*ctx.atan(t/a)-t*g) / \
        ((a**2+t**2)**h * ctx.expm1(r*t))
    v += 2*ctx.quad(f, [0, ctx.inf])
    if not ctx.im(z) and not ctx.im(s) and not ctx.im(a) and ctx.re(z) < 1:
        v = ctx.chop(v)
    return v
36,859
30.693895
88
py
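The zeta.py module above ties zeta, altzeta, polylog and lerchphi together through classical identities quoted in its docstrings. Below is a minimal usage sketch, assuming only that the mpmath package shown in this repo is installed and importable; the sample values 0.5, 2.5 and 0.3 are arbitrary illustrative choices, not part of the original file.

from mpmath import mp, polylog, zeta, altzeta, lerchphi, pi, log

mp.dps = 25  # working precision in decimal digits

# dilogarithm at 1/2:  Li_2(1/2) = pi^2/12 - ln(2)^2/2
print(polylog(2, 0.5))
print(pi**2/12 - log(2)**2/2)

# Dirichlet eta function:  altzeta(s) = (1 - 2^(1-s)) * zeta(s)
s = mp.mpf('2.5')
print(altzeta(s), (1 - 2**(1 - s))*zeta(s))

# Lerch transcendent reduces to the polylogarithm at a = 1 (see lerchphi above)
z = mp.mpf('0.3')
print(lerchphi(z, 3, 1), polylog(3, z)/z)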
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/orthogonal.py
from .functions import defun, defun_wrapped def _hermite_param(ctx, n, z, parabolic_cylinder): """ Combined calculation of the Hermite polynomial H_n(z) (and its generalization to complex n) and the parabolic cylinder function D. """ n, ntyp = ctx._convert_param(n) z = ctx.convert(z) q = -ctx.mpq_1_2 # For re(z) > 0, 2F0 -- http://functions.wolfram.com/ # HypergeometricFunctions/HermiteHGeneral/06/02/0009/ # Otherwise, there is a reflection formula # 2F0 + http://functions.wolfram.com/HypergeometricFunctions/ # HermiteHGeneral/16/01/01/0006/ # # TODO: # An alternative would be to use # http://functions.wolfram.com/HypergeometricFunctions/ # HermiteHGeneral/06/02/0006/ # # Also, the 1F1 expansion # http://functions.wolfram.com/HypergeometricFunctions/ # HermiteHGeneral/26/01/02/0001/ # should probably be used for tiny z if not z: T1 = [2, ctx.pi], [n, 0.5], [], [q*(n-1)], [], [], 0 if parabolic_cylinder: T1[1][0] += q*n return T1, can_use_2f0 = ctx.isnpint(-n) or ctx.re(z) > 0 or \ (ctx.re(z) == 0 and ctx.im(z) > 0) expprec = ctx.prec*4 + 20 if parabolic_cylinder: u = ctx.fmul(ctx.fmul(z,z,prec=expprec), -0.25, exact=True) w = ctx.fmul(z, ctx.sqrt(0.5,prec=expprec), prec=expprec) else: w = z w2 = ctx.fmul(w, w, prec=expprec) rw2 = ctx.fdiv(1, w2, prec=expprec) nrw2 = ctx.fneg(rw2, exact=True) nw = ctx.fneg(w, exact=True) if can_use_2f0: T1 = [2, w], [n, n], [], [], [q*n, q*(n-1)], [], nrw2 terms = [T1] else: T1 = [2, nw], [n, n], [], [], [q*n, q*(n-1)], [], nrw2 T2 = [2, ctx.pi, nw], [n+2, 0.5, 1], [], [q*n], [q*(n-1)], [1-q], w2 terms = [T1,T2] # Multiply by prefactor for D_n if parabolic_cylinder: expu = ctx.exp(u) for i in range(len(terms)): terms[i][1][0] += q*n terms[i][0].append(expu) terms[i][1].append(1) return tuple(terms) @defun def hermite(ctx, n, z, **kwargs): return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 0), [], **kwargs) @defun def pcfd(ctx, n, z, **kwargs): r""" Gives the parabolic cylinder function in Whittaker's notation `D_n(z) = U(-n-1/2, z)` (see :func:`~mpmath.pcfu`). It solves the differential equation .. math :: y'' + \left(n + \frac{1}{2} - \frac{1}{4} z^2\right) y = 0. and can be represented in terms of Hermite polynomials (see :func:`~mpmath.hermite`) as .. math :: D_n(z) = 2^{-n/2} e^{-z^2/4} H_n\left(\frac{z}{\sqrt{2}}\right). **Plots** .. literalinclude :: /plots/pcfd.py .. image :: /plots/pcfd.png **Examples** >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> pcfd(0,0); pcfd(1,0); pcfd(2,0); pcfd(3,0) 1.0 0.0 -1.0 0.0 >>> pcfd(4,0); pcfd(-3,0) 3.0 0.6266570686577501256039413 >>> pcfd('1/2', 2+3j) (-5.363331161232920734849056 - 3.858877821790010714163487j) >>> pcfd(2, -10) 1.374906442631438038871515e-9 Verifying the differential equation:: >>> n = mpf(2.5) >>> y = lambda z: pcfd(n,z) >>> z = 1.75 >>> chop(diff(y,z,2) + (n+0.5-0.25*z**2)*y(z)) 0.0 Rational Taylor series expansion when `n` is an integer:: >>> taylor(lambda z: pcfd(5,z), 0, 7) [0.0, 15.0, 0.0, -13.75, 0.0, 3.96875, 0.0, -0.6015625] """ return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 1), [], **kwargs) @defun def pcfu(ctx, a, z, **kwargs): r""" Gives the parabolic cylinder function `U(a,z)`, which may be defined for `\Re(z) > 0` in terms of the confluent U-function (see :func:`~mpmath.hyperu`) by .. math :: U(a,z) = 2^{-\frac{1}{4}-\frac{a}{2}} e^{-\frac{1}{4} z^2} U\left(\frac{a}{2}+\frac{1}{4}, \frac{1}{2}, \frac{1}{2}z^2\right) or, for arbitrary `z`, .. 
math :: e^{-\frac{1}{4}z^2} U(a,z) = U(a,0) \,_1F_1\left(-\tfrac{a}{2}+\tfrac{1}{4}; \tfrac{1}{2}; -\tfrac{1}{2}z^2\right) + U'(a,0) z \,_1F_1\left(-\tfrac{a}{2}+\tfrac{3}{4}; \tfrac{3}{2}; -\tfrac{1}{2}z^2\right). **Examples** Connection to other functions:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> z = mpf(3) >>> pcfu(0.5,z) 0.03210358129311151450551963 >>> sqrt(pi/2)*exp(z**2/4)*erfc(z/sqrt(2)) 0.03210358129311151450551963 >>> pcfu(0.5,-z) 23.75012332835297233711255 >>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2)) 23.75012332835297233711255 >>> pcfu(0.5,-z) 23.75012332835297233711255 >>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2)) 23.75012332835297233711255 """ n, _ = ctx._convert_param(a) return ctx.pcfd(-n-ctx.mpq_1_2, z) @defun def pcfv(ctx, a, z, **kwargs): r""" Gives the parabolic cylinder function `V(a,z)`, which can be represented in terms of :func:`~mpmath.pcfu` as .. math :: V(a,z) = \frac{\Gamma(a+\tfrac{1}{2}) (U(a,-z)-\sin(\pi a) U(a,z)}{\pi}. **Examples** Wronskian relation between `U` and `V`:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a, z = 2, 3 >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) 0.7978845608028653558798921 >>> sqrt(2/pi) 0.7978845608028653558798921 >>> a, z = 2.5, 3 >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) 0.7978845608028653558798921 >>> a, z = 0.25, -1 >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) 0.7978845608028653558798921 >>> a, z = 2+1j, 2+3j >>> chop(pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)) 0.7978845608028653558798921 """ n, ntype = ctx._convert_param(a) z = ctx.convert(z) q = ctx.mpq_1_2 r = ctx.mpq_1_4 if ntype == 'Q' and ctx.isint(n*2): # Faster for half-integers def h(): jz = ctx.fmul(z, -1j, exact=True) T1terms = _hermite_param(ctx, -n-q, z, 1) T2terms = _hermite_param(ctx, n-q, jz, 1) for T in T1terms: T[0].append(1j) T[1].append(1) T[3].append(q-n) u = ctx.expjpi((q*n-r)) * ctx.sqrt(2/ctx.pi) for T in T2terms: T[0].append(u) T[1].append(1) return T1terms + T2terms v = ctx.hypercomb(h, [], **kwargs) if ctx._is_real_type(n) and ctx._is_real_type(z): v = ctx._re(v) return v else: def h(n): w = ctx.square_exp_arg(z, -0.25) u = ctx.square_exp_arg(z, 0.5) e = ctx.exp(w) l = [ctx.pi, q, ctx.exp(w)] Y1 = l, [-q, n*q+r, 1], [r-q*n], [], [q*n+r], [q], u Y2 = l + [z], [-q, n*q-r, 1, 1], [1-r-q*n], [], [q*n+1-r], [1+q], u c, s = ctx.cospi_sinpi(r+q*n) Y1[0].append(s) Y2[0].append(c) for Y in (Y1, Y2): Y[1].append(1) Y[3].append(q-n) return Y1, Y2 return ctx.hypercomb(h, [n], **kwargs) @defun def pcfw(ctx, a, z, **kwargs): r""" Gives the parabolic cylinder function `W(a,z)` defined in (DLMF 12.14). 
**Examples** Value at the origin:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> a = mpf(0.25) >>> pcfw(a,0) 0.9722833245718180765617104 >>> power(2,-0.75)*sqrt(abs(gamma(0.25+0.5j*a)/gamma(0.75+0.5j*a))) 0.9722833245718180765617104 >>> diff(pcfw,(a,0),(0,1)) -0.5142533944210078966003624 >>> -power(2,-0.25)*sqrt(abs(gamma(0.75+0.5j*a)/gamma(0.25+0.5j*a))) -0.5142533944210078966003624 """ n, _ = ctx._convert_param(a) z = ctx.convert(z) def terms(): phi2 = ctx.arg(ctx.gamma(0.5 + ctx.j*n)) phi2 = (ctx.loggamma(0.5+ctx.j*n) - ctx.loggamma(0.5-ctx.j*n))/2j rho = ctx.pi/8 + 0.5*phi2 # XXX: cancellation computing k k = ctx.sqrt(1 + ctx.exp(2*ctx.pi*n)) - ctx.exp(ctx.pi*n) C = ctx.sqrt(k/2) * ctx.exp(0.25*ctx.pi*n) yield C * ctx.expj(rho) * ctx.pcfu(ctx.j*n, z*ctx.expjpi(-0.25)) yield C * ctx.expj(-rho) * ctx.pcfu(-ctx.j*n, z*ctx.expjpi(0.25)) v = ctx.sum_accurately(terms) if ctx._is_real_type(n) and ctx._is_real_type(z): v = ctx._re(v) return v """ Even/odd PCFs. Useful? @defun def pcfy1(ctx, a, z, **kwargs): a, _ = ctx._convert_param(n) z = ctx.convert(z) def h(): w = ctx.square_exp_arg(z) w1 = ctx.fmul(w, -0.25, exact=True) w2 = ctx.fmul(w, 0.5, exact=True) e = ctx.exp(w1) return [e], [1], [], [], [ctx.mpq_1_2*a+ctx.mpq_1_4], [ctx.mpq_1_2], w2 return ctx.hypercomb(h, [], **kwargs) @defun def pcfy2(ctx, a, z, **kwargs): a, _ = ctx._convert_param(n) z = ctx.convert(z) def h(): w = ctx.square_exp_arg(z) w1 = ctx.fmul(w, -0.25, exact=True) w2 = ctx.fmul(w, 0.5, exact=True) e = ctx.exp(w1) return [e, z], [1, 1], [], [], [ctx.mpq_1_2*a+ctx.mpq_3_4], \ [ctx.mpq_3_2], w2 return ctx.hypercomb(h, [], **kwargs) """ @defun_wrapped def gegenbauer(ctx, n, a, z, **kwargs): # Special cases: a+0.5, a*2 poles if ctx.isnpint(a): return 0*(z+n) if ctx.isnpint(a+0.5): # TODO: something else is required here # E.g.: gegenbauer(-2, -0.5, 3) == -12 if ctx.isnpint(n+1): raise NotImplementedError("Gegenbauer function with two limits") def h(a): a2 = 2*a T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z) return [T] return ctx.hypercomb(h, [a], **kwargs) def h(n): a2 = 2*a T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z) return [T] return ctx.hypercomb(h, [n], **kwargs) @defun_wrapped def jacobi(ctx, n, a, b, x, **kwargs): if not ctx.isnpint(a): def h(n): return (([], [], [a+n+1], [n+1, a+1], [-n, a+b+n+1], [a+1], (1-x)*0.5),) return ctx.hypercomb(h, [n], **kwargs) if not ctx.isint(b): def h(n, a): return (([], [], [-b], [n+1, -b-n], [-n, a+b+n+1], [b+1], (x+1)*0.5),) return ctx.hypercomb(h, [n, a], **kwargs) # XXX: determine appropriate limit return ctx.binomial(n+a,n) * ctx.hyp2f1(-n,1+n+a+b,a+1,(1-x)/2, **kwargs) @defun_wrapped def laguerre(ctx, n, a, z, **kwargs): # XXX: limits, poles #if ctx.isnpint(n): # return 0*(a+z) def h(a): return (([], [], [a+n+1], [a+1, n+1], [-n], [a+1], z),) return ctx.hypercomb(h, [a], **kwargs) @defun_wrapped def legendre(ctx, n, x, **kwargs): if ctx.isint(n): n = int(n) # Accuracy near zeros if (n + (n < 0)) & 1: if not x: return x mag = ctx.mag(x) if mag < -2*ctx.prec-10: return x if mag < -5: ctx.prec += -mag return ctx.hyp2f1(-n,n+1,1,(1-x)/2, **kwargs) @defun def legenp(ctx, n, m, z, type=2, **kwargs): # Legendre function, 1st kind n = ctx.convert(n) m = ctx.convert(m) # Faster if not m: return ctx.legendre(n, z, **kwargs) # TODO: correct evaluation at singularities if type == 2: def h(n,m): g = m*0.5 T = [1+z, 1-z], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z) return (T,) return ctx.hypercomb(h, [n,m], **kwargs) if type == 3: def h(n,m): 
g = m*0.5 T = [z+1, z-1], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z) return (T,) return ctx.hypercomb(h, [n,m], **kwargs) raise ValueError("requires type=2 or type=3") @defun def legenq(ctx, n, m, z, type=2, **kwargs): # Legendre function, 2nd kind n = ctx.convert(n) m = ctx.convert(m) z = ctx.convert(z) if z in (1, -1): #if ctx.isint(m): # return ctx.nan #return ctx.inf # unsigned return ctx.nan if type == 2: def h(n, m): cos, sin = ctx.cospi_sinpi(m) s = 2 * sin / ctx.pi c = cos a = 1+z b = 1-z u = m/2 w = (1-z)/2 T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \ [-n, n+1], [1-m], w T2 = [-s, a, b], [-1, -u, u], [n+m+1], [n-m+1, m+1], \ [-n, n+1], [m+1], w return T1, T2 return ctx.hypercomb(h, [n, m], **kwargs) if type == 3: # The following is faster when there only is a single series # Note: not valid for -1 < z < 0 (?) if abs(z) > 1: def h(n, m): T1 = [ctx.expjpi(m), 2, ctx.pi, z, z-1, z+1], \ [1, -n-1, 0.5, -n-m-1, 0.5*m, 0.5*m], \ [n+m+1], [n+1.5], \ [0.5*(2+n+m), 0.5*(1+n+m)], [n+1.5], z**(-2) return [T1] return ctx.hypercomb(h, [n, m], **kwargs) else: # not valid for 1 < z < inf ? def h(n, m): s = 2 * ctx.sinpi(m) / ctx.pi c = ctx.expjpi(m) a = 1+z b = z-1 u = m/2 w = (1-z)/2 T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \ [-n, n+1], [1-m], w T2 = [-s, c, a, b], [-1, 1, -u, u], [n+m+1], [n-m+1, m+1], \ [-n, n+1], [m+1], w return T1, T2 return ctx.hypercomb(h, [n, m], **kwargs) raise ValueError("requires type=2 or type=3") @defun_wrapped def chebyt(ctx, n, x, **kwargs): if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1: return x * 0 return ctx.hyp2f1(-n,n,(1,2),(1-x)/2, **kwargs) @defun_wrapped def chebyu(ctx, n, x, **kwargs): if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1: return x * 0 return (n+1) * ctx.hyp2f1(-n, n+2, (3,2), (1-x)/2, **kwargs) @defun def spherharm(ctx, l, m, theta, phi, **kwargs): l = ctx.convert(l) m = ctx.convert(m) theta = ctx.convert(theta) phi = ctx.convert(phi) l_isint = ctx.isint(l) l_natural = l_isint and l >= 0 m_isint = ctx.isint(m) if l_isint and l < 0 and m_isint: return ctx.spherharm(-(l+1), m, theta, phi, **kwargs) if theta == 0 and m_isint and m < 0: return ctx.zero * 1j if l_natural and m_isint: if abs(m) > l: return ctx.zero * 1j # http://functions.wolfram.com/Polynomials/ # SphericalHarmonicY/26/01/02/0004/ def h(l,m): absm = abs(m) C = [-1, ctx.expj(m*phi), (2*l+1)*ctx.fac(l+absm)/ctx.pi/ctx.fac(l-absm), ctx.sin(theta)**2, ctx.fac(absm), 2] P = [0.5*m*(ctx.sign(m)+1), 1, 0.5, 0.5*absm, -1, -absm-1] return ((C, P, [], [], [absm-l, l+absm+1], [absm+1], ctx.sin(0.5*theta)**2),) else: # http://functions.wolfram.com/HypergeometricFunctions/ # SphericalHarmonicYGeneral/26/01/02/0001/ def h(l,m): if ctx.isnpint(l-m+1) or ctx.isnpint(l+m+1) or ctx.isnpint(1-m): return (([0], [-1], [], [], [], [], 0),) cos, sin = ctx.cos_sin(0.5*theta) C = [0.5*ctx.expj(m*phi), (2*l+1)/ctx.pi, ctx.gamma(l-m+1), ctx.gamma(l+m+1), cos**2, sin**2] P = [1, 0.5, 0.5, -0.5, 0.5*m, -0.5*m] return ((C, P, [], [1-m], [-l,l+1], [1-m], sin**2),) return ctx.hypercomb(h, [l,m], **kwargs)
16,097
31.587045
84
py
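A short sketch of the orthogonal-polynomial and parabolic-cylinder routines defined in orthogonal.py above, assuming mpmath is installed. It checks the Hermite/pcfd relation quoted in the pcfd docstring and two textbook special cases; the evaluation points 1.7, 0.6 and 0.4 are arbitrary.

from mpmath import mp, hermite, pcfd, legendre, chebyt, exp, sqrt, cos

mp.dps = 25

# D_n(z) = 2^(-n/2) exp(-z^2/4) H_n(z/sqrt(2))   (relation from the pcfd docstring)
n, z = 4, mp.mpf('1.7')
print(pcfd(n, z))
print(2**(-mp.mpf(n)/2) * exp(-z**2/4) * hermite(n, z/sqrt(2)))

# Legendre polynomial P_3(x) = (5x^3 - 3x)/2
x = mp.mpf('0.6')
print(legendre(3, x), (5*x**3 - 3*x)/2)

# Chebyshev polynomial of the first kind: T_n(cos t) = cos(n t)
t = mp.mpf('0.4')
print(chebyt(5, cos(t)), cos(5*t))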
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/expintegrals.py
from .functions import defun, defun_wrapped @defun_wrapped def _erf_complex(ctx, z): z2 = ctx.square_exp_arg(z, -1) #z2 = -z**2 v = (2/ctx.sqrt(ctx.pi))*z * ctx.hyp1f1((1,2),(3,2), z2) if not ctx._re(z): v = ctx._im(v)*ctx.j return v @defun_wrapped def _erfc_complex(ctx, z): if ctx.re(z) > 2: z2 = ctx.square_exp_arg(z) nz2 = ctx.fneg(z2, exact=True) v = ctx.exp(nz2)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), z2) else: v = 1 - ctx._erf_complex(z) if not ctx._re(z): v = 1+ctx._im(v)*ctx.j return v @defun def erf(ctx, z): z = ctx.convert(z) if ctx._is_real_type(z): try: return ctx._erf(z) except NotImplementedError: pass if ctx._is_complex_type(z) and not z.imag: try: return type(z)(ctx._erf(z.real)) except NotImplementedError: pass return ctx._erf_complex(z) @defun def erfc(ctx, z): z = ctx.convert(z) if ctx._is_real_type(z): try: return ctx._erfc(z) except NotImplementedError: pass if ctx._is_complex_type(z) and not z.imag: try: return type(z)(ctx._erfc(z.real)) except NotImplementedError: pass return ctx._erfc_complex(z) @defun def square_exp_arg(ctx, z, mult=1, reciprocal=False): prec = ctx.prec*4+20 if reciprocal: z2 = ctx.fmul(z, z, prec=prec) z2 = ctx.fdiv(ctx.one, z2, prec=prec) else: z2 = ctx.fmul(z, z, prec=prec) if mult != 1: z2 = ctx.fmul(z2, mult, exact=True) return z2 @defun_wrapped def erfi(ctx, z): if not z: return z z2 = ctx.square_exp_arg(z) v = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), z2) if not ctx._re(z): v = ctx._im(v)*ctx.j return v @defun_wrapped def erfinv(ctx, x): xre = ctx._re(x) if (xre != x) or (xre < -1) or (xre > 1): return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1") x = xre #if ctx.isnan(x): return x if not x: return x if x == 1: return ctx.inf if x == -1: return ctx.ninf if abs(x) < 0.9: a = 0.53728*x**3 + 0.813198*x else: # An asymptotic formula u = ctx.ln(2/ctx.pi/(abs(x)-1)**2) a = ctx.sign(x) * ctx.sqrt(u - ctx.ln(u))/ctx.sqrt(2) ctx.prec += 10 return ctx.findroot(lambda t: ctx.erf(t)-x, a) @defun_wrapped def npdf(ctx, x, mu=0, sigma=1): sigma = ctx.convert(sigma) return ctx.exp(-(x-mu)**2/(2*sigma**2)) / (sigma*ctx.sqrt(2*ctx.pi)) @defun_wrapped def ncdf(ctx, x, mu=0, sigma=1): a = (x-mu)/(sigma*ctx.sqrt(2)) if a < 0: return ctx.erfc(-a)/2 else: return (1+ctx.erf(a))/2 @defun_wrapped def betainc(ctx, a, b, x1=0, x2=1, regularized=False): if x1 == x2: v = 0 elif not x1: if x1 == 0 and x2 == 1: v = ctx.beta(a, b) else: v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a else: m, d = ctx.nint_distance(a) if m <= 0: if d < -ctx.prec: h = +ctx.eps ctx.prec *= 2 a += h elif d < -4: ctx.prec -= d s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2) s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1) v = (s1 - s2) / a if regularized: v /= ctx.beta(a,b) return v @defun def gammainc(ctx, z, a=0, b=None, regularized=False): regularized = bool(regularized) z = ctx.convert(z) if a is None: a = ctx.zero lower_modified = False else: a = ctx.convert(a) lower_modified = a != ctx.zero if b is None: b = ctx.inf upper_modified = False else: b = ctx.convert(b) upper_modified = b != ctx.inf # Complete gamma function if not (upper_modified or lower_modified): if regularized: if ctx.re(z) < 0: return ctx.inf elif ctx.re(z) > 0: return ctx.one else: return ctx.nan return ctx.gamma(z) if a == b: return ctx.zero # Standardize if ctx.re(a) > ctx.re(b): return -ctx.gammainc(z, b, a, regularized) # Generalized gamma if upper_modified and lower_modified: return +ctx._gamma3(z, a, b, regularized) # Upper gamma elif lower_modified: return ctx._upper_gamma(z, a, regularized) # Lower gamma elif 
upper_modified: return ctx._lower_gamma(z, b, regularized) @defun def _lower_gamma(ctx, z, b, regularized=False): # Pole if ctx.isnpint(z): return type(z)(ctx.inf) G = [z] * regularized negb = ctx.fneg(b, exact=True) def h(z): T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b return (T1,) return ctx.hypercomb(h, [z]) @defun def _upper_gamma(ctx, z, a, regularized=False): # Fast integer case, when available if ctx.isint(z): try: if regularized: # Gamma pole if ctx.isnpint(z): return type(z)(ctx.zero) orig = ctx.prec try: ctx.prec += 10 return ctx._gamma_upper_int(z, a) / ctx.gamma(z) finally: ctx.prec = orig else: return ctx._gamma_upper_int(z, a) except NotImplementedError: pass # hypercomb is unable to detect the exact zeros, so handle them here if z == 2 and a == -1: return (z+a)*0 if z == 3 and (a == -1-1j or a == -1+1j): return (z+a)*0 nega = ctx.fneg(a, exact=True) G = [z] * regularized # Use 2F0 series when possible; fall back to lower gamma representation try: def h(z): r = z-1 return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)] return ctx.hypercomb(h, [z], force_series=True) except ctx.NoConvergence: def h(z): T1 = [], [1, z-1], [z], G, [], [], 0 T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a return T1, T2 return ctx.hypercomb(h, [z]) @defun def _gamma3(ctx, z, a, b, regularized=False): pole = ctx.isnpint(z) if regularized and pole: return ctx.zero try: ctx.prec += 15 # We don't know in advance whether it's better to write as a difference # of lower or upper gamma functions, so try both T1 = ctx.gammainc(z, a, regularized=regularized) T2 = ctx.gammainc(z, b, regularized=regularized) R = T1 - T2 if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10: return R if not pole: T1 = ctx.gammainc(z, 0, b, regularized=regularized) T2 = ctx.gammainc(z, 0, a, regularized=regularized) R = T1 - T2 # May be ok, but should probably at least print a warning # about possible cancellation if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10: return R finally: ctx.prec -= 15 raise NotImplementedError @defun_wrapped def expint(ctx, n, z): if ctx.isint(n) and ctx._is_real_type(z): try: return ctx._expint_int(n, z) except NotImplementedError: pass if ctx.isnan(n) or ctx.isnan(z): return z*n if z == ctx.inf: return 1/z if z == 0: # integral from 1 to infinity of t^n if ctx.re(n) <= 1: # TODO: reasonable sign of infinity return type(z)(ctx.inf) else: return ctx.one/(n-1) if n == 0: return ctx.exp(-z)/z if n == -1: return ctx.exp(-z)*(z+1)/z**2 return z**(n-1) * ctx.gammainc(1-n, z) @defun_wrapped def li(ctx, z, offset=False): if offset: if z == 2: return ctx.zero return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2) if not z: return z if z == 1: return ctx.ninf return ctx.ei(ctx.ln(z)) @defun def ei(ctx, z): try: return ctx._ei(z) except NotImplementedError: return ctx._ei_generic(z) @defun_wrapped def _ei_generic(ctx, z): # Note: the following is currently untested because mp and fp # both use special-case ei code if z == ctx.inf: return z if z == ctx.ninf: return ctx.zero if ctx.mag(z) > 1: try: r = ctx.one/z v = ctx.exp(z)*ctx.hyper([1,1],[],r, maxterms=ctx.prec, force_series=True)/z im = ctx._im(z) if im > 0: v += ctx.pi*ctx.j if im < 0: v -= ctx.pi*ctx.j return v except ctx.NoConvergence: pass v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler if ctx._im(z): v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z)) else: v += ctx.log(abs(z)) return v @defun def e1(ctx, z): try: return ctx._e1(z) except NotImplementedError: return ctx.expint(1, z) @defun def ci(ctx, z): try: return ctx._ci(z) 
except NotImplementedError: return ctx._ci_generic(z) @defun_wrapped def _ci_generic(ctx, z): if ctx.isinf(z): if z == ctx.inf: return ctx.zero if z == ctx.ninf: return ctx.pi*1j jz = ctx.fmul(ctx.j,z,exact=True) njz = ctx.fneg(jz,exact=True) v = 0.5*(ctx.ei(jz) + ctx.ei(njz)) zreal = ctx._re(z) zimag = ctx._im(z) if zreal == 0: if zimag > 0: v += ctx.pi*0.5j if zimag < 0: v -= ctx.pi*0.5j if zreal < 0: if zimag >= 0: v += ctx.pi*1j if zimag < 0: v -= ctx.pi*1j if ctx._is_real_type(z) and zreal > 0: v = ctx._re(v) return v @defun def si(ctx, z): try: return ctx._si(z) except NotImplementedError: return ctx._si_generic(z) @defun_wrapped def _si_generic(ctx, z): if ctx.isinf(z): if z == ctx.inf: return 0.5*ctx.pi if z == ctx.ninf: return -0.5*ctx.pi # Suffers from cancellation near 0 if ctx.mag(z) >= -1: jz = ctx.fmul(ctx.j,z,exact=True) njz = ctx.fneg(jz,exact=True) v = (-0.5j)*(ctx.ei(jz) - ctx.ei(njz)) zreal = ctx._re(z) if zreal > 0: v -= 0.5*ctx.pi if zreal < 0: v += 0.5*ctx.pi if ctx._is_real_type(z): v = ctx._re(v) return v else: return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z) @defun_wrapped def chi(ctx, z): nz = ctx.fneg(z, exact=True) v = 0.5*(ctx.ei(z) + ctx.ei(nz)) zreal = ctx._re(z) zimag = ctx._im(z) if zimag > 0: v += ctx.pi*0.5j elif zimag < 0: v -= ctx.pi*0.5j elif zreal < 0: v += ctx.pi*1j return v @defun_wrapped def shi(ctx, z): # Suffers from cancellation near 0 if ctx.mag(z) >= -1: nz = ctx.fneg(z, exact=True) v = 0.5*(ctx.ei(z) - ctx.ei(nz)) zimag = ctx._im(z) if zimag > 0: v -= 0.5j*ctx.pi if zimag < 0: v += 0.5j*ctx.pi return v else: return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z) @defun_wrapped def fresnels(ctx, z): if z == ctx.inf: return ctx.mpf(0.5) if z == ctx.ninf: return ctx.mpf(-0.5) return ctx.pi*z**3/6*ctx.hyp1f2((3,4),(3,2),(7,4),-ctx.pi**2*z**4/16) @defun_wrapped def fresnelc(ctx, z): if z == ctx.inf: return ctx.mpf(0.5) if z == ctx.ninf: return ctx.mpf(-0.5) return z*ctx.hyp1f2((1,4),(1,2),(5,4),-ctx.pi**2*z**4/16)
11,644
26.335681
79
py
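A minimal usage sketch for the error-function and exponential-integral wrappers in expintegrals.py above, assuming mpmath is installed. It exercises the erf/erfinv round trip, the complementary split of the regularized incomplete gamma function, and the li(x) = Ei(ln x) and E_1(x) = expint(1, x) identities used directly in the code; the numeric test points are arbitrary.

from mpmath import mp, erf, erfinv, gammainc, ei, li, e1, expint, log

mp.dps = 25

# erfinv inverts erf on (-1, 1)
x = mp.mpf('0.3')
print(erfinv(erf(x)))  # recovers 0.3

# regularized lower + upper incomplete gamma pieces sum to 1
z, t = mp.mpf('2.5'), mp.mpf('1.2')
print(gammainc(z, 0, t, regularized=True) + gammainc(z, t, regularized=True))

# li(x) = Ei(ln x) for x > 1, and E_1(x) = expint(1, x)
print(li(10), ei(log(10)))
print(e1(2), expint(1, 2))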
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/__init__.py
from . import functions
# Hack to update methods
from . import factorials
from . import hypergeometric
from . import expintegrals
from . import bessel
from . import orthogonal
from . import theta
from . import elliptic
from . import zeta
from . import rszeta
from . import zetazeros
from . import qfunctions
308
21.071429
28
py
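The __init__.py above only imports the submodules so that their @defun-registered functions get attached to the mpmath context classes. A tiny check, assuming mpmath is installed, that everything is then reachable from the single top-level namespace:

import mpmath

print(mpmath.besselj(0, 3.5))    # from bessel.py
print(mpmath.ellipk(0.5))        # from elliptic.py
print(mpmath.jtheta(3, 0, 0.1))  # from theta.py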
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/zetazeros.py
""" The function zetazero(n) computes the n-th nontrivial zero of zeta(s). The general strategy is to locate a block of Gram intervals B where we know exactly the number of zeros contained and which of those zeros is that which we search. If n <= 400 000 000 we know exactly the Rosser exceptions, contained in a list in this file. Hence for n<=400 000 000 we simply look at these list of exceptions. If our zero is implicated in one of these exceptions we have our block B. In other case we simply locate the good Rosser block containing our zero. For n > 400 000 000 we apply the method of Turing, as complemented by Lehman, Brent and Trudgian to find a suitable B. """ from .functions import defun, defun_wrapped def find_rosser_block_zero(ctx, n): """for n<400 000 000 determines a block were one find our zero""" for k in range(len(_ROSSER_EXCEPTIONS)//2): a=_ROSSER_EXCEPTIONS[2*k][0] b=_ROSSER_EXCEPTIONS[2*k][1] if ((a<= n-2) and (n-1 <= b)): t0 = ctx.grampoint(a) t1 = ctx.grampoint(b) v0 = ctx._fp.siegelz(t0) v1 = ctx._fp.siegelz(t1) my_zero_number = n-a-1 zero_number_block = b-a pattern = _ROSSER_EXCEPTIONS[2*k+1] return (my_zero_number, [a,b], [t0,t1], [v0,v1]) k = n-2 t,v,b = compute_triple_tvb(ctx, k) T = [t] V = [v] while b < 0: k -= 1 t,v,b = compute_triple_tvb(ctx, k) T.insert(0,t) V.insert(0,v) my_zero_number = n-k-1 m = n-1 t,v,b = compute_triple_tvb(ctx, m) T.append(t) V.append(v) while b < 0: m += 1 t,v,b = compute_triple_tvb(ctx, m) T.append(t) V.append(v) return (my_zero_number, [k,m], T, V) def wpzeros(t): """Precision needed to compute higher zeros""" wp = 53 if t > 3*10**8: wp = 63 if t > 10**11: wp = 70 if t > 10**14: wp = 83 return wp def separate_zeros_in_block(ctx, zero_number_block, T, V, limitloop=None, fp_tolerance=None): """Separate the zeros contained in the block T, limitloop determines how long one must search""" if limitloop is None: limitloop = ctx.inf loopnumber = 0 variations = count_variations(V) while ((variations < zero_number_block) and (loopnumber <limitloop)): a = T[0] v = V[0] newT = [a] newV = [v] variations = 0 for n in range(1,len(T)): b2 = T[n] u = V[n] if (u*v>0): alpha = ctx.sqrt(u/v) b= (alpha*a+b2)/(alpha+1) else: b = (a+b2)/2 if fp_tolerance < 10: w = ctx._fp.siegelz(b) if abs(w)<fp_tolerance: w = ctx.siegelz(b) else: w=ctx.siegelz(b) if v*w<0: variations += 1 newT.append(b) newV.append(w) u = V[n] if u*w <0: variations += 1 newT.append(b2) newV.append(u) a = b2 v = u T = newT V = newV loopnumber +=1 if (limitloop>ITERATION_LIMIT)and(loopnumber>2)and(variations+2==zero_number_block): dtMax=0 dtSec=0 kMax = 0 for k1 in range(1,len(T)): dt = T[k1]-T[k1-1] if dt > dtMax: kMax=k1 dtSec = dtMax dtMax = dt elif (dt<dtMax) and(dt >dtSec): dtSec = dt if dtMax>3*dtSec: f = lambda x: ctx.rs_z(x,derivative=1) t0=T[kMax-1] t1 = T[kMax] t=ctx.findroot(f, (t0,t1), solver ='illinois',verify=False, verbose=False) v = ctx.siegelz(t) if (t0<t) and (t<t1) and (v*V[kMax]<0): T.insert(kMax,t) V.insert(kMax,v) variations = count_variations(V) if variations == zero_number_block: separated = True else: separated = False return (T,V, separated) def separate_my_zero(ctx, my_zero_number, zero_number_block, T, V, prec): """If we know which zero of this block is mine, the function separates the zero""" variations = 0 v0 = V[0] for k in range(1,len(V)): v1 = V[k] if v0*v1 < 0: variations +=1 if variations == my_zero_number: k0 = k leftv = v0 rightv = v1 v0 = v1 t1 = T[k0] t0 = T[k0-1] ctx.prec = prec wpz = wpzeros(my_zero_number*ctx.log(my_zero_number)) guard = 
4*ctx.mag(my_zero_number) precs = [ctx.prec+4] index=0 while precs[0] > 2*wpz: index +=1 precs = [precs[0] // 2 +3+2*index] + precs ctx.prec = precs[0] + guard r = ctx.findroot(lambda x:ctx.siegelz(x), (t0,t1), solver ='illinois', verbose=False) #print "first step at", ctx.dps, "digits" z=ctx.mpc(0.5,r) for prec in precs[1:]: ctx.prec = prec + guard #print "refining to", ctx.dps, "digits" znew = z - ctx.zeta(z) / ctx.zeta(z, derivative=1) #print "difference", ctx.nstr(abs(z-znew)) z=ctx.mpc(0.5,ctx.im(znew)) return ctx.im(z) def sure_number_block(ctx, n): """The number of good Rosser blocks needed to apply Turing method References: R. P. Brent, On the Zeros of the Riemann Zeta Function in the Critical Strip, Math. Comp. 33 (1979) 1361--1372 T. Trudgian, Improvements to Turing Method, Math. Comp.""" if n < 9*10**5: return(2) g = ctx.grampoint(n-100) lg = ctx._fp.ln(g) brent = 0.0061 * lg**2 +0.08*lg trudgian = 0.0031 * lg**2 +0.11*lg N = ctx.ceil(min(brent,trudgian)) N = int(N) return N def compute_triple_tvb(ctx, n): t = ctx.grampoint(n) v = ctx._fp.siegelz(t) if ctx.mag(abs(v))<ctx.mag(t)-45: v = ctx.siegelz(t) b = v*(-1)**n return t,v,b ITERATION_LIMIT = 4 def search_supergood_block(ctx, n, fp_tolerance): """To use for n>400 000 000""" sb = sure_number_block(ctx, n) number_goodblocks = 0 m2 = n-1 t, v, b = compute_triple_tvb(ctx, m2) Tf = [t] Vf = [v] while b < 0: m2 += 1 t,v,b = compute_triple_tvb(ctx, m2) Tf.append(t) Vf.append(v) goodpoints = [m2] T = [t] V = [v] while number_goodblocks < 2*sb: m2 += 1 t, v, b = compute_triple_tvb(ctx, m2) T.append(t) V.append(v) while b < 0: m2 += 1 t,v,b = compute_triple_tvb(ctx, m2) T.append(t) V.append(v) goodpoints.append(m2) zn = len(T)-1 A, B, separated =\ separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance) Tf.pop() Tf.extend(A) Vf.pop() Vf.extend(B) if separated: number_goodblocks += 1 else: number_goodblocks = 0 T = [t] V = [v] # Now the same procedure to the left number_goodblocks = 0 m2 = n-2 t, v, b = compute_triple_tvb(ctx, m2) Tf.insert(0,t) Vf.insert(0,v) while b < 0: m2 -= 1 t,v,b = compute_triple_tvb(ctx, m2) Tf.insert(0,t) Vf.insert(0,v) goodpoints.insert(0,m2) T = [t] V = [v] while number_goodblocks < 2*sb: m2 -= 1 t, v, b = compute_triple_tvb(ctx, m2) T.insert(0,t) V.insert(0,v) while b < 0: m2 -= 1 t,v,b = compute_triple_tvb(ctx, m2) T.insert(0,t) V.insert(0,v) goodpoints.insert(0,m2) zn = len(T)-1 A, B, separated =\ separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance) A.pop() Tf = A+Tf B.pop() Vf = B+Vf if separated: number_goodblocks += 1 else: number_goodblocks = 0 T = [t] V = [v] r = goodpoints[2*sb] lg = len(goodpoints) s = goodpoints[lg-2*sb-1] tr, vr, br = compute_triple_tvb(ctx, r) ar = Tf.index(tr) ts, vs, bs = compute_triple_tvb(ctx, s) as1 = Tf.index(ts) T = Tf[ar:as1+1] V = Vf[ar:as1+1] zn = s-r A, B, separated =\ separate_zeros_in_block(ctx, zn,T,V,limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance) if separated: return (n-r-1,[r,s],A,B) q = goodpoints[sb] lg = len(goodpoints) t = goodpoints[lg-sb-1] tq, vq, bq = compute_triple_tvb(ctx, q) aq = Tf.index(tq) tt, vt, bt = compute_triple_tvb(ctx, t) at = Tf.index(tt) T = Tf[aq:at+1] V = Vf[aq:at+1] return (n-q-1,[q,t],T,V) def count_variations(V): count = 0 vold = V[0] for n in range(1, len(V)): vnew = V[n] if vold*vnew < 0: count +=1 vold = vnew return count def pattern_construct(ctx, block, T, V): pattern = '(' a = block[0] b = block[1] t0,v0,b0 = compute_triple_tvb(ctx, a) k = 0 k0 = 0 
for n in range(a+1,b+1): t1,v1,b1 = compute_triple_tvb(ctx, n) lgT =len(T) while (k < lgT) and (T[k] <= t1): k += 1 L = V[k0:k] L.append(v1) L.insert(0,v0) count = count_variations(L) pattern = pattern + ("%s" % count) if b1 > 0: pattern = pattern + ')(' k0 = k t0,v0,b0 = t1,v1,b1 pattern = pattern[:-1] return pattern @defun def zetazero(ctx, n, info=False, round=True): r""" Computes the `n`-th nontrivial zero of `\zeta(s)` on the critical line, i.e. returns an approximation of the `n`-th largest complex number `s = \frac{1}{2} + ti` for which `\zeta(s) = 0`. Equivalently, the imaginary part `t` is a zero of the Z-function (:func:`~mpmath.siegelz`). **Examples** The first few zeros:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> zetazero(1) (0.5 + 14.13472514173469379045725j) >>> zetazero(2) (0.5 + 21.02203963877155499262848j) >>> zetazero(20) (0.5 + 77.14484006887480537268266j) Verifying that the values are zeros:: >>> for n in range(1,5): ... s = zetazero(n) ... chop(zeta(s)), chop(siegelz(s.imag)) ... (0.0, 0.0) (0.0, 0.0) (0.0, 0.0) (0.0, 0.0) Negative indices give the conjugate zeros (`n = 0` is undefined):: >>> zetazero(-1) (0.5 - 14.13472514173469379045725j) :func:`~mpmath.zetazero` supports arbitrarily large `n` and arbitrary precision:: >>> mp.dps = 15 >>> zetazero(1234567) (0.5 + 727690.906948208j) >>> mp.dps = 50 >>> zetazero(1234567) (0.5 + 727690.9069482075392389420041147142092708393819935j) >>> chop(zeta(_)/_) 0.0 with *info=True*, :func:`~mpmath.zetazero` gives additional information:: >>> mp.dps = 15 >>> zetazero(542964976,info=True) ((0.5 + 209039046.578535j), [542964969, 542964978], 6, '(013111110)') This means that the zero is between Gram points 542964969 and 542964978; it is the 6-th zero between them. Finally (01311110) is the pattern of zeros in this interval. The numbers indicate the number of zeros in each Gram interval (Rosser blocks between parenthesis). In this case there is only one Rosser block of length nine. 
""" n = int(n) if n < 0: return ctx.zetazero(-n).conjugate() if n == 0: raise ValueError("n must be nonzero") wpinitial = ctx.prec try: wpz, fp_tolerance = comp_fp_tolerance(ctx, n) ctx.prec = wpz if n < 400000000: my_zero_number, block, T, V =\ find_rosser_block_zero(ctx, n) else: my_zero_number, block, T, V =\ search_supergood_block(ctx, n, fp_tolerance) zero_number_block = block[1]-block[0] T, V, separated = separate_zeros_in_block(ctx, zero_number_block, T, V, limitloop=ctx.inf, fp_tolerance=fp_tolerance) if info: pattern = pattern_construct(ctx,block,T,V) prec = max(wpinitial, wpz) t = separate_my_zero(ctx, my_zero_number, zero_number_block,T,V,prec) v = ctx.mpc(0.5,t) finally: ctx.prec = wpinitial if round: v =+v if info: return (v,block,my_zero_number,pattern) else: return v def gram_index(ctx, t): if t > 10**13: wp = 3*ctx.log(t, 10) else: wp = 0 prec = ctx.prec try: ctx.prec += wp x0 = (t/(2*ctx.pi))*ctx.log(t/(2*ctx.pi)) h = ctx.findroot(lambda x:ctx.siegeltheta(t)-ctx.pi*x, x0) h = int(h) finally: ctx.prec = prec return(h) def count_to(ctx, t, T, V): count = 0 vold = V[0] told = T[0] tnew = T[1] k = 1 while tnew < t: vnew = V[k] if vold*vnew < 0: count += 1 vold = vnew k += 1 tnew = T[k] a = ctx.siegelz(t) if a*vold < 0: count += 1 return count def comp_fp_tolerance(ctx, n): wpz = wpzeros(n*ctx.log(n)) if n < 15*10**8: fp_tolerance = 0.0005 elif n <= 10**14: fp_tolerance = 0.1 else: fp_tolerance = 100 return wpz, fp_tolerance @defun def nzeros(ctx, t): r""" Computes the number of zeros of the Riemann zeta function in `(0,1) \times (0,t]`, usually denoted by `N(t)`. **Examples** The first zero has imaginary part between 14 and 15:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> nzeros(14) 0 >>> nzeros(15) 1 >>> zetazero(1) (0.5 + 14.1347251417347j) Some closely spaced zeros:: >>> nzeros(10**7) 21136125 >>> zetazero(21136125) (0.5 + 9999999.32718175j) >>> zetazero(21136126) (0.5 + 10000000.2400236j) >>> nzeros(545439823.215) 1500000001 >>> zetazero(1500000001) (0.5 + 545439823.201985j) >>> zetazero(1500000002) (0.5 + 545439823.325697j) This confirms the data given by J. van de Lune, H. J. J. te Riele and D. T. Winter in 1986. """ if t < 14.1347251417347: return 0 x = gram_index(ctx, t) k = int(ctx.floor(x)) wpinitial = ctx.prec wpz, fp_tolerance = comp_fp_tolerance(ctx, k) ctx.prec = wpz a = ctx.siegelz(t) if k == -1 and a < 0: return 0 elif k == -1 and a > 0: return 1 if k+2 < 400000000: Rblock = find_rosser_block_zero(ctx, k+2) else: Rblock = search_supergood_block(ctx, k+2, fp_tolerance) n1, n2 = Rblock[1] if n2-n1 == 1: b = Rblock[3][0] if a*b > 0: ctx.prec = wpinitial return k+1 else: ctx.prec = wpinitial return k+2 my_zero_number,block, T, V = Rblock zero_number_block = n2-n1 T, V, separated = separate_zeros_in_block(ctx,\ zero_number_block, T, V,\ limitloop=ctx.inf,\ fp_tolerance=fp_tolerance) n = count_to(ctx, t, T, V) ctx.prec = wpinitial return n+n1+1 @defun_wrapped def backlunds(ctx, t): r""" Computes the function `S(t) = \operatorname{arg} \zeta(\frac{1}{2} + it) / \pi`. See Titchmarsh Section 9.3 for details of the definition. **Examples** >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> backlunds(217.3) 0.16302205431184 Generally, the value is a small number. 
At Gram points it is an integer, frequently equal to 0:: >>> chop(backlunds(grampoint(200))) 0.0 >>> backlunds(extraprec(10)(grampoint)(211)) 1.0 >>> backlunds(extraprec(10)(grampoint)(232)) -1.0 The number of zeros of the Riemann zeta function up to height `t` satisfies `N(t) = \theta(t)/\pi + 1 + S(t)` (see :func:nzeros` and :func:`siegeltheta`):: >>> t = 1234.55 >>> nzeros(t) 842 >>> siegeltheta(t)/pi+1+backlunds(t) 842.0 """ return ctx.nzeros(t)-1-ctx.siegeltheta(t)/ctx.pi """ _ROSSER_EXCEPTIONS is a list of all exceptions to Rosser's rule for n <= 400 000 000. Alternately the entry is of type [n,m], or a string. The string is the zero pattern of the Block and the relevant adjacent. For example (010)3 corresponds to a block composed of three Gram intervals, the first ant third without a zero and the intermediate with a zero. The next Gram interval contain three zeros. So that in total we have 4 zeros in 4 Gram blocks. n and m are the indices of the Gram points of this interval of four Gram intervals. The Rosser exception is therefore formed by the three Gram intervals that are signaled between parenthesis. We have included also some Rosser's exceptions beyond n=400 000 000 that are noted in the literature by some reason. The list is composed from the data published in the references: R. P. Brent, J. van de Lune, H. J. J. te Riele, D. T. Winter, 'On the Zeros of the Riemann Zeta Function in the Critical Strip. II', Math. Comp. 39 (1982) 681--688. See also Corrigenda in Math. Comp. 46 (1986) 771. J. van de Lune, H. J. J. te Riele, 'On the Zeros of the Riemann Zeta Function in the Critical Strip. III', Math. Comp. 41 (1983) 759--767. See also Corrigenda in Math. Comp. 46 (1986) 771. J. van de Lune, 'Sums of Equal Powers of Positive Integers', Dissertation, Vrije Universiteit te Amsterdam, Centrum voor Wiskunde en Informatica, Amsterdam, 1984. Thanks to the authors all this papers and those others that have contributed to make this possible. 
""" _ROSSER_EXCEPTIONS = \ [[13999525, 13999528], '(00)3', [30783329, 30783332], '(00)3', [30930926, 30930929], '3(00)', [37592215, 37592218], '(00)3', [40870156, 40870159], '(00)3', [43628107, 43628110], '(00)3', [46082042, 46082045], '(00)3', [46875667, 46875670], '(00)3', [49624540, 49624543], '3(00)', [50799238, 50799241], '(00)3', [55221453, 55221456], '3(00)', [56948779, 56948782], '3(00)', [60515663, 60515666], '(00)3', [61331766, 61331770], '(00)40', [69784843, 69784846], '3(00)', [75052114, 75052117], '(00)3', [79545240, 79545243], '3(00)', [79652247, 79652250], '3(00)', [83088043, 83088046], '(00)3', [83689522, 83689525], '3(00)', [85348958, 85348961], '(00)3', [86513820, 86513823], '(00)3', [87947596, 87947599], '3(00)', [88600095, 88600098], '(00)3', [93681183, 93681186], '(00)3', [100316551, 100316554], '3(00)', [100788444, 100788447], '(00)3', [106236172, 106236175], '(00)3', [106941327, 106941330], '3(00)', [107287955, 107287958], '(00)3', [107532016, 107532019], '3(00)', [110571044, 110571047], '(00)3', [111885253, 111885256], '3(00)', [113239783, 113239786], '(00)3', [120159903, 120159906], '(00)3', [121424391, 121424394], '3(00)', [121692931, 121692934], '3(00)', [121934170, 121934173], '3(00)', [122612848, 122612851], '3(00)', [126116567, 126116570], '(00)3', [127936513, 127936516], '(00)3', [128710277, 128710280], '3(00)', [129398902, 129398905], '3(00)', [130461096, 130461099], '3(00)', [131331947, 131331950], '3(00)', [137334071, 137334074], '3(00)', [137832603, 137832606], '(00)3', [138799471, 138799474], '3(00)', [139027791, 139027794], '(00)3', [141617806, 141617809], '(00)3', [144454931, 144454934], '(00)3', [145402379, 145402382], '3(00)', [146130245, 146130248], '3(00)', [147059770, 147059773], '(00)3', [147896099, 147896102], '3(00)', [151097113, 151097116], '(00)3', [152539438, 152539441], '(00)3', [152863168, 152863171], '3(00)', [153522726, 153522729], '3(00)', [155171524, 155171527], '3(00)', [155366607, 155366610], '(00)3', [157260686, 157260689], '3(00)', [157269224, 157269227], '(00)3', [157755123, 157755126], '(00)3', [158298484, 158298487], '3(00)', [160369050, 160369053], '3(00)', [162962787, 162962790], '(00)3', [163724709, 163724712], '(00)3', [164198113, 164198116], '3(00)', [164689301, 164689305], '(00)40', [164880228, 164880231], '3(00)', [166201932, 166201935], '(00)3', [168573836, 168573839], '(00)3', [169750763, 169750766], '(00)3', [170375507, 170375510], '(00)3', [170704879, 170704882], '3(00)', [172000992, 172000995], '3(00)', [173289941, 173289944], '(00)3', [173737613, 173737616], '3(00)', [174102513, 174102516], '(00)3', [174284990, 174284993], '(00)3', [174500513, 174500516], '(00)3', [175710609, 175710612], '(00)3', [176870843, 176870846], '3(00)', [177332732, 177332735], '3(00)', [177902861, 177902864], '3(00)', [179979095, 179979098], '(00)3', [181233726, 181233729], '3(00)', [181625435, 181625438], '(00)3', [182105255, 182105259], '22(00)', [182223559, 182223562], '3(00)', [191116404, 191116407], '3(00)', [191165599, 191165602], '3(00)', [191297535, 191297539], '(00)22', [192485616, 192485619], '(00)3', [193264634, 193264638], '22(00)', [194696968, 194696971], '(00)3', [195876805, 195876808], '(00)3', [195916548, 195916551], '3(00)', [196395160, 196395163], '3(00)', [196676303, 196676306], '(00)3', [197889882, 197889885], '3(00)', [198014122, 198014125], '(00)3', [199235289, 199235292], '(00)3', [201007375, 201007378], '(00)3', [201030605, 201030608], '3(00)', [201184290, 201184293], '3(00)', [201685414, 201685418], '(00)22', 
[202762875, 202762878], '3(00)', [202860957, 202860960], '3(00)', [203832577, 203832580], '3(00)', [205880544, 205880547], '(00)3', [206357111, 206357114], '(00)3', [207159767, 207159770], '3(00)', [207167343, 207167346], '3(00)', [207482539, 207482543], '3(010)', [207669540, 207669543], '3(00)', [208053426, 208053429], '(00)3', [208110027, 208110030], '3(00)', [209513826, 209513829], '3(00)', [212623522, 212623525], '(00)3', [213841715, 213841718], '(00)3', [214012333, 214012336], '(00)3', [214073567, 214073570], '(00)3', [215170600, 215170603], '3(00)', [215881039, 215881042], '3(00)', [216274604, 216274607], '3(00)', [216957120, 216957123], '3(00)', [217323208, 217323211], '(00)3', [218799264, 218799267], '(00)3', [218803557, 218803560], '3(00)', [219735146, 219735149], '(00)3', [219830062, 219830065], '3(00)', [219897904, 219897907], '(00)3', [221205545, 221205548], '(00)3', [223601929, 223601932], '(00)3', [223907076, 223907079], '3(00)', [223970397, 223970400], '(00)3', [224874044, 224874048], '22(00)', [225291157, 225291160], '(00)3', [227481734, 227481737], '(00)3', [228006442, 228006445], '3(00)', [228357900, 228357903], '(00)3', [228386399, 228386402], '(00)3', [228907446, 228907449], '(00)3', [228984552, 228984555], '3(00)', [229140285, 229140288], '3(00)', [231810024, 231810027], '(00)3', [232838062, 232838065], '3(00)', [234389088, 234389091], '3(00)', [235588194, 235588197], '(00)3', [236645695, 236645698], '(00)3', [236962876, 236962879], '3(00)', [237516723, 237516727], '04(00)', [240004911, 240004914], '(00)3', [240221306, 240221309], '3(00)', [241389213, 241389217], '(010)3', [241549003, 241549006], '(00)3', [241729717, 241729720], '(00)3', [241743684, 241743687], '3(00)', [243780200, 243780203], '3(00)', [243801317, 243801320], '(00)3', [244122072, 244122075], '(00)3', [244691224, 244691227], '3(00)', [244841577, 244841580], '(00)3', [245813461, 245813464], '(00)3', [246299475, 246299478], '(00)3', [246450176, 246450179], '3(00)', [249069349, 249069352], '(00)3', [250076378, 250076381], '(00)3', [252442157, 252442160], '3(00)', [252904231, 252904234], '3(00)', [255145220, 255145223], '(00)3', [255285971, 255285974], '3(00)', [256713230, 256713233], '(00)3', [257992082, 257992085], '(00)3', [258447955, 258447959], '22(00)', [259298045, 259298048], '3(00)', [262141503, 262141506], '(00)3', [263681743, 263681746], '3(00)', [266527881, 266527885], '(010)3', [266617122, 266617125], '(00)3', [266628044, 266628047], '3(00)', [267305763, 267305766], '(00)3', [267388404, 267388407], '3(00)', [267441672, 267441675], '3(00)', [267464886, 267464889], '(00)3', [267554907, 267554910], '3(00)', [269787480, 269787483], '(00)3', [270881434, 270881437], '(00)3', [270997583, 270997586], '3(00)', [272096378, 272096381], '3(00)', [272583009, 272583012], '(00)3', [274190881, 274190884], '3(00)', [274268747, 274268750], '(00)3', [275297429, 275297432], '3(00)', [275545476, 275545479], '3(00)', [275898479, 275898482], '3(00)', [275953000, 275953003], '(00)3', [277117197, 277117201], '(00)22', [277447310, 277447313], '3(00)', [279059657, 279059660], '3(00)', [279259144, 279259147], '3(00)', [279513636, 279513639], '3(00)', [279849069, 279849072], '3(00)', [280291419, 280291422], '(00)3', [281449425, 281449428], '3(00)', [281507953, 281507956], '3(00)', [281825600, 281825603], '(00)3', [282547093, 282547096], '3(00)', [283120963, 283120966], '3(00)', [283323493, 283323496], '(00)3', [284764535, 284764538], '3(00)', [286172639, 286172642], '3(00)', [286688824, 286688827], '(00)3', [287222172, 
287222175], '3(00)', [287235534, 287235537], '3(00)', [287304861, 287304864], '3(00)', [287433571, 287433574], '(00)3', [287823551, 287823554], '(00)3', [287872422, 287872425], '3(00)', [288766615, 288766618], '3(00)', [290122963, 290122966], '3(00)', [290450849, 290450853], '(00)22', [291426141, 291426144], '3(00)', [292810353, 292810356], '3(00)', [293109861, 293109864], '3(00)', [293398054, 293398057], '3(00)', [294134426, 294134429], '3(00)', [294216438, 294216441], '(00)3', [295367141, 295367144], '3(00)', [297834111, 297834114], '3(00)', [299099969, 299099972], '3(00)', [300746958, 300746961], '3(00)', [301097423, 301097426], '(00)3', [301834209, 301834212], '(00)3', [302554791, 302554794], '(00)3', [303497445, 303497448], '3(00)', [304165344, 304165347], '3(00)', [304790218, 304790222], '3(010)', [305302352, 305302355], '(00)3', [306785996, 306785999], '3(00)', [307051443, 307051446], '3(00)', [307481539, 307481542], '3(00)', [308605569, 308605572], '3(00)', [309237610, 309237613], '3(00)', [310509287, 310509290], '(00)3', [310554057, 310554060], '3(00)', [310646345, 310646348], '3(00)', [311274896, 311274899], '(00)3', [311894272, 311894275], '3(00)', [312269470, 312269473], '(00)3', [312306601, 312306605], '(00)40', [312683193, 312683196], '3(00)', [314499804, 314499807], '3(00)', [314636802, 314636805], '(00)3', [314689897, 314689900], '3(00)', [314721319, 314721322], '3(00)', [316132890, 316132893], '3(00)', [316217470, 316217474], '(010)3', [316465705, 316465708], '3(00)', [316542790, 316542793], '(00)3', [320822347, 320822350], '3(00)', [321733242, 321733245], '3(00)', [324413970, 324413973], '(00)3', [325950140, 325950143], '(00)3', [326675884, 326675887], '(00)3', [326704208, 326704211], '3(00)', [327596247, 327596250], '3(00)', [328123172, 328123175], '3(00)', [328182212, 328182215], '(00)3', [328257498, 328257501], '3(00)', [328315836, 328315839], '(00)3', [328800974, 328800977], '(00)3', [328998509, 328998512], '3(00)', [329725370, 329725373], '(00)3', [332080601, 332080604], '(00)3', [332221246, 332221249], '(00)3', [332299899, 332299902], '(00)3', [332532822, 332532825], '(00)3', [333334544, 333334548], '(00)22', [333881266, 333881269], '3(00)', [334703267, 334703270], '3(00)', [334875138, 334875141], '3(00)', [336531451, 336531454], '3(00)', [336825907, 336825910], '(00)3', [336993167, 336993170], '(00)3', [337493998, 337494001], '3(00)', [337861034, 337861037], '3(00)', [337899191, 337899194], '(00)3', [337958123, 337958126], '(00)3', [342331982, 342331985], '3(00)', [342676068, 342676071], '3(00)', [347063781, 347063784], '3(00)', [347697348, 347697351], '3(00)', [347954319, 347954322], '3(00)', [348162775, 348162778], '3(00)', [349210702, 349210705], '(00)3', [349212913, 349212916], '3(00)', [349248650, 349248653], '(00)3', [349913500, 349913503], '3(00)', [350891529, 350891532], '3(00)', [351089323, 351089326], '3(00)', [351826158, 351826161], '3(00)', [352228580, 352228583], '(00)3', [352376244, 352376247], '3(00)', [352853758, 352853761], '(00)3', [355110439, 355110442], '(00)3', [355808090, 355808094], '(00)40', [355941556, 355941559], '3(00)', [356360231, 356360234], '(00)3', [356586657, 356586660], '3(00)', [356892926, 356892929], '(00)3', [356908232, 356908235], '3(00)', [357912730, 357912733], '3(00)', [358120344, 358120347], '3(00)', [359044096, 359044099], '(00)3', [360819357, 360819360], '3(00)', [361399662, 361399666], '(010)3', [362361315, 362361318], '(00)3', [363610112, 363610115], '(00)3', [363964804, 363964807], '3(00)', [364527375, 364527378], 
'(00)3', [365090327, 365090330], '(00)3', [365414539, 365414542], '3(00)', [366738474, 366738477], '3(00)', [368714778, 368714783], '04(010)', [368831545, 368831548], '(00)3', [368902387, 368902390], '(00)3', [370109769, 370109772], '3(00)', [370963333, 370963336], '3(00)', [372541136, 372541140], '3(010)', [372681562, 372681565], '(00)3', [373009410, 373009413], '(00)3', [373458970, 373458973], '3(00)', [375648658, 375648661], '3(00)', [376834728, 376834731], '3(00)', [377119945, 377119948], '(00)3', [377335703, 377335706], '(00)3', [378091745, 378091748], '3(00)', [379139522, 379139525], '3(00)', [380279160, 380279163], '(00)3', [380619442, 380619445], '3(00)', [381244231, 381244234], '3(00)', [382327446, 382327450], '(010)3', [382357073, 382357076], '3(00)', [383545479, 383545482], '3(00)', [384363766, 384363769], '(00)3', [384401786, 384401790], '22(00)', [385198212, 385198215], '3(00)', [385824476, 385824479], '(00)3', [385908194, 385908197], '3(00)', [386946806, 386946809], '3(00)', [387592175, 387592179], '22(00)', [388329293, 388329296], '(00)3', [388679566, 388679569], '3(00)', [388832142, 388832145], '3(00)', [390087103, 390087106], '(00)3', [390190926, 390190930], '(00)22', [390331207, 390331210], '3(00)', [391674495, 391674498], '3(00)', [391937831, 391937834], '3(00)', [391951632, 391951636], '(00)22', [392963986, 392963989], '(00)3', [393007921, 393007924], '3(00)', [393373210, 393373213], '3(00)', [393759572, 393759575], '(00)3', [394036662, 394036665], '(00)3', [395813866, 395813869], '(00)3', [395956690, 395956693], '3(00)', [396031670, 396031673], '3(00)', [397076433, 397076436], '3(00)', [397470601, 397470604], '3(00)', [398289458, 398289461], '3(00)', # [368714778, 368714783], '04(010)', [437953499, 437953504], '04(010)', [526196233, 526196238], '032(00)', [744719566, 744719571], '(010)40', [750375857, 750375862], '032(00)', [958241932, 958241937], '04(010)', [983377342, 983377347], '(00)410', [1003780080, 1003780085], '04(010)', [1070232754, 1070232759], '(00)230', [1209834865, 1209834870], '032(00)', [1257209100, 1257209105], '(00)410', [1368002233, 1368002238], '(00)230' ]
30,951
29.315377
103
py
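The bracketed pairs above appear to be precomputed exception data of the kind mpmath keeps for isolating zeros of the Riemann zeta function: each entry pairs an index range of zeros with a Gram-block pattern string such as '3(00)' or '(00)3'. That reading of the table is an assumption on my part; only the public mpmath functions used below (zetazero, grampoint, siegelz) are taken from the library's documented API, not from this excerpt. A minimal sketch of how such zeros are queried:

from mpmath import mp, zetazero, grampoint, siegelz

mp.dps = 20
# n-th nontrivial zero of the Riemann zeta function on the critical line
print(zetazero(1))          # roughly 0.5 + 14.1347251417347j
# Gram points bracket the zeros on average; siegelz changes sign near a zero
g = grampoint(0)
print(g, siegelz(g))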
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/functions/theta.py
from .functions import defun, defun_wrapped @defun def _jacobi_theta2(ctx, z, q): extra1 = 10 extra2 = 20 # the loops below break when the fixed precision quantities # a and b go to zero; # right shifting small negative numbers by wp one obtains -1, not zero, # so the condition a**2 + b**2 > MIN is used to break the loops. MIN = 2 if z == ctx.zero: if (not ctx._im(q)): wp = ctx.prec + extra1 x = ctx.to_fixed(ctx._re(q), wp) x2 = (x*x) >> wp a = b = x2 s = x2 while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp s += a s = (1 << (wp+1)) + (s << 1) s = ctx.ldexp(s, -wp) else: wp = ctx.prec + extra1 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp-1) are = bre = x2re aim = bim = x2im sre = (1<<wp) + are sim = aim while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp sre += are sim += aim sre = (sre << 1) sim = (sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) else: if (not ctx._im(q)) and (not ctx._im(z)): wp = ctx.prec + extra1 x = ctx.to_fixed(ctx._re(q), wp) x2 = (x*x) >> wp a = b = x2 c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) cn = c1 = ctx.to_fixed(c1, wp) sn = s1 = ctx.to_fixed(s1, wp) c2 = (c1*c1 - s1*s1) >> wp s2 = (c1 * s1) >> (wp - 1) cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp s = c1 + ((a * cn) >> wp) while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp s += (a * cn) >> wp s = (s << 1) s = ctx.ldexp(s, -wp) s *= ctx.nthroot(q, 4) return s # case z real, q complex elif not ctx._im(z): wp = ctx.prec + extra2 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp - 1) are = bre = x2re aim = bim = x2im c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) cn = c1 = ctx.to_fixed(c1, wp) sn = s1 = ctx.to_fixed(s1, wp) c2 = (c1*c1 - s1*s1) >> wp s2 = (c1 * s1) >> (wp - 1) cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp sre = c1 + ((are * cn) >> wp) sim = ((aim * cn) >> wp) while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp sre += ((are * cn) >> wp) sim += ((aim * cn) >> wp) sre = (sre << 1) sim = (sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) #case z complex, q real elif not ctx._im(q): wp = ctx.prec + extra2 x = ctx.to_fixed(ctx._re(q), wp) x2 = (x*x) >> wp a = b = x2 prec0 = ctx.prec ctx.prec = wp c1, s1 = ctx.cos_sin(z) ctx.prec = prec0 cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) snre = s1re = ctx.to_fixed(ctx._re(s1), wp) snim = s1im = ctx.to_fixed(ctx._im(s1), wp) #c2 = (c1*c1 - s1*s1) >> wp c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) #s2 = (c1 * s1) >> (wp - 1) s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) #cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 sre = c1re + ((a * cnre) >> wp) 
sim = c1im + ((a * cnim) >> wp) while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 sre += ((a * cnre) >> wp) sim += ((a * cnim) >> wp) sre = (sre << 1) sim = (sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) # case z and q complex else: wp = ctx.prec + extra2 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp - 1) are = bre = x2re aim = bim = x2im prec0 = ctx.prec ctx.prec = wp # cos(z), sin(z) with z complex c1, s1 = ctx.cos_sin(z) ctx.prec = prec0 cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) snre = s1re = ctx.to_fixed(ctx._re(s1), wp) snim = s1im = ctx.to_fixed(ctx._im(s1), wp) c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 n = 1 termre = c1re termim = c1im sre = c1re + ((are * cnre - aim * cnim) >> wp) sim = c1im + ((are * cnim + aim * cnre) >> wp) n = 3 termre = ((are * cnre - aim * cnim) >> wp) termim = ((are * cnim + aim * cnre) >> wp) sre = c1re + ((are * cnre - aim * cnim) >> wp) sim = c1im + ((are * cnim + aim * cnre) >> wp) n = 5 while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp #cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 termre = ((are * cnre - aim * cnim) >> wp) termim = ((aim * cnre + are * cnim) >> wp) sre += ((are * cnre - aim * cnim) >> wp) sim += ((aim * cnre + are * cnim) >> wp) n += 2 sre = (sre << 1) sim = (sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) s *= ctx.nthroot(q, 4) return s @defun def _djacobi_theta2(ctx, z, q, nd): MIN = 2 extra1 = 10 extra2 = 20 if (not ctx._im(q)) and (not ctx._im(z)): wp = ctx.prec + extra1 x = ctx.to_fixed(ctx._re(q), wp) x2 = (x*x) >> wp a = b = x2 c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) cn = c1 = ctx.to_fixed(c1, wp) sn = s1 = ctx.to_fixed(s1, wp) c2 = (c1*c1 - s1*s1) >> wp s2 = (c1 * s1) >> (wp - 1) cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp if (nd&1): s = s1 + ((a * sn * 3**nd) >> wp) else: s = c1 + ((a * cn * 3**nd) >> wp) n = 2 while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp if nd&1: s += (a * sn * (2*n+1)**nd) >> wp else: s += (a * cn * (2*n+1)**nd) >> wp n += 1 s = -(s << 1) s = ctx.ldexp(s, -wp) # case z real, q complex elif not ctx._im(z): wp = ctx.prec + extra2 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = 
(xre*xim) >> (wp - 1) are = bre = x2re aim = bim = x2im c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) cn = c1 = ctx.to_fixed(c1, wp) sn = s1 = ctx.to_fixed(s1, wp) c2 = (c1*c1 - s1*s1) >> wp s2 = (c1 * s1) >> (wp - 1) cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp if (nd&1): sre = s1 + ((are * sn * 3**nd) >> wp) sim = ((aim * sn * 3**nd) >> wp) else: sre = c1 + ((are * cn * 3**nd) >> wp) sim = ((aim * cn * 3**nd) >> wp) n = 5 while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp if (nd&1): sre += ((are * sn * n**nd) >> wp) sim += ((aim * sn * n**nd) >> wp) else: sre += ((are * cn * n**nd) >> wp) sim += ((aim * cn * n**nd) >> wp) n += 2 sre = -(sre << 1) sim = -(sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) #case z complex, q real elif not ctx._im(q): wp = ctx.prec + extra2 x = ctx.to_fixed(ctx._re(q), wp) x2 = (x*x) >> wp a = b = x2 prec0 = ctx.prec ctx.prec = wp c1, s1 = ctx.cos_sin(z) ctx.prec = prec0 cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) snre = s1re = ctx.to_fixed(ctx._re(s1), wp) snim = s1im = ctx.to_fixed(ctx._im(s1), wp) #c2 = (c1*c1 - s1*s1) >> wp c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) #s2 = (c1 * s1) >> (wp - 1) s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) #cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 if (nd&1): sre = s1re + ((a * snre * 3**nd) >> wp) sim = s1im + ((a * snim * 3**nd) >> wp) else: sre = c1re + ((a * cnre * 3**nd) >> wp) sim = c1im + ((a * cnim * 3**nd) >> wp) n = 5 while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 if (nd&1): sre += ((a * snre * n**nd) >> wp) sim += ((a * snim * n**nd) >> wp) else: sre += ((a * cnre * n**nd) >> wp) sim += ((a * cnim * n**nd) >> wp) n += 2 sre = -(sre << 1) sim = -(sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) # case z and q complex else: wp = ctx.prec + extra2 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp - 1) are = bre = x2re aim = bim = x2im prec0 = ctx.prec ctx.prec = wp # cos(2*z), sin(2*z) with z complex c1, s1 = ctx.cos_sin(z) ctx.prec = prec0 cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) snre = s1re = ctx.to_fixed(ctx._re(s1), wp) snim = s1im = ctx.to_fixed(ctx._im(s1), wp) c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp t4 = 
(snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 if (nd&1): sre = s1re + (((are * snre - aim * snim) * 3**nd) >> wp) sim = s1im + (((are * snim + aim * snre)* 3**nd) >> wp) else: sre = c1re + (((are * cnre - aim * cnim) * 3**nd) >> wp) sim = c1im + (((are * cnim + aim * cnre)* 3**nd) >> wp) n = 5 while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp #cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 if (nd&1): sre += (((are * snre - aim * snim) * n**nd) >> wp) sim += (((aim * snre + are * snim) * n**nd) >> wp) else: sre += (((are * cnre - aim * cnim) * n**nd) >> wp) sim += (((aim * cnre + are * cnim) * n**nd) >> wp) n += 2 sre = -(sre << 1) sim = -(sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) s *= ctx.nthroot(q, 4) if (nd&1): return (-1)**(nd//2) * s else: return (-1)**(1 + nd//2) * s @defun def _jacobi_theta3(ctx, z, q): extra1 = 10 extra2 = 20 MIN = 2 if z == ctx.zero: if not ctx._im(q): wp = ctx.prec + extra1 x = ctx.to_fixed(ctx._re(q), wp) s = x a = b = x x2 = (x*x) >> wp while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp s += a s = (1 << wp) + (s << 1) s = ctx.ldexp(s, -wp) return s else: wp = ctx.prec + extra1 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp - 1) sre = are = bre = xre sim = aim = bim = xim while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp sre += are sim += aim sre = (1 << wp) + (sre << 1) sim = (sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) return s else: if (not ctx._im(q)) and (not ctx._im(z)): s = 0 wp = ctx.prec + extra1 x = ctx.to_fixed(ctx._re(q), wp) a = b = x x2 = (x*x) >> wp c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) c1 = ctx.to_fixed(c1, wp) s1 = ctx.to_fixed(s1, wp) cn = c1 sn = s1 s += (a * cn) >> wp while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp s += (a * cn) >> wp s = (1 << wp) + (s << 1) s = ctx.ldexp(s, -wp) return s # case z real, q complex elif not ctx._im(z): wp = ctx.prec + extra2 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp - 1) are = bre = xre aim = bim = xim c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) c1 = ctx.to_fixed(c1, wp) s1 = ctx.to_fixed(s1, wp) cn = c1 sn = s1 sre = (are * cn) >> wp sim = (aim * cn) >> wp while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp sre += (are * cn) >> wp sim += (aim * cn) >> wp sre = (1 << wp) + (sre << 1) sim = (sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) return s #case z complex, q real elif not ctx._im(q): wp = ctx.prec + extra2 x = ctx.to_fixed(ctx._re(q), wp) a = b = x x2 = (x*x) >> wp prec0 = ctx.prec ctx.prec = wp c1, 
s1 = ctx.cos_sin(2*z) ctx.prec = prec0 cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) snre = s1re = ctx.to_fixed(ctx._re(s1), wp) snim = s1im = ctx.to_fixed(ctx._im(s1), wp) sre = (a * cnre) >> wp sim = (a * cnim) >> wp while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 sre += (a * cnre) >> wp sim += (a * cnim) >> wp sre = (1 << wp) + (sre << 1) sim = (sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) return s # case z and q complex else: wp = ctx.prec + extra2 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp - 1) are = bre = xre aim = bim = xim prec0 = ctx.prec ctx.prec = wp # cos(2*z), sin(2*z) with z complex c1, s1 = ctx.cos_sin(2*z) ctx.prec = prec0 cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) snre = s1re = ctx.to_fixed(ctx._re(s1), wp) snim = s1im = ctx.to_fixed(ctx._im(s1), wp) sre = (are * cnre - aim * cnim) >> wp sim = (aim * cnre + are * cnim) >> wp while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 sre += (are * cnre - aim * cnim) >> wp sim += (aim * cnre + are * cnim) >> wp sre = (1 << wp) + (sre << 1) sim = (sim << 1) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) return s @defun def _djacobi_theta3(ctx, z, q, nd): """nd=1,2,3 order of the derivative with respect to z""" MIN = 2 extra1 = 10 extra2 = 20 if (not ctx._im(q)) and (not ctx._im(z)): s = 0 wp = ctx.prec + extra1 x = ctx.to_fixed(ctx._re(q), wp) a = b = x x2 = (x*x) >> wp c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) c1 = ctx.to_fixed(c1, wp) s1 = ctx.to_fixed(s1, wp) cn = c1 sn = s1 if (nd&1): s += (a * sn) >> wp else: s += (a * cn) >> wp n = 2 while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp if nd&1: s += (a * sn * n**nd) >> wp else: s += (a * cn * n**nd) >> wp n += 1 s = -(s << (nd+1)) s = ctx.ldexp(s, -wp) # case z real, q complex elif not ctx._im(z): wp = ctx.prec + extra2 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp - 1) are = bre = xre aim = bim = xim c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) c1 = ctx.to_fixed(c1, wp) s1 = ctx.to_fixed(s1, wp) cn = c1 sn = s1 if (nd&1): sre = (are * sn) >> wp sim = (aim * sn) >> wp else: sre = (are * cn) >> wp sim = (aim * cn) >> wp n = 2 while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp if nd&1: sre += (are * sn * n**nd) >> wp sim += (aim * sn * n**nd) >> wp else: sre += (are * cn * n**nd) >> wp sim += (aim * cn * n**nd) >> wp n += 1 sre = -(sre << (nd+1)) sim = -(sim 
<< (nd+1)) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) #case z complex, q real elif not ctx._im(q): wp = ctx.prec + extra2 x = ctx.to_fixed(ctx._re(q), wp) a = b = x x2 = (x*x) >> wp prec0 = ctx.prec ctx.prec = wp c1, s1 = ctx.cos_sin(2*z) ctx.prec = prec0 cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) snre = s1re = ctx.to_fixed(ctx._re(s1), wp) snim = s1im = ctx.to_fixed(ctx._im(s1), wp) if (nd&1): sre = (a * snre) >> wp sim = (a * snim) >> wp else: sre = (a * cnre) >> wp sim = (a * cnim) >> wp n = 2 while abs(a) > MIN: b = (b*x2) >> wp a = (a*b) >> wp t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 if (nd&1): sre += (a * snre * n**nd) >> wp sim += (a * snim * n**nd) >> wp else: sre += (a * cnre * n**nd) >> wp sim += (a * cnim * n**nd) >> wp n += 1 sre = -(sre << (nd+1)) sim = -(sim << (nd+1)) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) # case z and q complex else: wp = ctx.prec + extra2 xre = ctx.to_fixed(ctx._re(q), wp) xim = ctx.to_fixed(ctx._im(q), wp) x2re = (xre*xre - xim*xim) >> wp x2im = (xre*xim) >> (wp - 1) are = bre = xre aim = bim = xim prec0 = ctx.prec ctx.prec = wp # cos(2*z), sin(2*z) with z complex c1, s1 = ctx.cos_sin(2*z) ctx.prec = prec0 cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) snre = s1re = ctx.to_fixed(ctx._re(s1), wp) snim = s1im = ctx.to_fixed(ctx._im(s1), wp) if (nd&1): sre = (are * snre - aim * snim) >> wp sim = (aim * snre + are * snim) >> wp else: sre = (are * cnre - aim * cnim) >> wp sim = (aim * cnre + are * cnim) >> wp n = 2 while are**2 + aim**2 > MIN: bre, bim = (bre * x2re - bim * x2im) >> wp, \ (bre * x2im + bim * x2re) >> wp are, aim = (are * bre - aim * bim) >> wp, \ (are * bim + aim * bre) >> wp t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp cnre = t1 cnim = t2 snre = t3 snim = t4 if(nd&1): sre += ((are * snre - aim * snim) * n**nd) >> wp sim += ((aim * snre + are * snim) * n**nd) >> wp else: sre += ((are * cnre - aim * cnim) * n**nd) >> wp sim += ((aim * cnre + are * cnim) * n**nd) >> wp n += 1 sre = -(sre << (nd+1)) sim = -(sim << (nd+1)) sre = ctx.ldexp(sre, -wp) sim = ctx.ldexp(sim, -wp) s = ctx.mpc(sre, sim) if (nd&1): return (-1)**(nd//2) * s else: return (-1)**(1 + nd//2) * s @defun def _jacobi_theta2a(ctx, z, q): """ case ctx._im(z) != 0 theta(2, z, q) = q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=-inf, inf) max term for minimum (2*n+1)*log(q).real - 2* ctx._im(z) n0 = int(ctx._im(z)/log(q).real - 1/2) theta(2, z, q) = q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=n0, inf) + q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n, n0-1, -inf) """ n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2) e2 = ctx.expj(2*z) e = e0 = ctx.expj((2*n+1)*z) a = q**(n*n + n) # leading term term = a * e s = term eps1 = ctx.eps*abs(term) while 1: n += 1 e = e * e2 term = q**(n*n + n) * e if abs(term) < eps1: break s += term e = e0 e2 = ctx.expj(-2*z) n = n0 while 1: n -= 1 e = e * e2 term = q**(n*n + n) * e if abs(term) < eps1: break s += term s = s * ctx.nthroot(q, 4) return s @defun def 
_jacobi_theta3a(ctx, z, q): """ case ctx._im(z) != 0 theta3(z, q) = Sum(q**(n*n) * exp(j*2*n*z), n, -inf, inf) max term for n*abs(log(q).real) + ctx._im(z) ~= 0 n0 = int(- ctx._im(z)/abs(log(q).real)) """ n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q)))) e2 = ctx.expj(2*z) e = e0 = ctx.expj(2*n*z) s = term = q**(n*n) * e eps1 = ctx.eps*abs(term) while 1: n += 1 e = e * e2 term = q**(n*n) * e if abs(term) < eps1: break s += term e = e0 e2 = ctx.expj(-2*z) n = n0 while 1: n -= 1 e = e * e2 term = q**(n*n) * e if abs(term) < eps1: break s += term return s @defun def _djacobi_theta2a(ctx, z, q, nd): """ case ctx._im(z) != 0 dtheta(2, z, q, nd) = j* q**1/4 * Sum(q**(n*n + n) * (2*n+1)*exp(j*(2*n + 1)*z), n=-inf, inf) max term for (2*n0+1)*log(q).real - 2* ctx._im(z) ~= 0 n0 = int(ctx._im(z)/log(q).real - 1/2) """ n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2) e2 = ctx.expj(2*z) e = e0 = ctx.expj((2*n + 1)*z) a = q**(n*n + n) # leading term term = (2*n+1)**nd * a * e s = term eps1 = ctx.eps*abs(term) while 1: n += 1 e = e * e2 term = (2*n+1)**nd * q**(n*n + n) * e if abs(term) < eps1: break s += term e = e0 e2 = ctx.expj(-2*z) n = n0 while 1: n -= 1 e = e * e2 term = (2*n+1)**nd * q**(n*n + n) * e if abs(term) < eps1: break s += term return ctx.j**nd * s * ctx.nthroot(q, 4) @defun def _djacobi_theta3a(ctx, z, q, nd): """ case ctx._im(z) != 0 djtheta3(z, q, nd) = (2*j)**nd * Sum(q**(n*n) * n**nd * exp(j*2*n*z), n, -inf, inf) max term for minimum n*abs(log(q).real) + ctx._im(z) """ n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q)))) e2 = ctx.expj(2*z) e = e0 = ctx.expj(2*n*z) a = q**(n*n) * e s = term = n**nd * a if n != 0: eps1 = ctx.eps*abs(term) else: eps1 = ctx.eps*abs(a) while 1: n += 1 e = e * e2 a = q**(n*n) * e term = n**nd * a if n != 0: aterm = abs(term) else: aterm = abs(a) if aterm < eps1: break s += term e = e0 e2 = ctx.expj(-2*z) n = n0 while 1: n -= 1 e = e * e2 a = q**(n*n) * e term = n**nd * a if n != 0: aterm = abs(term) else: aterm = abs(a) if aterm < eps1: break s += term return (2*ctx.j)**nd * s @defun def jtheta(ctx, n, z, q, derivative=0): if derivative: return ctx._djtheta(n, z, q, derivative) z = ctx.convert(z) q = ctx.convert(q) # Implementation note # If ctx._im(z) is close to zero, _jacobi_theta2 and _jacobi_theta3 # are used, # which compute the series starting from n=0 using fixed precision # numbers; # otherwise _jacobi_theta2a and _jacobi_theta3a are used, which compute # the series starting from n=n0, which is the largest term. 
# TODO: write _jacobi_theta2a and _jacobi_theta3a using fixed-point if abs(q) > ctx.THETA_Q_LIM: raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM) extra = 10 if z: M = ctx.mag(z) if M > 5 or (n == 1 and M < -5): extra += 2*abs(M) cz = 0.5 extra2 = 50 prec0 = ctx.prec try: ctx.prec += extra if n == 1: if ctx._im(z): if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): ctx.dps += extra2 res = ctx._jacobi_theta2(z - ctx.pi/2, q) else: ctx.dps += 10 res = ctx._jacobi_theta2a(z - ctx.pi/2, q) else: res = ctx._jacobi_theta2(z - ctx.pi/2, q) elif n == 2: if ctx._im(z): if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): ctx.dps += extra2 res = ctx._jacobi_theta2(z, q) else: ctx.dps += 10 res = ctx._jacobi_theta2a(z, q) else: res = ctx._jacobi_theta2(z, q) elif n == 3: if ctx._im(z): if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): ctx.dps += extra2 res = ctx._jacobi_theta3(z, q) else: ctx.dps += 10 res = ctx._jacobi_theta3a(z, q) else: res = ctx._jacobi_theta3(z, q) elif n == 4: if ctx._im(z): if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): ctx.dps += extra2 res = ctx._jacobi_theta3(z, -q) else: ctx.dps += 10 res = ctx._jacobi_theta3a(z, -q) else: res = ctx._jacobi_theta3(z, -q) else: raise ValueError finally: ctx.prec = prec0 return res @defun def _djtheta(ctx, n, z, q, derivative=1): z = ctx.convert(z) q = ctx.convert(q) nd = int(derivative) if abs(q) > ctx.THETA_Q_LIM: raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM) extra = 10 + ctx.prec * nd // 10 if z: M = ctx.mag(z) if M > 5 or (n != 1 and M < -5): extra += 2*abs(M) cz = 0.5 extra2 = 50 prec0 = ctx.prec try: ctx.prec += extra if n == 1: if ctx._im(z): if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): ctx.dps += extra2 res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd) else: ctx.dps += 10 res = ctx._djacobi_theta2a(z - ctx.pi/2, q, nd) else: res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd) elif n == 2: if ctx._im(z): if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): ctx.dps += extra2 res = ctx._djacobi_theta2(z, q, nd) else: ctx.dps += 10 res = ctx._djacobi_theta2a(z, q, nd) else: res = ctx._djacobi_theta2(z, q, nd) elif n == 3: if ctx._im(z): if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): ctx.dps += extra2 res = ctx._djacobi_theta3(z, q, nd) else: ctx.dps += 10 res = ctx._djacobi_theta3a(z, q, nd) else: res = ctx._djacobi_theta3(z, q, nd) elif n == 4: if ctx._im(z): if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): ctx.dps += extra2 res = ctx._djacobi_theta3(z, -q, nd) else: ctx.dps += 10 res = ctx._djacobi_theta3a(z, -q, nd) else: res = ctx._djacobi_theta3(z, -q, nd) else: raise ValueError finally: ctx.prec = prec0 return +res
37,320
34.54381
76
py
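The file above implements the Jacobi theta functions and their z-derivatives as q-series evaluated in fixed-point arithmetic, choosing between the _jacobi_theta* and _jacobi_theta*a variants depending on how large im(z) is relative to log(q). The public entry point visible in the source is jtheta(n, z, q, derivative=0). The sketch below assumes only that entry point plus standard mpmath helpers (nsum, cos); the series used as a cross-check is the textbook definition of theta_3, not a quote from the file:

from mpmath import mp, mpf, jtheta, nsum, cos, inf

mp.dps = 25
q, z = mpf('0.1'), mpf('0.3')

# theta_3 via mpmath versus its defining series 1 + 2*sum(q**(n*n)*cos(2*n*z))
t3 = jtheta(3, z, q)
series = 1 + 2*nsum(lambda n: q**(n*n)*cos(2*n*z), [1, inf])
print(t3, series)                      # the two values should agree to working precision

# first derivative with respect to z, handled by the derivative/nd branches above
print(jtheta(3, z, q, derivative=1))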
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/matrices/calculus.py
from ..libmp.backend import xrange # TODO: should use diagonalization-based algorithms class MatrixCalculusMethods(object): def _exp_pade(ctx, a): """ Exponential of a matrix using Pade approximants. See G. H. Golub, C. F. van Loan 'Matrix Computations', third Ed., page 572 TODO: - find a good estimate for q - reduce the number of matrix multiplications to improve performance """ def eps_pade(p): return ctx.mpf(2)**(3-2*p) * \ ctx.factorial(p)**2/(ctx.factorial(2*p)**2 * (2*p + 1)) q = 4 extraq = 8 while 1: if eps_pade(q) < ctx.eps: break q += 1 q += extraq j = int(max(1, ctx.mag(ctx.mnorm(a,'inf')))) extra = q prec = ctx.prec ctx.dps += extra + 3 try: a = a/2**j na = a.rows den = ctx.eye(na) num = ctx.eye(na) x = ctx.eye(na) c = ctx.mpf(1) for k in range(1, q+1): c *= ctx.mpf(q - k + 1)/((2*q - k + 1) * k) x = a*x cx = c*x num += cx den += (-1)**k * cx f = ctx.lu_solve_mat(den, num) for k in range(j): f = f*f finally: ctx.prec = prec return f*1 def expm(ctx, A, method='taylor'): r""" Computes the matrix exponential of a square matrix `A`, which is defined by the power series .. math :: \exp(A) = I + A + \frac{A^2}{2!} + \frac{A^3}{3!} + \ldots With method='taylor', the matrix exponential is computed using the Taylor series. With method='pade', Pade approximants are used instead. **Examples** Basic examples:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> expm(zeros(3)) [1.0 0.0 0.0] [0.0 1.0 0.0] [0.0 0.0 1.0] >>> expm(eye(3)) [2.71828182845905 0.0 0.0] [ 0.0 2.71828182845905 0.0] [ 0.0 0.0 2.71828182845905] >>> expm([[1,1,0],[1,0,1],[0,1,0]]) [ 3.86814500615414 2.26812870852145 0.841130841230196] [ 2.26812870852145 2.44114713886289 1.42699786729125] [0.841130841230196 1.42699786729125 1.6000162976327] >>> expm([[1,1,0],[1,0,1],[0,1,0]], method='pade') [ 3.86814500615414 2.26812870852145 0.841130841230196] [ 2.26812870852145 2.44114713886289 1.42699786729125] [0.841130841230196 1.42699786729125 1.6000162976327] >>> expm([[1+j, 0], [1+j,1]]) [(1.46869393991589 + 2.28735528717884j) 0.0] [ (1.03776739863568 + 3.536943175722j) (2.71828182845905 + 0.0j)] Matrices with large entries are allowed:: >>> expm(matrix([[1,2],[2,3]])**25) [5.65024064048415e+2050488462815550 9.14228140091932e+2050488462815550] [9.14228140091932e+2050488462815550 1.47925220414035e+2050488462815551] The identity `\exp(A+B) = \exp(A) \exp(B)` does not hold for noncommuting matrices:: >>> A = hilbert(3) >>> B = A + eye(3) >>> chop(mnorm(A*B - B*A)) 0.0 >>> chop(mnorm(expm(A+B) - expm(A)*expm(B))) 0.0 >>> B = A + ones(3) >>> mnorm(A*B - B*A) 1.8 >>> mnorm(expm(A+B) - expm(A)*expm(B)) 42.0927851137247 """ if method == 'pade': prec = ctx.prec try: A = ctx.matrix(A) ctx.prec += 2*A.rows res = ctx._exp_pade(A) finally: ctx.prec = prec return res A = ctx.matrix(A) prec = ctx.prec j = int(max(1, ctx.mag(ctx.mnorm(A,'inf')))) j += int(0.5*prec**0.5) try: ctx.prec += 10 + 2*j tol = +ctx.eps A = A/2**j T = A Y = A**0 + A k = 2 while 1: T *= A * (1/ctx.mpf(k)) if ctx.mnorm(T, 'inf') < tol: break Y += T k += 1 for k in xrange(j): Y = Y*Y finally: ctx.prec = prec Y *= 1 return Y def cosm(ctx, A): r""" Gives the cosine of a square matrix `A`, defined in analogy with the matrix exponential. 
Examples:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> X = eye(3) >>> cosm(X) [0.54030230586814 0.0 0.0] [ 0.0 0.54030230586814 0.0] [ 0.0 0.0 0.54030230586814] >>> X = hilbert(3) >>> cosm(X) [ 0.424403834569555 -0.316643413047167 -0.221474945949293] [-0.316643413047167 0.820646708837824 -0.127183694770039] [-0.221474945949293 -0.127183694770039 0.909236687217541] >>> X = matrix([[1+j,-2],[0,-j]]) >>> cosm(X) [(0.833730025131149 - 0.988897705762865j) (1.07485840848393 - 0.17192140544213j)] [ 0.0 (1.54308063481524 + 0.0j)] """ B = 0.5 * (ctx.expm(A*ctx.j) + ctx.expm(A*(-ctx.j))) if not sum(A.apply(ctx.im).apply(abs)): B = B.apply(ctx.re) return B def sinm(ctx, A): r""" Gives the sine of a square matrix `A`, defined in analogy with the matrix exponential. Examples:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> X = eye(3) >>> sinm(X) [0.841470984807897 0.0 0.0] [ 0.0 0.841470984807897 0.0] [ 0.0 0.0 0.841470984807897] >>> X = hilbert(3) >>> sinm(X) [0.711608512150994 0.339783913247439 0.220742837314741] [0.339783913247439 0.244113865695532 0.187231271174372] [0.220742837314741 0.187231271174372 0.155816730769635] >>> X = matrix([[1+j,-2],[0,-j]]) >>> sinm(X) [(1.29845758141598 + 0.634963914784736j) (-1.96751511930922 + 0.314700021761367j)] [ 0.0 (0.0 - 1.1752011936438j)] """ B = (-0.5j) * (ctx.expm(A*ctx.j) - ctx.expm(A*(-ctx.j))) if not sum(A.apply(ctx.im).apply(abs)): B = B.apply(ctx.re) return B def _sqrtm_rot(ctx, A, _may_rotate): # If the iteration fails to converge, cheat by performing # a rotation by a complex number u = ctx.j**0.3 return ctx.sqrtm(u*A, _may_rotate) / ctx.sqrt(u) def sqrtm(ctx, A, _may_rotate=2): r""" Computes a square root of the square matrix `A`, i.e. returns a matrix `B = A^{1/2}` such that `B^2 = A`. The square root of a matrix, if it exists, is not unique. 
**Examples** Square roots of some simple matrices:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> sqrtm([[1,0], [0,1]]) [1.0 0.0] [0.0 1.0] >>> sqrtm([[0,0], [0,0]]) [0.0 0.0] [0.0 0.0] >>> sqrtm([[2,0],[0,1]]) [1.4142135623731 0.0] [ 0.0 1.0] >>> sqrtm([[1,1],[1,0]]) [ (0.920442065259926 - 0.21728689675164j) (0.568864481005783 + 0.351577584254143j)] [(0.568864481005783 + 0.351577584254143j) (0.351577584254143 - 0.568864481005783j)] >>> sqrtm([[1,0],[0,1]]) [1.0 0.0] [0.0 1.0] >>> sqrtm([[-1,0],[0,1]]) [(0.0 - 1.0j) 0.0] [ 0.0 (1.0 + 0.0j)] >>> sqrtm([[j,0],[0,j]]) [(0.707106781186547 + 0.707106781186547j) 0.0] [ 0.0 (0.707106781186547 + 0.707106781186547j)] A square root of a rotation matrix, giving the corresponding half-angle rotation matrix:: >>> t1 = 0.75 >>> t2 = t1 * 0.5 >>> A1 = matrix([[cos(t1), -sin(t1)], [sin(t1), cos(t1)]]) >>> A2 = matrix([[cos(t2), -sin(t2)], [sin(t2), cos(t2)]]) >>> sqrtm(A1) [0.930507621912314 -0.366272529086048] [0.366272529086048 0.930507621912314] >>> A2 [0.930507621912314 -0.366272529086048] [0.366272529086048 0.930507621912314] The identity `(A^2)^{1/2} = A` does not necessarily hold:: >>> A = matrix([[4,1,4],[7,8,9],[10,2,11]]) >>> sqrtm(A**2) [ 4.0 1.0 4.0] [ 7.0 8.0 9.0] [10.0 2.0 11.0] >>> sqrtm(A)**2 [ 4.0 1.0 4.0] [ 7.0 8.0 9.0] [10.0 2.0 11.0] >>> A = matrix([[-4,1,4],[7,-8,9],[10,2,11]]) >>> sqrtm(A**2) [ 7.43715112194995 -0.324127569985474 1.8481718827526] [-0.251549715716942 9.32699765900402 2.48221180985147] [ 4.11609388833616 0.775751877098258 13.017955697342] >>> chop(sqrtm(A)**2) [-4.0 1.0 4.0] [ 7.0 -8.0 9.0] [10.0 2.0 11.0] For some matrices, a square root does not exist:: >>> sqrtm([[0,1], [0,0]]) Traceback (most recent call last): ... ZeroDivisionError: matrix is numerically singular Two examples from the documentation for Matlab's ``sqrtm``:: >>> mp.dps = 15; mp.pretty = True >>> sqrtm([[7,10],[15,22]]) [1.56669890360128 1.74077655955698] [2.61116483933547 4.17786374293675] >>> >>> X = matrix(\ ... [[5,-4,1,0,0], ... [-4,6,-4,1,0], ... [1,-4,6,-4,1], ... [0,1,-4,6,-4], ... [0,0,1,-4,5]]) >>> Y = matrix(\ ... [[2,-1,-0,-0,-0], ... [-1,2,-1,0,-0], ... [0,-1,2,-1,0], ... [-0,0,-1,2,-1], ... [-0,-0,-0,-1,2]]) >>> mnorm(sqrtm(X) - Y) 4.53155328326114e-19 """ A = ctx.matrix(A) # Trivial if A*0 == A: return A prec = ctx.prec if _may_rotate: d = ctx.det(A) if abs(ctx.im(d)) < 16*ctx.eps and ctx.re(d) < 0: return ctx._sqrtm_rot(A, _may_rotate-1) try: ctx.prec += 10 tol = ctx.eps * 128 Y = A Z = I = A**0 k = 0 # Denman-Beavers iteration while 1: Yprev = Y try: Y, Z = 0.5*(Y+ctx.inverse(Z)), 0.5*(Z+ctx.inverse(Y)) except ZeroDivisionError: if _may_rotate: Y = ctx._sqrtm_rot(A, _may_rotate-1) break else: raise mag1 = ctx.mnorm(Y-Yprev, 'inf') mag2 = ctx.mnorm(Y, 'inf') if mag1 <= mag2*tol: break if _may_rotate and k > 6 and not mag1 < mag2 * 0.001: return ctx._sqrtm_rot(A, _may_rotate-1) k += 1 if k > ctx.prec: raise ctx.NoConvergence finally: ctx.prec = prec Y *= 1 return Y def logm(ctx, A): r""" Computes a logarithm of the square matrix `A`, i.e. returns a matrix `B = \log(A)` such that `\exp(B) = A`. The logarithm of a matrix, if it exists, is not unique. 
**Examples** Logarithms of some simple matrices:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> X = eye(3) >>> logm(X) [0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0] >>> logm(2*X) [0.693147180559945 0.0 0.0] [ 0.0 0.693147180559945 0.0] [ 0.0 0.0 0.693147180559945] >>> logm(expm(X)) [1.0 0.0 0.0] [0.0 1.0 0.0] [0.0 0.0 1.0] A logarithm of a complex matrix:: >>> X = matrix([[2+j, 1, 3], [1-j, 1-2*j, 1], [-4, -5, j]]) >>> B = logm(X) >>> nprint(B) [ (0.808757 + 0.107759j) (2.20752 + 0.202762j) (1.07376 - 0.773874j)] [ (0.905709 - 0.107795j) (0.0287395 - 0.824993j) (0.111619 + 0.514272j)] [(-0.930151 + 0.399512j) (-2.06266 - 0.674397j) (0.791552 + 0.519839j)] >>> chop(expm(B)) [(2.0 + 1.0j) 1.0 3.0] [(1.0 - 1.0j) (1.0 - 2.0j) 1.0] [ -4.0 -5.0 (0.0 + 1.0j)] A matrix `X` close to the identity matrix, for which `\log(\exp(X)) = \exp(\log(X)) = X` holds:: >>> X = eye(3) + hilbert(3)/4 >>> X [ 1.25 0.125 0.0833333333333333] [ 0.125 1.08333333333333 0.0625] [0.0833333333333333 0.0625 1.05] >>> logm(expm(X)) [ 1.25 0.125 0.0833333333333333] [ 0.125 1.08333333333333 0.0625] [0.0833333333333333 0.0625 1.05] >>> expm(logm(X)) [ 1.25 0.125 0.0833333333333333] [ 0.125 1.08333333333333 0.0625] [0.0833333333333333 0.0625 1.05] A logarithm of a rotation matrix, giving back the angle of the rotation:: >>> t = 3.7 >>> A = matrix([[cos(t),sin(t)],[-sin(t),cos(t)]]) >>> chop(logm(A)) [ 0.0 -2.58318530717959] [2.58318530717959 0.0] >>> (2*pi-t) 2.58318530717959 For some matrices, a logarithm does not exist:: >>> logm([[1,0], [0,0]]) Traceback (most recent call last): ... ZeroDivisionError: matrix is numerically singular Logarithm of a matrix with large entries:: >>> logm(hilbert(3) * 10**20).apply(re) [ 45.5597513593433 1.27721006042799 0.317662687717978] [ 1.27721006042799 42.5222778973542 2.24003708791604] [0.317662687717978 2.24003708791604 42.395212822267] """ A = ctx.matrix(A) prec = ctx.prec try: ctx.prec += 10 tol = ctx.eps * 128 I = A**0 B = A n = 0 while 1: B = ctx.sqrtm(B) n += 1 if ctx.mnorm(B-I, 'inf') < 0.125: break T = X = B-I L = X*0 k = 1 while 1: if k & 1: L += T / k else: L -= T / k T *= X if ctx.mnorm(T, 'inf') < tol: break k += 1 if k > ctx.prec: raise ctx.NoConvergence finally: ctx.prec = prec L *= 2**n return L def powm(ctx, A, r): r""" Computes `A^r = \exp(A \log r)` for a matrix `A` and complex number `r`. **Examples** Powers and inverse powers of a matrix:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> A = matrix([[4,1,4],[7,8,9],[10,2,11]]) >>> powm(A, 2) [ 63.0 20.0 69.0] [174.0 89.0 199.0] [164.0 48.0 179.0] >>> chop(powm(powm(A, 4), 1/4.)) [ 4.0 1.0 4.0] [ 7.0 8.0 9.0] [10.0 2.0 11.0] >>> powm(extraprec(20)(powm)(A, -4), -1/4.) 
[ 4.0 1.0 4.0] [ 7.0 8.0 9.0] [10.0 2.0 11.0] >>> chop(powm(powm(A, 1+0.5j), 1/(1+0.5j))) [ 4.0 1.0 4.0] [ 7.0 8.0 9.0] [10.0 2.0 11.0] >>> powm(extraprec(5)(powm)(A, -1.5), -1/(1.5)) [ 4.0 1.0 4.0] [ 7.0 8.0 9.0] [10.0 2.0 11.0] A Fibonacci-generating matrix:: >>> powm([[1,1],[1,0]], 10) [89.0 55.0] [55.0 34.0] >>> fib(10) 55.0 >>> powm([[1,1],[1,0]], 6.5) [(16.5166626964253 - 0.0121089837381789j) (10.2078589271083 + 0.0195927472575932j)] [(10.2078589271083 + 0.0195927472575932j) (6.30880376931698 - 0.0317017309957721j)] >>> (phi**6.5 - (1-phi)**6.5)/sqrt(5) (10.2078589271083 - 0.0195927472575932j) >>> powm([[1,1],[1,0]], 6.2) [ (14.3076953002666 - 0.008222855781077j) (8.81733464837593 + 0.0133048601383712j)] [(8.81733464837593 + 0.0133048601383712j) (5.49036065189071 - 0.0215277159194482j)] >>> (phi**6.2 - (1-phi)**6.2)/sqrt(5) (8.81733464837593 - 0.0133048601383712j) """ A = ctx.matrix(A) r = ctx.convert(r) prec = ctx.prec try: ctx.prec += 10 if ctx.isint(r): v = A ** int(r) elif ctx.isint(r*2): y = int(r*2) v = ctx.sqrtm(A) ** y else: v = ctx.expm(r*ctx.logm(A)) finally: ctx.prec = prec v *= 1 return v
18,609
33.981203
96
py
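The MatrixCalculusMethods class above supplies the analytic matrix functions: expm with method 'taylor' or 'pade', cosm and sinm built on expm, sqrtm via a Denman-Beavers iteration, logm via repeated square roots and a log series, and powm built on expm/logm. A short consistency check, assuming only the functions named in that source plus the usual mpmath helpers (matrix, mnorm):

from mpmath import mp, matrix, expm, logm, sqrtm, mnorm

mp.dps = 20
A = matrix([[2, 1], [0, 3]])

E = expm(A)                        # Taylor series with scaling and squaring by default
print(mnorm(logm(E) - A))          # should be tiny: logm undoes expm for this well-behaved A

B = sqrtm(A)                       # Denman-Beavers iteration per the source above
print(mnorm(B*B - A))              # should be tiny: B is a square root of A

print(mnorm(expm(A, method='pade') - E))   # the two expm methods should agree closely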
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/matrices/linalg.py
""" Linear algebra -------------- Linear equations ................ Basic linear algebra is implemented; you can for example solve the linear equation system:: x + 2*y = -10 3*x + 4*y = 10 using ``lu_solve``:: >>> from mpmath import * >>> mp.pretty = False >>> A = matrix([[1, 2], [3, 4]]) >>> b = matrix([-10, 10]) >>> x = lu_solve(A, b) >>> x matrix( [['30.0'], ['-20.0']]) If you don't trust the result, use ``residual`` to calculate the residual ||A*x-b||:: >>> residual(A, x, b) matrix( [['3.46944695195361e-18'], ['3.46944695195361e-18']]) >>> str(eps) '2.22044604925031e-16' As you can see, the solution is quite accurate. The error is caused by the inaccuracy of the internal floating point arithmetic. Though, it's even smaller than the current machine epsilon, which basically means you can trust the result. If you need more speed, use NumPy. Or choose a faster data type using the keyword ``force_type``:: >>> lu_solve(A, b, force_type=float) matrix( [['30.0'], ['-20.0']]) ``lu_solve`` accepts overdetermined systems. It is usually not possible to solve such systems, so the residual is minimized instead. Internally this is done using Cholesky decomposition to compute a least squares approximation. This means that that ``lu_solve`` will square the errors. If you can't afford this, use ``qr_solve`` instead. It is twice as slow but more accurate, and it calculates the residual automatically. Matrix factorization .................... The function ``lu`` computes an explicit LU factorization of a matrix:: >>> P, L, U = lu(matrix([[0,2,3],[4,5,6],[7,8,9]])) >>> print(P) [0.0 0.0 1.0] [1.0 0.0 0.0] [0.0 1.0 0.0] >>> print(L) [ 1.0 0.0 0.0] [ 0.0 1.0 0.0] [0.571428571428571 0.214285714285714 1.0] >>> print(U) [7.0 8.0 9.0] [0.0 2.0 3.0] [0.0 0.0 0.214285714285714] >>> print(P.T*L*U) [0.0 2.0 3.0] [4.0 5.0 6.0] [7.0 8.0 9.0] Interval matrices ----------------- Matrices may contain interval elements. This allows one to perform basic linear algebra operations such as matrix multiplication and equation solving with rigorous error bounds:: >>> a = iv.matrix([['0.1','0.3','1.0'], ... ['7.1','5.5','4.8'], ... ['3.2','4.4','5.6']], force_type=mpi) >>> >>> b = iv.matrix(['4','0.6','0.5'], force_type=mpi) >>> c = iv.lu_solve(a, b) >>> print(c) [ [5.2582327113062568605927528666, 5.25823271130625686059275702219]] [[-13.1550493962678375411635581388, -13.1550493962678375411635540152]] [ [7.42069154774972557628979076189, 7.42069154774972557628979190734]] >>> print(a*c) [ [3.99999999999999999999999844904, 4.00000000000000000000000155096]] [[0.599999999999999999999968898009, 0.600000000000000000000031763736]] [[0.499999999999999999999979320485, 0.500000000000000000000020679515]] """ # TODO: # *implement high-level qr() # *test unitvector # *iterative solving from copy import copy from ..libmp.backend import xrange class LinearAlgebraMethods(object): def LU_decomp(ctx, A, overwrite=False, use_cache=True): """ LU-factorization of a n*n matrix using the Gauss algorithm. Returns L and U in one matrix and the pivot indices. Use overwrite to specify whether A will be overwritten with L and U. 
""" if not A.rows == A.cols: raise ValueError('need n*n matrix') # get from cache if possible if use_cache and isinstance(A, ctx.matrix) and A._LU: return A._LU if not overwrite: orig = A A = A.copy() tol = ctx.absmin(ctx.mnorm(A,1) * ctx.eps) # each pivot element has to be bigger n = A.rows p = [None]*(n - 1) for j in xrange(n - 1): # pivoting, choose max(abs(reciprocal row sum)*abs(pivot element)) biggest = 0 for k in xrange(j, n): s = ctx.fsum([ctx.absmin(A[k,l]) for l in xrange(j, n)]) if ctx.absmin(s) <= tol: raise ZeroDivisionError('matrix is numerically singular') current = 1/s * ctx.absmin(A[k,j]) if current > biggest: # TODO: what if equal? biggest = current p[j] = k # swap rows according to p ctx.swap_row(A, j, p[j]) if ctx.absmin(A[j,j]) <= tol: raise ZeroDivisionError('matrix is numerically singular') # calculate elimination factors and add rows for i in xrange(j + 1, n): A[i,j] /= A[j,j] for k in xrange(j + 1, n): A[i,k] -= A[i,j]*A[j,k] if ctx.absmin(A[n - 1,n - 1]) <= tol: raise ZeroDivisionError('matrix is numerically singular') # cache decomposition if not overwrite and isinstance(orig, ctx.matrix): orig._LU = (A, p) return A, p def L_solve(ctx, L, b, p=None): """ Solve the lower part of a LU factorized matrix for y. """ if L.rows != L.cols: raise RuntimeError("need n*n matrix") n = L.rows if len(b) != n: raise ValueError("Value should be equal to n") b = copy(b) if p: # swap b according to p for k in xrange(0, len(p)): ctx.swap_row(b, k, p[k]) # solve for i in xrange(1, n): for j in xrange(i): b[i] -= L[i,j] * b[j] return b def U_solve(ctx, U, y): """ Solve the upper part of a LU factorized matrix for x. """ if U.rows != U.cols: raise RuntimeError("need n*n matrix") n = U.rows if len(y) != n: raise ValueError("Value should be equal to n") x = copy(y) for i in xrange(n - 1, -1, -1): for j in xrange(i + 1, n): x[i] -= U[i,j] * x[j] x[i] /= U[i,i] return x def lu_solve(ctx, A, b, **kwargs): """ Ax = b => x Solve a determined or overdetermined linear equations system. Fast LU decomposition is used, which is less accurate than QR decomposition (especially for overdetermined systems), but it's twice as efficient. Use qr_solve if you want more precision or have to solve a very ill- conditioned system. If you specify real=True, it does not check for overdeterminded complex systems. """ prec = ctx.prec try: ctx.prec += 10 # do not overwrite A nor b A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy() if A.rows < A.cols: raise ValueError('cannot solve underdetermined system') if A.rows > A.cols: # use least-squares method if overdetermined # (this increases errors) AH = A.H A = AH * A b = AH * b if (kwargs.get('real', False) or not sum(type(i) is ctx.mpc for i in A)): # TODO: necessary to check also b? x = ctx.cholesky_solve(A, b) else: x = ctx.lu_solve(A, b) else: # LU factorization A, p = ctx.LU_decomp(A) b = ctx.L_solve(A, b, p) x = ctx.U_solve(A, b) finally: ctx.prec = prec return x def improve_solution(ctx, A, x, b, maxsteps=1): """ Improve a solution to a linear equation system iteratively. This re-uses the LU decomposition and is thus cheap. Usually 3 up to 4 iterations are giving the maximal improvement. """ if A.rows != A.cols: raise RuntimeError("need n*n matrix") # TODO: really? for _ in xrange(maxsteps): r = ctx.residual(A, x, b) if ctx.norm(r, 2) < 10*ctx.eps: break # this uses cached LU decomposition and is thus cheap dx = ctx.lu_solve(A, -r) x += dx return x def lu(ctx, A): """ A -> P, L, U LU factorisation of a square matrix A. 
L is the lower, U the upper part. P is the permutation matrix indicating the row swaps. P*A = L*U If you need efficiency, use the low-level method LU_decomp instead, it's much more memory efficient. """ # get factorization A, p = ctx.LU_decomp(A) n = A.rows L = ctx.matrix(n) U = ctx.matrix(n) for i in xrange(n): for j in xrange(n): if i > j: L[i,j] = A[i,j] elif i == j: L[i,j] = 1 U[i,j] = A[i,j] else: U[i,j] = A[i,j] # calculate permutation matrix P = ctx.eye(n) for k in xrange(len(p)): ctx.swap_row(P, k, p[k]) return P, L, U def unitvector(ctx, n, i): """ Return the i-th n-dimensional unit vector. """ assert 0 < i <= n, 'this unit vector does not exist' return [ctx.zero]*(i-1) + [ctx.one] + [ctx.zero]*(n-i) def inverse(ctx, A, **kwargs): """ Calculate the inverse of a matrix. If you want to solve an equation system Ax = b, it's recommended to use solve(A, b) instead, it's about 3 times more efficient. """ prec = ctx.prec try: ctx.prec += 10 # do not overwrite A A = ctx.matrix(A, **kwargs).copy() n = A.rows # get LU factorisation A, p = ctx.LU_decomp(A) cols = [] # calculate unit vectors and solve corresponding system to get columns for i in xrange(1, n + 1): e = ctx.unitvector(n, i) y = ctx.L_solve(A, e, p) cols.append(ctx.U_solve(A, y)) # convert columns to matrix inv = [] for i in xrange(n): row = [] for j in xrange(n): row.append(cols[j][i]) inv.append(row) result = ctx.matrix(inv, **kwargs) finally: ctx.prec = prec return result def householder(ctx, A): """ (A|b) -> H, p, x, res (A|b) is the coefficient matrix with left hand side of an optionally overdetermined linear equation system. H and p contain all information about the transformation matrices. x is the solution, res the residual. """ if not isinstance(A, ctx.matrix): raise TypeError("A should be a type of ctx.matrix") m = A.rows n = A.cols if m < n - 1: raise RuntimeError("Columns should not be less than rows") # calculate Householder matrix p = [] for j in xrange(0, n - 1): s = ctx.fsum((A[i,j])**2 for i in xrange(j, m)) if not abs(s) > ctx.eps: raise ValueError('matrix is numerically singular') p.append(-ctx.sign(A[j,j]) * ctx.sqrt(s)) kappa = ctx.one / (s - p[j] * A[j,j]) A[j,j] -= p[j] for k in xrange(j+1, n): y = ctx.fsum(A[i,j] * A[i,k] for i in xrange(j, m)) * kappa for i in xrange(j, m): A[i,k] -= A[i,j] * y # solve Rx = c1 x = [A[i,n - 1] for i in xrange(n - 1)] for i in xrange(n - 2, -1, -1): x[i] -= ctx.fsum(A[i,j] * x[j] for j in xrange(i + 1, n - 1)) x[i] /= p[i] # calculate residual if not m == n - 1: r = [A[m-1-i, n-1] for i in xrange(m - n + 1)] else: # determined system, residual should be 0 r = [0]*m # maybe a bad idea, changing r[i] will change all elements return A, p, x, r #def qr(ctx, A): # """ # A -> Q, R # # QR factorisation of a square matrix A using Householder decomposition. # Q is orthogonal, this leads to very few numerical errors. # # A = Q*R # """ # H, p, x, res = householder(A) # TODO: implement this def residual(ctx, A, x, b, **kwargs): """ Calculate the residual of a solution to a linear equation system. r = A*x - b for A*x = b """ oldprec = ctx.prec try: ctx.prec *= 2 A, x, b = ctx.matrix(A, **kwargs), ctx.matrix(x, **kwargs), ctx.matrix(b, **kwargs) return A*x - b finally: ctx.prec = oldprec def qr_solve(ctx, A, b, norm=None, **kwargs): """ Ax = b => x, ||Ax - b|| Solve a determined or overdetermined linear equations system and calculate the norm of the residual (error). 
QR decomposition using Householder factorization is applied, which gives very accurate results even for ill-conditioned matrices. qr_solve is twice as efficient. """ if norm is None: norm = ctx.norm prec = ctx.prec try: ctx.prec += 10 # do not overwrite A nor b A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy() if A.rows < A.cols: raise ValueError('cannot solve underdetermined system') H, p, x, r = ctx.householder(ctx.extend(A, b)) res = ctx.norm(r) # calculate residual "manually" for determined systems if res == 0: res = ctx.norm(ctx.residual(A, x, b)) return ctx.matrix(x, **kwargs), res finally: ctx.prec = prec def cholesky(ctx, A, tol=None): r""" Cholesky decomposition of a symmetric positive-definite matrix `A`. Returns a lower triangular matrix `L` such that `A = L \times L^T`. More generally, for a complex Hermitian positive-definite matrix, a Cholesky decomposition satisfying `A = L \times L^H` is returned. The Cholesky decomposition can be used to solve linear equation systems twice as efficiently as LU decomposition, or to test whether `A` is positive-definite. The optional parameter ``tol`` determines the tolerance for verifying positive-definiteness. **Examples** Cholesky decomposition of a positive-definite symmetric matrix:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> A = eye(3) + hilbert(3) >>> nprint(A) [ 2.0 0.5 0.333333] [ 0.5 1.33333 0.25] [0.333333 0.25 1.2] >>> L = cholesky(A) >>> nprint(L) [ 1.41421 0.0 0.0] [0.353553 1.09924 0.0] [0.235702 0.15162 1.05899] >>> chop(A - L*L.T) [0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0] Cholesky decomposition of a Hermitian matrix:: >>> A = eye(3) + matrix([[0,0.25j,-0.5j],[-0.25j,0,0],[0.5j,0,0]]) >>> L = cholesky(A) >>> nprint(L) [ 1.0 0.0 0.0] [(0.0 - 0.25j) (0.968246 + 0.0j) 0.0] [ (0.0 + 0.5j) (0.129099 + 0.0j) (0.856349 + 0.0j)] >>> chop(A - L*L.H) [0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0] Attempted Cholesky decomposition of a matrix that is not positive definite:: >>> A = -eye(3) + hilbert(3) >>> L = cholesky(A) Traceback (most recent call last): ... ValueError: matrix is not positive-definite **References** 1. [Wikipedia]_ http://en.wikipedia.org/wiki/Cholesky_decomposition """ if not isinstance(A, ctx.matrix): raise RuntimeError("A should be a type of ctx.matrix") if not A.rows == A.cols: raise ValueError('need n*n matrix') if tol is None: tol = +ctx.eps n = A.rows L = ctx.matrix(n) for j in xrange(n): c = ctx.re(A[j,j]) if abs(c-A[j,j]) > tol: raise ValueError('matrix is not Hermitian') s = c - ctx.fsum((L[j,k] for k in xrange(j)), absolute=True, squared=True) if s < tol: raise ValueError('matrix is not positive-definite') L[j,j] = ctx.sqrt(s) for i in xrange(j, n): it1 = (L[i,k] for k in xrange(j)) it2 = (L[j,k] for k in xrange(j)) t = ctx.fdot(it1, it2, conjugate=True) L[i,j] = (A[i,j] - t) / L[j,j] return L def cholesky_solve(ctx, A, b, **kwargs): """ Ax = b => x Solve a symmetric positive-definite linear equation system. This is twice as efficient as lu_solve. 
Typical use cases: * A.T*A * Hessian matrix * differential equations """ prec = ctx.prec try: ctx.prec += 10 # do not overwrite A nor b A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy() if A.rows != A.cols: raise ValueError('can only solve determined system') # Cholesky factorization L = ctx.cholesky(A) # solve n = L.rows if len(b) != n: raise ValueError("Value should be equal to n") for i in xrange(n): b[i] -= ctx.fsum(L[i,j] * b[j] for j in xrange(i)) b[i] /= L[i,i] x = ctx.U_solve(L.T, b) return x finally: ctx.prec = prec def det(ctx, A): """ Calculate the determinant of a matrix. """ prec = ctx.prec try: # do not overwrite A A = ctx.matrix(A).copy() # use LU factorization to calculate determinant try: R, p = ctx.LU_decomp(A) except ZeroDivisionError: return 0 z = 1 for i, e in enumerate(p): if i != e: z *= -1 for i in xrange(A.rows): z *= R[i,i] return z finally: ctx.prec = prec def cond(ctx, A, norm=None): """ Calculate the condition number of a matrix using a specified matrix norm. The condition number estimates the sensitivity of a matrix to errors. Example: small input errors for ill-conditioned coefficient matrices alter the solution of the system dramatically. For ill-conditioned matrices it's recommended to use qr_solve() instead of lu_solve(). This does not help with input errors however, it just avoids to add additional errors. Definition: cond(A) = ||A|| * ||A**-1|| """ if norm is None: norm = lambda x: ctx.mnorm(x,1) return norm(A) * norm(ctx.inverse(A)) def lu_solve_mat(ctx, a, b): """Solve a * x = b where a and b are matrices.""" r = ctx.matrix(a.rows, b.cols) for i in range(b.cols): c = ctx.lu_solve(a, b.column(i)) for j in range(len(c)): r[j, i] = c[j] return r def qr(ctx, A, mode = 'full', edps = 10): """ Compute a QR factorization $A = QR$ where A is an m x n matrix of real or complex numbers where m >= n mode has following meanings: (1) mode = 'raw' returns two matrixes (A, tau) in the internal format used by LAPACK (2) mode = 'skinny' returns the leading n columns of Q and n rows of R (3) Any other value returns the leading m columns of Q and m rows of R edps is the increase in mp precision used for calculations **Examples** >>> from mpmath import * >>> mp.dps = 15 >>> mp.pretty = True >>> A = matrix([[1, 2], [3, 4], [1, 1]]) >>> Q, R = qr(A) >>> Q [-0.301511344577764 0.861640436855329 0.408248290463863] [-0.904534033733291 -0.123091490979333 -0.408248290463863] [-0.301511344577764 -0.492365963917331 0.816496580927726] >>> R [-3.3166247903554 -4.52267016866645] [ 0.0 0.738548945875996] [ 0.0 0.0] >>> Q * R [1.0 2.0] [3.0 4.0] [1.0 1.0] >>> chop(Q.T * Q) [1.0 0.0 0.0] [0.0 1.0 0.0] [0.0 0.0 1.0] >>> B = matrix([[1+0j, 2-3j], [3+j, 4+5j]]) >>> Q, R = qr(B) >>> nprint(Q) [ (-0.301511 + 0.0j) (0.0695795 - 0.95092j)] [(-0.904534 - 0.301511j) (-0.115966 + 0.278318j)] >>> nprint(R) [(-3.31662 + 0.0j) (-5.72872 - 2.41209j)] [ 0.0 (3.91965 + 0.0j)] >>> Q * R [(1.0 + 0.0j) (2.0 - 3.0j)] [(3.0 + 1.0j) (4.0 + 5.0j)] >>> chop(Q.T * Q.conjugate()) [1.0 0.0] [0.0 1.0] """ # check values before continuing assert isinstance(A, ctx.matrix) m = A.rows n = A.cols assert n > 1 assert m >= n assert edps >= 0 # check for complex data type cmplx = any(type(x) is ctx.mpc for x in A) # temporarily increase the precision and initialize with ctx.extradps(edps): tau = ctx.matrix(n,1) A = A.copy() # --------------- # FACTOR MATRIX A # --------------- if cmplx: one = ctx.mpc('1.0', '0.0') zero = ctx.mpc('0.0', '0.0') rzero = ctx.mpf('0.0') # main loop to factor A (complex) for 
j in xrange(0, n): alpha = A[j,j] alphr = ctx.re(alpha) alphi = ctx.im(alpha) if (m-j) >= 2: xnorm = ctx.fsum( A[i,j]*ctx.conj(A[i,j]) for i in xrange(j+1, m) ) xnorm = ctx.re( ctx.sqrt(xnorm) ) else: xnorm = rzero if (xnorm == rzero) and (alphi == rzero): tau[j] = zero continue if alphr < rzero: beta = ctx.sqrt(alphr**2 + alphi**2 + xnorm**2) else: beta = -ctx.sqrt(alphr**2 + alphi**2 + xnorm**2) tau[j] = ctx.mpc( (beta - alphr) / beta, -alphi / beta ) t = -ctx.conj(tau[j]) za = one / (alpha - beta) for i in xrange(j+1, m): A[i,j] *= za A[j,j] = one for k in xrange(j+1, n): y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j, m)) temp = t * ctx.conj(y) for i in xrange(j, m): A[i,k] += A[i,j] * temp A[j,j] = ctx.mpc(beta, '0.0') else: one = ctx.mpf('1.0') zero = ctx.mpf('0.0') # main loop to factor A (real) for j in xrange(0, n): alpha = A[j,j] if (m-j) > 2: xnorm = ctx.fsum( (A[i,j])**2 for i in xrange(j+1, m) ) xnorm = ctx.sqrt(xnorm) elif (m-j) == 2: xnorm = abs( A[m-1,j] ) else: xnorm = zero if xnorm == zero: tau[j] = zero continue if alpha < zero: beta = ctx.sqrt(alpha**2 + xnorm**2) else: beta = -ctx.sqrt(alpha**2 + xnorm**2) tau[j] = (beta - alpha) / beta t = -tau[j] da = one / (alpha - beta) for i in xrange(j+1, m): A[i,j] *= da A[j,j] = one for k in xrange(j+1, n): y = ctx.fsum( A[i,j] * A[i,k] for i in xrange(j, m) ) temp = t * y for i in xrange(j,m): A[i,k] += A[i,j] * temp A[j,j] = beta # return factorization in same internal format as LAPACK if (mode == 'raw') or (mode == 'RAW'): return A, tau # ---------------------------------- # FORM Q USING BACKWARD ACCUMULATION # ---------------------------------- # form R before the values are overwritten R = A.copy() for j in xrange(0, n): for i in xrange(j+1, m): R[i,j] = zero # set the value of p (number of columns of Q to return) p = m if (mode == 'skinny') or (mode == 'SKINNY'): p = n # add columns to A if needed and initialize A.cols += (p-n) for j in xrange(0, p): A[j,j] = one for i in xrange(0, j): A[i,j] = zero # main loop to form Q for j in xrange(n-1, -1, -1): t = -tau[j] A[j,j] += t for k in xrange(j+1, p): if cmplx: y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j+1, m)) temp = t * ctx.conj(y) else: y = ctx.fsum(A[i,j] * A[i,k] for i in xrange(j+1, m)) temp = t * y A[j,k] = temp for i in xrange(j+1, m): A[i,k] += A[i,j] * temp for i in xrange(j+1, m): A[i, j] *= t return A, R[0:p,0:n] # ------------------ # END OF FUNCTION QR # ------------------
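The qr routine above supports 'raw', 'skinny' and full output modes. A small usage sketch for the skinny mode on a rectangular matrix, assuming a standard mpmath install (the matrix is the same 3x2 example used in the docstring):

from mpmath import mp, matrix, chop

mp.dps = 15
A = matrix([[1, 2], [3, 4], [1, 1]])   # 3x2 input, m >= n as required
Q, R = mp.qr(A, mode='skinny')         # leading n columns of Q, n rows of R
print(chop(Q.T * Q))                   # 2x2 identity: the columns of Q are orthonormal
print(chop(Q * R - A))                 # reconstruction residual is the zero matrix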
file_length: 26,999
avg_line_length: 33.005038
max_line_length: 95
extension_type: py
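The routines in this record (det, cond, lu_solve_mat, together with the Cholesky-based solver whose body opens the record) are all exposed on the mpmath context. A minimal sketch, assuming a standard mpmath install; matrix entries and precision are arbitrary demonstration values:

from mpmath import mp, matrix

mp.dps = 25                                      # arbitrary working precision
A = matrix([[4, 1, 2], [1, 3, 0], [2, 0, 5]])
b = matrix([1, 2, 3])

print(mp.det(A))                      # determinant via LU factorization
print(mp.cond(A))                     # condition number ||A|| * ||A**-1||
x = mp.lu_solve(A, b)                 # solve the linear system A*x = b
print(mp.chop(A * x - b))             # residual prints as the zero vector
print(mp.lu_solve_mat(A, mp.eye(3)))  # column-wise solve: the result is A**-1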
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/matrices/eigen.py
#!/usr/bin/python # -*- coding: utf-8 -*- ################################################################################################## # module for the eigenvalue problem # Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) # # todo: # - implement balancing # - agressive early deflation # ################################################################################################## """ The eigenvalue problem ---------------------- This file contains routines for the eigenvalue problem. high level routines: hessenberg : reduction of a real or complex square matrix to upper Hessenberg form schur : reduction of a real or complex square matrix to upper Schur form eig : eigenvalues and eigenvectors of a real or complex square matrix low level routines: hessenberg_reduce_0 : reduction of a real or complex square matrix to upper Hessenberg form hessenberg_reduce_1 : auxiliary routine to hessenberg_reduce_0 qr_step : a single implicitly shifted QR step for an upper Hessenberg matrix hessenberg_qr : Schur decomposition of an upper Hessenberg matrix eig_tr_r : right eigenvectors of an upper triangular matrix eig_tr_l : left eigenvectors of an upper triangular matrix """ from ..libmp.backend import xrange class Eigen(object): pass def defun(f): setattr(Eigen, f.__name__, f) def hessenberg_reduce_0(ctx, A, T): """ This routine computes the (upper) Hessenberg decomposition of a square matrix A. Given A, an unitary matrix Q is calculated such that Q' A Q = H and Q' Q = Q Q' = 1 where H is an upper Hessenberg matrix, meaning that it only contains zeros below the first subdiagonal. Here ' denotes the hermitian transpose (i.e. transposition and conjugation). parameters: A (input/output) On input, A contains the square matrix A of dimension (n,n). On output, A contains a compressed representation of Q and H. T (output) An array of length n containing the first elements of the Householder reflectors. """ # internally we work with householder reflections from the right. # let u be a row vector (i.e. u[i]=A[i,:i]). then # Q is build up by reflectors of the type (1-v'v) where v is a suitable # modification of u. these reflectors are applyed to A from the right. # because we work with reflectors from the right we have to start with # the bottom row of A and work then upwards (this corresponds to # some kind of RQ decomposition). # the first part of the vectors v (i.e. A[i,:(i-1)]) are stored as row vectors # in the lower left part of A (excluding the diagonal and subdiagonal). # the last entry of v is stored in T. # the upper right part of A (including diagonal and subdiagonal) becomes H. 
n = A.rows if n <= 2: return for i in xrange(n-1, 1, -1): # scale the vector scale = 0 for k in xrange(0, i): scale += abs(ctx.re(A[i,k])) + abs(ctx.im(A[i,k])) scale_inv = 0 if scale != 0: scale_inv = 1 / scale if scale == 0 or ctx.isinf(scale_inv): # sadly there are floating point numbers not equal to zero whose reciprocal is infinity T[i] = 0 A[i,i-1] = 0 continue # calculate parameters for housholder transformation H = 0 for k in xrange(0, i): A[i,k] *= scale_inv rr = ctx.re(A[i,k]) ii = ctx.im(A[i,k]) H += rr * rr + ii * ii F = A[i,i-1] f = abs(F) G = ctx.sqrt(H) A[i,i-1] = - G * scale if f == 0: T[i] = G else: ff = F / f T[i] = F + G * ff A[i,i-1] *= ff H += G * f H = 1 / ctx.sqrt(H) T[i] *= H for k in xrange(0, i - 1): A[i,k] *= H for j in xrange(0, i): # apply housholder transformation (from right) G = ctx.conj(T[i]) * A[j,i-1] for k in xrange(0, i-1): G += ctx.conj(A[i,k]) * A[j,k] A[j,i-1] -= G * T[i] for k in xrange(0, i-1): A[j,k] -= G * A[i,k] for j in xrange(0, n): # apply housholder transformation (from left) G = T[i] * A[i-1,j] for k in xrange(0, i-1): G += A[i,k] * A[k,j] A[i-1,j] -= G * ctx.conj(T[i]) for k in xrange(0, i-1): A[k,j] -= G * ctx.conj(A[i,k]) def hessenberg_reduce_1(ctx, A, T): """ This routine forms the unitary matrix Q described in hessenberg_reduce_0. parameters: A (input/output) On input, A is the same matrix as delivered by hessenberg_reduce_0. On output, A is set to Q. T (input) On input, T is the same array as delivered by hessenberg_reduce_0. """ n = A.rows if n == 1: A[0,0] = 1 return A[0,0] = A[1,1] = 1 A[0,1] = A[1,0] = 0 for i in xrange(2, n): if T[i] != 0: for j in xrange(0, i): G = T[i] * A[i-1,j] for k in xrange(0, i-1): G += A[i,k] * A[k,j] A[i-1,j] -= G * ctx.conj(T[i]) for k in xrange(0, i-1): A[k,j] -= G * ctx.conj(A[i,k]) A[i,i] = 1 for j in xrange(0, i): A[j,i] = A[i,j] = 0 @defun def hessenberg(ctx, A, overwrite_a = False): """ This routine computes the Hessenberg decomposition of a square matrix A. Given A, an unitary matrix Q is determined such that Q' A Q = H and Q' Q = Q Q' = 1 where H is an upper right Hessenberg matrix. Here ' denotes the hermitian transpose (i.e. transposition and conjugation). input: A : a real or complex square matrix overwrite_a : if true, allows modification of A which may improve performance. if false, A is not modified. output: Q : an unitary matrix H : an upper right Hessenberg matrix example: >>> from mpmath import mp >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) >>> Q, H = mp.hessenberg(A) >>> mp.nprint(H, 3) # doctest:+SKIP [ 3.15 2.23 4.44] [-0.769 4.85 3.05] [ 0.0 3.61 7.0] >>> print(mp.chop(A - Q * H * Q.transpose_conj())) [0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0] return value: (Q, H) """ n = A.rows if n == 1: return (ctx.matrix([[1]]), A) if not overwrite_a: A = A.copy() T = ctx.matrix(n, 1) hessenberg_reduce_0(ctx, A, T) Q = A.copy() hessenberg_reduce_1(ctx, Q, T) for x in xrange(n): for y in xrange(x+2, n): A[y,x] = 0 return Q, A ########################################################################### def qr_step(ctx, n0, n1, A, Q, shift): """ This subroutine executes a single implicitly shifted QR step applied to an upper Hessenberg matrix A. Given A and shift as input, first an QR decomposition is calculated: Q R = A - shift * 1 . The output is then following matrix: R Q + shift * 1 parameters: n0, n1 (input) Two integers which specify the submatrix A[n0:n1,n0:n1] on which this subroutine operators. 
The subdiagonal elements to the left and below this submatrix must be deflated (i.e. zero). following restriction is imposed: n1>=n0+2 A (input/output) On input, A is an upper Hessenberg matrix. On output, A is replaced by "R Q + shift * 1" Q (input/output) The parameter Q is multiplied by the unitary matrix Q arising from the QR decomposition. Q can also be false, in which case the unitary matrix Q is not computated. shift (input) a complex number specifying the shift. idealy close to an eigenvalue of the bottemmost part of the submatrix A[n0:n1,n0:n1]. references: Stoer, Bulirsch - Introduction to Numerical Analysis. Kresser : Numerical Methods for General and Structured Eigenvalue Problems """ # implicitly shifted and bulge chasing is explained at p.398/399 in "Stoer, Bulirsch - Introduction to Numerical Analysis" # for bulge chasing see also "Watkins - The Matrix Eigenvalue Problem" sec.4.5,p.173 # the Givens rotation we used is determined as follows: let c,s be two complex # numbers. then we have following relation: # # v = sqrt(|c|^2 + |s|^2) # # 1/v [ c~ s~] [c] = [v] # [-s c ] [s] [0] # # the matrix on the left is our Givens rotation. n = A.rows # first step # calculate givens rotation c = A[n0 ,n0] - shift s = A[n0+1,n0] v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s))) if v == 0: v = 1 c = 1 s = 0 else: c /= v s /= v for k in xrange(n0, n): # apply givens rotation from the left x = A[n0 ,k] y = A[n0+1,k] A[n0 ,k] = ctx.conj(c) * x + ctx.conj(s) * y A[n0+1,k] = -s * x + c * y for k in xrange(min(n1, n0+3)): # apply givens rotation from the right x = A[k,n0 ] y = A[k,n0+1] A[k,n0 ] = c * x + s * y A[k,n0+1] = -ctx.conj(s) * x + ctx.conj(c) * y if not isinstance(Q, bool): for k in xrange(n): # eigenvectors x = Q[k,n0 ] y = Q[k,n0+1] Q[k,n0 ] = c * x + s * y Q[k,n0+1] = -ctx.conj(s) * x + ctx.conj(c) * y # chase the bulge for j in xrange(n0, n1 - 2): # calculate givens rotation c = A[j+1,j] s = A[j+2,j] v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s))) if v == 0: A[j+1,j] = 0 v = 1 c = 1 s = 0 else: A[j+1,j] = v c /= v s /= v A[j+2,j] = 0 for k in xrange(j+1, n): # apply givens rotation from the left x = A[j+1,k] y = A[j+2,k] A[j+1,k] = ctx.conj(c) * x + ctx.conj(s) * y A[j+2,k] = -s * x + c * y for k in xrange(0, min(n1, j+4)): # apply givens rotation from the right x = A[k,j+1] y = A[k,j+2] A[k,j+1] = c * x + s * y A[k,j+2] = -ctx.conj(s) * x + ctx.conj(c) * y if not isinstance(Q, bool): for k in xrange(0, n): # eigenvectors x = Q[k,j+1] y = Q[k,j+2] Q[k,j+1] = c * x + s * y Q[k,j+2] = -ctx.conj(s) * x + ctx.conj(c) * y def hessenberg_qr(ctx, A, Q): """ This routine computes the Schur decomposition of an upper Hessenberg matrix A. Given A, an unitary matrix Q is determined such that Q' A Q = R and Q' Q = Q Q' = 1 where R is an upper right triangular matrix. Here ' denotes the hermitian transpose (i.e. transposition and conjugation). parameters: A (input/output) On input, A contains an upper Hessenberg matrix. On output, A is replace by the upper right triangluar matrix R. Q (input/output) The parameter Q is multiplied by the unitary matrix Q arising from the Schur decomposition. Q can also be false, in which case the unitary matrix Q is not computated. 
""" n = A.rows norm = 0 for x in xrange(n): for y in xrange(min(x+2, n)): norm += ctx.re(A[y,x]) ** 2 + ctx.im(A[y,x]) ** 2 norm = ctx.sqrt(norm) / n if norm == 0: return n0 = 0 n1 = n eps = ctx.eps / (100 * n) maxits = ctx.dps * 4 its = totalits = 0 while 1: # kressner p.32 algo 3 # the active submatrix is A[n0:n1,n0:n1] k = n0 while k + 1 < n1: s = abs(ctx.re(A[k,k])) + abs(ctx.im(A[k,k])) + abs(ctx.re(A[k+1,k+1])) + abs(ctx.im(A[k+1,k+1])) if s < eps * norm: s = norm if abs(A[k+1,k]) < eps * s: break k += 1 if k + 1 < n1: # deflation found at position (k+1, k) A[k+1,k] = 0 n0 = k + 1 its = 0 if n0 + 1 >= n1: # block of size at most two has converged n0 = 0 n1 = k + 1 if n1 < 2: # QR algorithm has converged return else: if (its % 30) == 10: # exceptional shift shift = A[n1-1,n1-2] elif (its % 30) == 20: # exceptional shift shift = abs(A[n1-1,n1-2]) elif (its % 30) == 29: # exceptional shift shift = norm else: # A = [ a b ] det(x-A)=x*x-x*tr(A)+det(A) # [ c d ] # # eigenvalues bad: (tr(A)+sqrt((tr(A))**2-4*det(A)))/2 # bad because of cancellation if |c| is small and |a-d| is small, too. # # eigenvalues good: (a+d+sqrt((a-d)**2+4*b*c))/2 t = A[n1-2,n1-2] + A[n1-1,n1-1] s = (A[n1-1,n1-1] - A[n1-2,n1-2]) ** 2 + 4 * A[n1-1,n1-2] * A[n1-2,n1-1] if ctx.re(s) > 0: s = ctx.sqrt(s) else: s = ctx.sqrt(-s) * 1j a = (t + s) / 2 b = (t - s) / 2 if abs(A[n1-1,n1-1] - a) > abs(A[n1-1,n1-1] - b): shift = b else: shift = a its += 1 totalits += 1 qr_step(ctx, n0, n1, A, Q, shift) if its > maxits: raise RuntimeError("qr: failed to converge after %d steps" % its) @defun def schur(ctx, A, overwrite_a = False): """ This routine computes the Schur decomposition of a square matrix A. Given A, an unitary matrix Q is determined such that Q' A Q = R and Q' Q = Q Q' = 1 where R is an upper right triangular matrix. Here ' denotes the hermitian transpose (i.e. transposition and conjugation). input: A : a real or complex square matrix overwrite_a : if true, allows modification of A which may improve performance. if false, A is not modified. output: Q : an unitary matrix R : an upper right triangular matrix return value: (Q, R) example: >>> from mpmath import mp >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) >>> Q, R = mp.schur(A) >>> mp.nprint(R, 3) # doctest:+SKIP [2.0 0.417 -2.53] [0.0 4.0 -4.74] [0.0 0.0 9.0] >>> print(mp.chop(A - Q * R * Q.transpose_conj())) [0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0] warning: The Schur decomposition is not unique. """ n = A.rows if n == 1: return (ctx.matrix([[1]]), A) if not overwrite_a: A = A.copy() T = ctx.matrix(n, 1) hessenberg_reduce_0(ctx, A, T) Q = A.copy() hessenberg_reduce_1(ctx, Q, T) for x in xrange(n): for y in xrange(x + 2, n): A[y,x] = 0 hessenberg_qr(ctx, A, Q) return Q, A def eig_tr_r(ctx, A): """ This routine calculates the right eigenvectors of an upper right triangular matrix. 
input: A an upper right triangular matrix output: ER a matrix whose columns form the right eigenvectors of A return value: ER """ # this subroutine is inspired by the lapack routines ctrevc.f,clatrs.f n = A.rows ER = ctx.eye(n) eps = ctx.eps unfl = ctx.ldexp(ctx.one, -ctx.prec * 30) # since mpmath effectively has no limits on the exponent, we simply scale doubles up # original double has prec*20 smlnum = unfl * (n / eps) simin = 1 / ctx.sqrt(eps) rmax = 1 for i in xrange(1, n): s = A[i,i] smin = max(eps * abs(s), smlnum) for j in xrange(i - 1, -1, -1): r = 0 for k in xrange(j + 1, i + 1): r += A[j,k] * ER[k,i] t = A[j,j] - s if abs(t) < smin: t = smin r = -r / t ER[j,i] = r rmax = max(rmax, abs(r)) if rmax > simin: for k in xrange(j, i+1): ER[k,i] /= rmax rmax = 1 if rmax != 1: for k in xrange(0, i + 1): ER[k,i] /= rmax return ER def eig_tr_l(ctx, A): """ This routine calculates the left eigenvectors of an upper right triangular matrix. input: A an upper right triangular matrix output: EL a matrix whose rows form the left eigenvectors of A return value: EL """ n = A.rows EL = ctx.eye(n) eps = ctx.eps unfl = ctx.ldexp(ctx.one, -ctx.prec * 30) # since mpmath effectively has no limits on the exponent, we simply scale doubles up # original double has prec*20 smlnum = unfl * (n / eps) simin = 1 / ctx.sqrt(eps) rmax = 1 for i in xrange(0, n - 1): s = A[i,i] smin = max(eps * abs(s), smlnum) for j in xrange(i + 1, n): r = 0 for k in xrange(i, j): r += EL[i,k] * A[k,j] t = A[j,j] - s if abs(t) < smin: t = smin r = -r / t EL[i,j] = r rmax = max(rmax, abs(r)) if rmax > simin: for k in xrange(i, j + 1): EL[i,k] /= rmax rmax = 1 if rmax != 1: for k in xrange(i, n): EL[i,k] /= rmax return EL @defun def eig(ctx, A, left = False, right = True, overwrite_a = False): """ This routine computes the eigenvalues and optionally the left and right eigenvectors of a square matrix A. Given A, a vector E and matrices ER and EL are calculated such that A ER[:,i] = E[i] ER[:,i] EL[i,:] A = EL[i,:] E[i] E contains the eigenvalues of A. The columns of ER contain the right eigenvectors of A whereas the rows of EL contain the left eigenvectors. input: A : a real or complex square matrix of shape (n, n) left : if true, the left eigenvectors are calulated. right : if true, the right eigenvectors are calculated. overwrite_a : if true, allows modification of A which may improve performance. if false, A is not modified. output: E : a list of length n containing the eigenvalues of A. ER : a matrix whose columns contain the right eigenvectors of A. EL : a matrix whose rows contain the left eigenvectors of A. return values: E if left and right are both false. (E, ER) if right is true and left is false. (E, EL) if left is true and right is false. (E, EL, ER) if left and right are true. examples: >>> from mpmath import mp >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) >>> E, ER = mp.eig(A) >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) [0.0] [0.0] [0.0] >>> E, EL, ER = mp.eig(A,left = True, right = True) >>> E, EL, ER = mp.eig_sort(E, EL, ER) >>> mp.nprint(E) [2.0, 4.0, 9.0] >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) [0.0] [0.0] [0.0] >>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0])) [0.0 0.0 0.0] warning: - If there are multiple eigenvalues, the eigenvectors do not necessarily span the whole vectorspace, i.e. ER and EL may have not full rank. Furthermore in that case the eigenvectors are numerical ill-conditioned. - In the general case the eigenvalues have no natural order. 
see also: - eigh (or eigsy, eighe) for the symmetric eigenvalue problem. - eig_sort for sorting of eigenvalues and eigenvectors """ n = A.rows if n == 1: if left and (not right): return ([A[0]], ctx.matrix([[1]])) if right and (not left): return ([A[0]], ctx.matrix([[1]])) return ([A[0]], ctx.matrix([[1]]), ctx.matrix([[1]])) if not overwrite_a: A = A.copy() T = ctx.zeros(n, 1) hessenberg_reduce_0(ctx, A, T) if left or right: Q = A.copy() hessenberg_reduce_1(ctx, Q, T) else: Q = False for x in xrange(n): for y in xrange(x + 2, n): A[y,x] = 0 hessenberg_qr(ctx, A, Q) E = [0 for i in xrange(n)] for i in xrange(n): E[i] = A[i,i] if not (left or right): return E if left: EL = eig_tr_l(ctx, A) EL = EL * Q.transpose_conj() if right: ER = eig_tr_r(ctx, A) ER = Q * ER if left and (not right): return (E, EL) if right and (not left): return (E, ER) return (E, EL, ER) @defun def eig_sort(ctx, E, EL = False, ER = False, f = "real"): """ This routine sorts the eigenvalues and eigenvectors delivered by ``eig``. parameters: E : the eigenvalues as delivered by eig EL : the left eigenvectors as delivered by eig, or false ER : the right eigenvectors as delivered by eig, or false f : either a string ("real" sort by increasing real part, "imag" sort by increasing imag part, "abs" sort by absolute value) or a function mapping complexs to the reals, i.e. ``f = lambda x: -mp.re(x) `` would sort the eigenvalues by decreasing real part. return values: E if EL and ER are both false. (E, ER) if ER is not false and left is false. (E, EL) if EL is not false and right is false. (E, EL, ER) if EL and ER are not false. example: >>> from mpmath import mp >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) >>> E, EL, ER = mp.eig(A,left = True, right = True) >>> E, EL, ER = mp.eig_sort(E, EL, ER) >>> mp.nprint(E) [2.0, 4.0, 9.0] >>> E, EL, ER = mp.eig_sort(E, EL, ER,f = lambda x: -mp.re(x)) >>> mp.nprint(E) [9.0, 4.0, 2.0] >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) [0.0] [0.0] [0.0] >>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0])) [0.0 0.0 0.0] """ if isinstance(f, str): if f == "real": f = ctx.re elif f == "imag": f = ctx.im elif cmp == "abs": f = abs else: raise RuntimeError("unknown function %s" % f) n = len(E) # Sort eigenvalues (bubble-sort) for i in xrange(n): imax = i s = f(E[i]) # s is the current maximal element for j in xrange(i + 1, n): c = f(E[j]) if c < s: s = c imax = j if imax != i: # swap eigenvalues z = E[i] E[i] = E[imax] E[imax] = z if not isinstance(EL, bool): for j in xrange(n): z = EL[i,j] EL[i,j] = EL[imax,j] EL[imax,j] = z if not isinstance(ER, bool): for j in xrange(n): z = ER[j,i] ER[j,i] = ER[j,imax] ER[j,imax] = z if isinstance(EL, bool) and isinstance(ER, bool): return E if isinstance(EL, bool) and not(isinstance(ER, bool)): return (E, ER) if isinstance(ER, bool) and not(isinstance(EL, bool)): return (E, EL) return (E, EL, ER)
file_length: 24,524
avg_line_length: 27.15729
max_line_length: 126
extension_type: py
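The eigen.py record above defines hessenberg, schur, eig and eig_sort as context methods. A short sketch that ties them together, reusing the 3x3 matrix from the docstrings and following the documented call signatures:

from mpmath import mp

mp.dps = 20
A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])

Q, H = mp.hessenberg(A)                          # Q' A Q = H, upper Hessenberg
print(mp.chop(A - Q * H * Q.transpose_conj()))
Q, R = mp.schur(A)                               # Q' A Q = R, upper triangular
print(mp.chop(A - Q * R * Q.transpose_conj()))

E, EL, ER = mp.eig(A, left=True, right=True)     # eigenvalues plus both eigenvector sets
E, EL, ER = mp.eig_sort(E, EL, ER)               # sort by increasing real part
print(mp.chop(A * ER[:, 0] - E[0] * ER[:, 0]))   # right eigenpair residual
print(mp.chop(EL[0, :] * A - EL[0, :] * E[0]))   # left eigenpair residual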
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/matrices/matrices.py
from ..libmp.backend import xrange # TODO: interpret list as vectors (for multiplication) rowsep = '\n' colsep = ' ' class _matrix(object): """ Numerical matrix. Specify the dimensions or the data as a nested list. Elements default to zero. Use a flat list to create a column vector easily. By default, only mpf is used to store the data. You can specify another type using force_type=type. It's possible to specify None. Make sure force_type(force_type()) is fast. Creating matrices ----------------- Matrices in mpmath are implemented using dictionaries. Only non-zero values are stored, so it is cheap to represent sparse matrices. The most basic way to create one is to use the ``matrix`` class directly. You can create an empty matrix specifying the dimensions: >>> from mpmath import * >>> mp.dps = 15 >>> matrix(2) matrix( [['0.0', '0.0'], ['0.0', '0.0']]) >>> matrix(2, 3) matrix( [['0.0', '0.0', '0.0'], ['0.0', '0.0', '0.0']]) Calling ``matrix`` with one dimension will create a square matrix. To access the dimensions of a matrix, use the ``rows`` or ``cols`` keyword: >>> A = matrix(3, 2) >>> A matrix( [['0.0', '0.0'], ['0.0', '0.0'], ['0.0', '0.0']]) >>> A.rows 3 >>> A.cols 2 You can also change the dimension of an existing matrix. This will set the new elements to 0. If the new dimension is smaller than before, the concerning elements are discarded: >>> A.rows = 2 >>> A matrix( [['0.0', '0.0'], ['0.0', '0.0']]) Internally ``mpmathify`` is used every time an element is set. This is done using the syntax A[row,column], counting from 0: >>> A = matrix(2) >>> A[1,1] = 1 + 1j >>> A matrix( [['0.0', '0.0'], ['0.0', mpc(real='1.0', imag='1.0')]]) You can use the keyword ``force_type`` to change the function which is called on every new element: >>> matrix(2, 5, force_type=int) # doctest: +SKIP matrix( [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) A more comfortable way to create a matrix lets you use nested lists: >>> matrix([[1, 2], [3, 4]]) matrix( [['1.0', '2.0'], ['3.0', '4.0']]) If you want to preserve the type of the elements you can use ``force_type=None``: >>> matrix([[1, 2.5], [1j, mpf(2)]], force_type=None) matrix( [['1.0', '2.5'], [mpc(real='0.0', imag='1.0'), '2.0']]) Convenient advanced functions are available for creating various standard matrices, see ``zeros``, ``ones``, ``diag``, ``eye``, ``randmatrix`` and ``hilbert``. Vectors ....... Vectors may also be represented by the ``matrix`` class (with rows = 1 or cols = 1). For vectors there are some things which make life easier. A column vector can be created using a flat list, a row vectors using an almost flat nested list:: >>> matrix([1, 2, 3]) matrix( [['1.0'], ['2.0'], ['3.0']]) >>> matrix([[1, 2, 3]]) matrix( [['1.0', '2.0', '3.0']]) Optionally vectors can be accessed like lists, using only a single index:: >>> x = matrix([1, 2, 3]) >>> x[1] mpf('2.0') >>> x[1,0] mpf('2.0') Other ..... 
Like you probably expected, matrices can be printed:: >>> print randmatrix(3) # doctest:+SKIP [ 0.782963853573023 0.802057689719883 0.427895717335467] [0.0541876859348597 0.708243266653103 0.615134039977379] [ 0.856151514955773 0.544759264818486 0.686210904770947] Use ``nstr`` or ``nprint`` to specify the number of digits to print:: >>> nprint(randmatrix(5), 3) # doctest:+SKIP [2.07e-1 1.66e-1 5.06e-1 1.89e-1 8.29e-1] [6.62e-1 6.55e-1 4.47e-1 4.82e-1 2.06e-2] [4.33e-1 7.75e-1 6.93e-2 2.86e-1 5.71e-1] [1.01e-1 2.53e-1 6.13e-1 3.32e-1 2.59e-1] [1.56e-1 7.27e-2 6.05e-1 6.67e-2 2.79e-1] As matrices are mutable, you will need to copy them sometimes:: >>> A = matrix(2) >>> A matrix( [['0.0', '0.0'], ['0.0', '0.0']]) >>> B = A.copy() >>> B[0,0] = 1 >>> B matrix( [['1.0', '0.0'], ['0.0', '0.0']]) >>> A matrix( [['0.0', '0.0'], ['0.0', '0.0']]) Finally, it is possible to convert a matrix to a nested list. This is very useful, as most Python libraries involving matrices or arrays (namely NumPy or SymPy) support this format:: >>> B.tolist() [[mpf('1.0'), mpf('0.0')], [mpf('0.0'), mpf('0.0')]] Matrix operations ----------------- You can add and subtract matrices of compatible dimensions:: >>> A = matrix([[1, 2], [3, 4]]) >>> B = matrix([[-2, 4], [5, 9]]) >>> A + B matrix( [['-1.0', '6.0'], ['8.0', '13.0']]) >>> A - B matrix( [['3.0', '-2.0'], ['-2.0', '-5.0']]) >>> A + ones(3) # doctest:+ELLIPSIS Traceback (most recent call last): ... ValueError: incompatible dimensions for addition It is possible to multiply or add matrices and scalars. In the latter case the operation will be done element-wise:: >>> A * 2 matrix( [['2.0', '4.0'], ['6.0', '8.0']]) >>> A / 4 matrix( [['0.25', '0.5'], ['0.75', '1.0']]) >>> A - 1 matrix( [['0.0', '1.0'], ['2.0', '3.0']]) Of course you can perform matrix multiplication, if the dimensions are compatible:: >>> A * B matrix( [['8.0', '22.0'], ['14.0', '48.0']]) >>> matrix([[1, 2, 3]]) * matrix([[-6], [7], [-2]]) matrix( [['2.0']]) You can raise powers of square matrices:: >>> A**2 matrix( [['7.0', '10.0'], ['15.0', '22.0']]) Negative powers will calculate the inverse:: >>> A**-1 matrix( [['-2.0', '1.0'], ['1.5', '-0.5']]) >>> A * A**-1 matrix( [['1.0', '1.0842021724855e-19'], ['-2.16840434497101e-19', '1.0']]) Matrix transposition is straightforward:: >>> A = ones(2, 3) >>> A matrix( [['1.0', '1.0', '1.0'], ['1.0', '1.0', '1.0']]) >>> A.T matrix( [['1.0', '1.0'], ['1.0', '1.0'], ['1.0', '1.0']]) Norms ..... Sometimes you need to know how "large" a matrix or vector is. Due to their multidimensional nature it's not possible to compare them, but there are several functions to map a matrix or a vector to a positive real number, the so called norms. For vectors the p-norm is intended, usually the 1-, the 2- and the oo-norm are used. >>> x = matrix([-10, 2, 100]) >>> norm(x, 1) mpf('112.0') >>> norm(x, 2) mpf('100.5186549850325') >>> norm(x, inf) mpf('100.0') Please note that the 2-norm is the most used one, though it is more expensive to calculate than the 1- or oo-norm. It is possible to generalize some vector norms to matrix norm:: >>> A = matrix([[1, -1000], [100, 50]]) >>> mnorm(A, 1) mpf('1050.0') >>> mnorm(A, inf) mpf('1001.0') >>> mnorm(A, 'F') mpf('1006.2310867787777') The last norm (the "Frobenius-norm") is an approximation for the 2-norm, which is hard to calculate and not available. The Frobenius-norm lacks some mathematical properties you might expect from a norm. 
""" def __init__(self, *args, **kwargs): self.__data = {} # LU decompostion cache, this is useful when solving the same system # multiple times, when calculating the inverse and when calculating the # determinant self._LU = None convert = kwargs.get('force_type', self.ctx.convert) if not convert: convert = lambda x: x if isinstance(args[0], (list, tuple)): if isinstance(args[0][0], (list, tuple)): # interpret nested list as matrix A = args[0] self.__rows = len(A) self.__cols = len(A[0]) for i, row in enumerate(A): for j, a in enumerate(row): self[i, j] = convert(a) else: # interpret list as row vector v = args[0] self.__rows = len(v) self.__cols = 1 for i, e in enumerate(v): self[i, 0] = e elif isinstance(args[0], int): # create empty matrix of given dimensions if len(args) == 1: self.__rows = self.__cols = args[0] else: if not isinstance(args[1], int): raise TypeError("expected int") self.__rows = args[0] self.__cols = args[1] elif isinstance(args[0], _matrix): A = args[0].copy() self.__data = A._matrix__data self.__rows = A._matrix__rows self.__cols = A._matrix__cols for i in xrange(A.__rows): for j in xrange(A.__cols): A[i,j] = convert(A[i,j]) elif hasattr(args[0], 'tolist'): A = self.ctx.matrix(args[0].tolist()) self.__data = A._matrix__data self.__rows = A._matrix__rows self.__cols = A._matrix__cols else: raise TypeError('could not interpret given arguments') def apply(self, f): """ Return a copy of self with the function `f` applied elementwise. """ new = self.ctx.matrix(self.__rows, self.__cols) for i in xrange(self.__rows): for j in xrange(self.__cols): new[i,j] = f(self[i,j]) return new def __nstr__(self, n=None, **kwargs): # Build table of string representations of the elements res = [] # Track per-column max lengths for pretty alignment maxlen = [0] * self.cols for i in range(self.rows): res.append([]) for j in range(self.cols): if n: string = self.ctx.nstr(self[i,j], n, **kwargs) else: string = str(self[i,j]) res[-1].append(string) maxlen[j] = max(len(string), maxlen[j]) # Patch strings together for i, row in enumerate(res): for j, elem in enumerate(row): # Pad each element up to maxlen so the columns line up row[j] = elem.rjust(maxlen[j]) res[i] = "[" + colsep.join(row) + "]" return rowsep.join(res) def __str__(self): return self.__nstr__() def _toliststr(self, avoid_type=False): """ Create a list string from a matrix. If avoid_type: avoid multiple 'mpf's. """ # XXX: should be something like self.ctx._types typ = self.ctx.mpf s = '[' for i in xrange(self.__rows): s += '[' for j in xrange(self.__cols): if not avoid_type or not isinstance(self[i,j], typ): a = repr(self[i,j]) else: a = "'" + str(self[i,j]) + "'" s += a + ', ' s = s[:-2] s += '],\n ' s = s[:-3] s += ']' return s def tolist(self): """ Convert the matrix to a nested list. """ return [[self[i,j] for j in range(self.__cols)] for i in range(self.__rows)] def __repr__(self): if self.ctx.pretty: return self.__str__() s = 'matrix(\n' s += self._toliststr(avoid_type=True) + ')' return s def __get_element(self, key): ''' Fast extraction of the i,j element from the matrix This function is for private use only because is unsafe: 1. Does not check on the value of key it expects key to be a integer tuple (i,j) 2. Does not check bounds ''' if key in self.__data: return self.__data[key] else: return self.ctx.zero def __set_element(self, key, value): ''' Fast assignment of the i,j element in the matrix This function is unsafe: 1. Does not check on the value of key it expects key to be a integer tuple (i,j) 2. 
Does not check bounds 3. Does not check the value type ''' if value: # only store non-zeros self.__data[key] = value elif key in self.__data: del self.__data[key] def __getitem__(self, key): ''' Getitem function for mp matrix class with slice index enabled it allows the following assingments scalar to a slice of the matrix B = A[:,2:6] ''' # Convert vector to matrix indexing if isinstance(key, int) or isinstance(key,slice): # only sufficent for vectors if self.__rows == 1: key = (0, key) elif self.__cols == 1: key = (key, 0) else: raise IndexError('insufficient indices for matrix') if isinstance(key[0],slice) or isinstance(key[1],slice): #Rows if isinstance(key[0],slice): #Check bounds if (key[0].start is None or key[0].start >= 0) and \ (key[0].stop is None or key[0].stop <= self.__rows+1): # Generate indices rows = xrange(*key[0].indices(self.__rows)) else: raise IndexError('Row index out of bounds') else: # Single row rows = [key[0]] # Columns if isinstance(key[1],slice): # Check bounds if (key[1].start is None or key[1].start >= 0) and \ (key[1].stop is None or key[1].stop <= self.__cols+1): # Generate indices columns = xrange(*key[1].indices(self.__cols)) else: raise IndexError('Column index out of bounds') else: # Single column columns = [key[1]] # Create matrix slice m = self.ctx.matrix(len(rows),len(columns)) # Assign elements to the output matrix for i,x in enumerate(rows): for j,y in enumerate(columns): m.__set_element((i,j),self.__get_element((x,y))) return m else: # single element extraction if key[0] >= self.__rows or key[1] >= self.__cols: raise IndexError('matrix index out of range') if key in self.__data: return self.__data[key] else: return self.ctx.zero def __setitem__(self, key, value): # setitem function for mp matrix class with slice index enabled # it allows the following assingments # scalar to a slice of the matrix # A[:,2:6] = 2.5 # submatrix to matrix (the value matrix should be the same size as the slice size) # A[3,:] = B where A is n x m and B is n x 1 # Convert vector to matrix indexing if isinstance(key, int) or isinstance(key,slice): # only sufficent for vectors if self.__rows == 1: key = (0, key) elif self.__cols == 1: key = (key, 0) else: raise IndexError('insufficient indices for matrix') # Slice indexing if isinstance(key[0],slice) or isinstance(key[1],slice): # Rows if isinstance(key[0],slice): # Check bounds if (key[0].start is None or key[0].start >= 0) and \ (key[0].stop is None or key[0].stop <= self.__rows+1): # generate row indices rows = xrange(*key[0].indices(self.__rows)) else: raise IndexError('Row index out of bounds') else: # Single row rows = [key[0]] # Columns if isinstance(key[1],slice): # Check bounds if (key[1].start is None or key[1].start >= 0) and \ (key[1].stop is None or key[1].stop <= self.__cols+1): # Generate column indices columns = xrange(*key[1].indices(self.__cols)) else: raise IndexError('Column index out of bounds') else: # Single column columns = [key[1]] # Assign slice with a scalar if isinstance(value,self.ctx.matrix): # Assign elements to matrix if input and output dimensions match if len(rows) == value.rows and len(columns) == value.cols: for i,x in enumerate(rows): for j,y in enumerate(columns): self.__set_element((x,y), value.__get_element((i,j))) else: raise ValueError('Dimensions do not match') else: # Assign slice with scalars value = self.ctx.convert(value) for i in rows: for j in columns: self.__set_element((i,j), value) else: # Single element assingment # Check bounds if key[0] >= self.__rows or key[1] >= 
self.__cols: raise IndexError('matrix index out of range') # Convert and store value value = self.ctx.convert(value) if value: # only store non-zeros self.__data[key] = value elif key in self.__data: del self.__data[key] if self._LU: self._LU = None return def __iter__(self): for i in xrange(self.__rows): for j in xrange(self.__cols): yield self[i,j] def __mul__(self, other): if isinstance(other, self.ctx.matrix): # dot multiplication TODO: use Strassen's method? if self.__cols != other.__rows: raise ValueError('dimensions not compatible for multiplication') new = self.ctx.matrix(self.__rows, other.__cols) for i in xrange(self.__rows): for j in xrange(other.__cols): new[i, j] = self.ctx.fdot((self[i,k], other[k,j]) for k in xrange(other.__rows)) return new else: # try scalar multiplication new = self.ctx.matrix(self.__rows, self.__cols) for i in xrange(self.__rows): for j in xrange(self.__cols): new[i, j] = other * self[i, j] return new def __rmul__(self, other): # assume other is scalar and thus commutative if isinstance(other, self.ctx.matrix): raise TypeError("other should not be type of ctx.matrix") return self.__mul__(other) def __pow__(self, other): # avoid cyclic import problems #from linalg import inverse if not isinstance(other, int): raise ValueError('only integer exponents are supported') if not self.__rows == self.__cols: raise ValueError('only powers of square matrices are defined') n = other if n == 0: return self.ctx.eye(self.__rows) if n < 0: n = -n neg = True else: neg = False i = n y = 1 z = self.copy() while i != 0: if i % 2 == 1: y = y * z z = z*z i = i // 2 if neg: y = self.ctx.inverse(y) return y def __div__(self, other): # assume other is scalar and do element-wise divison assert not isinstance(other, self.ctx.matrix) new = self.ctx.matrix(self.__rows, self.__cols) for i in xrange(self.__rows): for j in xrange(self.__cols): new[i,j] = self[i,j] / other return new __truediv__ = __div__ def __add__(self, other): if isinstance(other, self.ctx.matrix): if not (self.__rows == other.__rows and self.__cols == other.__cols): raise ValueError('incompatible dimensions for addition') new = self.ctx.matrix(self.__rows, self.__cols) for i in xrange(self.__rows): for j in xrange(self.__cols): new[i,j] = self[i,j] + other[i,j] return new else: # assume other is scalar and add element-wise new = self.ctx.matrix(self.__rows, self.__cols) for i in xrange(self.__rows): for j in xrange(self.__cols): new[i,j] += self[i,j] + other return new def __radd__(self, other): return self.__add__(other) def __sub__(self, other): if isinstance(other, self.ctx.matrix) and not (self.__rows == other.__rows and self.__cols == other.__cols): raise ValueError('incompatible dimensions for substraction') return self.__add__(other * (-1)) def __neg__(self): return (-1) * self def __rsub__(self, other): return -self + other def __eq__(self, other): return self.__rows == other.__rows and self.__cols == other.__cols \ and self.__data == other.__data def __len__(self): if self.rows == 1: return self.cols elif self.cols == 1: return self.rows else: return self.rows # do it like numpy def __getrows(self): return self.__rows def __setrows(self, value): for key in self.__data.copy(): if key[0] >= value: del self.__data[key] self.__rows = value rows = property(__getrows, __setrows, doc='number of rows') def __getcols(self): return self.__cols def __setcols(self, value): for key in self.__data.copy(): if key[1] >= value: del self.__data[key] self.__cols = value cols = property(__getcols, __setcols, doc='number of 
columns') def transpose(self): new = self.ctx.matrix(self.__cols, self.__rows) for i in xrange(self.__rows): for j in xrange(self.__cols): new[j,i] = self[i,j] return new T = property(transpose) def conjugate(self): return self.apply(self.ctx.conj) def transpose_conj(self): return self.conjugate().transpose() H = property(transpose_conj) def copy(self): new = self.ctx.matrix(self.__rows, self.__cols) new.__data = self.__data.copy() return new __copy__ = copy def column(self, n): m = self.ctx.matrix(self.rows, 1) for i in range(self.rows): m[i] = self[i,n] return m class MatrixMethods(object): def __init__(ctx): # XXX: subclass ctx.matrix = type('matrix', (_matrix,), {}) ctx.matrix.ctx = ctx ctx.matrix.convert = ctx.convert def eye(ctx, n, **kwargs): """ Create square identity matrix n x n. """ A = ctx.matrix(n, **kwargs) for i in xrange(n): A[i,i] = 1 return A def diag(ctx, diagonal, **kwargs): """ Create square diagonal matrix using given list. Example: >>> from mpmath import diag, mp >>> mp.pretty = False >>> diag([1, 2, 3]) matrix( [['1.0', '0.0', '0.0'], ['0.0', '2.0', '0.0'], ['0.0', '0.0', '3.0']]) """ A = ctx.matrix(len(diagonal), **kwargs) for i in xrange(len(diagonal)): A[i,i] = diagonal[i] return A def zeros(ctx, *args, **kwargs): """ Create matrix m x n filled with zeros. One given dimension will create square matrix n x n. Example: >>> from mpmath import zeros, mp >>> mp.pretty = False >>> zeros(2) matrix( [['0.0', '0.0'], ['0.0', '0.0']]) """ if len(args) == 1: m = n = args[0] elif len(args) == 2: m = args[0] n = args[1] else: raise TypeError('zeros expected at most 2 arguments, got %i' % len(args)) A = ctx.matrix(m, n, **kwargs) for i in xrange(m): for j in xrange(n): A[i,j] = 0 return A def ones(ctx, *args, **kwargs): """ Create matrix m x n filled with ones. One given dimension will create square matrix n x n. Example: >>> from mpmath import ones, mp >>> mp.pretty = False >>> ones(2) matrix( [['1.0', '1.0'], ['1.0', '1.0']]) """ if len(args) == 1: m = n = args[0] elif len(args) == 2: m = args[0] n = args[1] else: raise TypeError('ones expected at most 2 arguments, got %i' % len(args)) A = ctx.matrix(m, n, **kwargs) for i in xrange(m): for j in xrange(n): A[i,j] = 1 return A def hilbert(ctx, m, n=None): """ Create (pseudo) hilbert matrix m x n. One given dimension will create hilbert matrix n x n. The matrix is very ill-conditioned and symmetric, positive definite if square. """ if n is None: n = m A = ctx.matrix(m, n) for i in xrange(m): for j in xrange(n): A[i,j] = ctx.one / (i + j + 1) return A def randmatrix(ctx, m, n=None, min=0, max=1, **kwargs): """ Create a random m x n matrix. All values are >= min and <max. n defaults to m. Example: >>> from mpmath import randmatrix >>> randmatrix(2) # doctest:+SKIP matrix( [['0.53491598236191806', '0.57195669543302752'], ['0.85589992269513615', '0.82444367501382143']]) """ if not n: n = m A = ctx.matrix(m, n, **kwargs) for i in xrange(m): for j in xrange(n): A[i,j] = ctx.rand() * (max - min) + min return A def swap_row(ctx, A, i, j): """ Swap row i with row j. """ if i == j: return if isinstance(A, ctx.matrix): for k in xrange(A.cols): A[i,k], A[j,k] = A[j,k], A[i,k] elif isinstance(A, list): A[i], A[j] = A[j], A[i] else: raise TypeError('could not interpret type') def extend(ctx, A, b): """ Extend matrix A with column b and return result. 
""" if not isinstance(A, ctx.matrix): raise TypeError("A should be a type of ctx.matrix") if A.rows != len(b): raise ValueError("Value should be equal to len(b)") A = A.copy() A.cols += 1 for i in xrange(A.rows): A[i, A.cols-1] = b[i] return A def norm(ctx, x, p=2): r""" Gives the entrywise `p`-norm of an iterable *x*, i.e. the vector norm `\left(\sum_k |x_k|^p\right)^{1/p}`, for any given `1 \le p \le \infty`. Special cases: If *x* is not iterable, this just returns ``absmax(x)``. ``p=1`` gives the sum of absolute values. ``p=2`` is the standard Euclidean vector norm. ``p=inf`` gives the magnitude of the largest element. For *x* a matrix, ``p=2`` is the Frobenius norm. For operator matrix norms, use :func:`~mpmath.mnorm` instead. You can use the string 'inf' as well as float('inf') or mpf('inf') to specify the infinity norm. **Examples** >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> x = matrix([-10, 2, 100]) >>> norm(x, 1) mpf('112.0') >>> norm(x, 2) mpf('100.5186549850325') >>> norm(x, inf) mpf('100.0') """ try: iter(x) except TypeError: return ctx.absmax(x) if type(p) is not int: p = ctx.convert(p) if p == ctx.inf: return max(ctx.absmax(i) for i in x) elif p == 1: return ctx.fsum(x, absolute=1) elif p == 2: return ctx.sqrt(ctx.fsum(x, absolute=1, squared=1)) elif p > 1: return ctx.nthroot(ctx.fsum(abs(i)**p for i in x), p) else: raise ValueError('p has to be >= 1') def mnorm(ctx, A, p=1): r""" Gives the matrix (operator) `p`-norm of A. Currently ``p=1`` and ``p=inf`` are supported: ``p=1`` gives the 1-norm (maximal column sum) ``p=inf`` gives the `\infty`-norm (maximal row sum). You can use the string 'inf' as well as float('inf') or mpf('inf') ``p=2`` (not implemented) for a square matrix is the usual spectral matrix norm, i.e. the largest singular value. ``p='f'`` (or 'F', 'fro', 'Frobenius, 'frobenius') gives the Frobenius norm, which is the elementwise 2-norm. The Frobenius norm is an approximation of the spectral norm and satisfies .. math :: \frac{1}{\sqrt{\mathrm{rank}(A)}} \|A\|_F \le \|A\|_2 \le \|A\|_F The Frobenius norm lacks some mathematical properties that might be expected of a norm. For general elementwise `p`-norms, use :func:`~mpmath.norm` instead. **Examples** >>> from mpmath import * >>> mp.dps = 15; mp.pretty = False >>> A = matrix([[1, -1000], [100, 50]]) >>> mnorm(A, 1) mpf('1050.0') >>> mnorm(A, inf) mpf('1001.0') >>> mnorm(A, 'F') mpf('1006.2310867787777') """ A = ctx.matrix(A) if type(p) is not int: if type(p) is str and 'frobenius'.startswith(p.lower()): return ctx.norm(A, 2) p = ctx.convert(p) m, n = A.rows, A.cols if p == 1: return max(ctx.fsum((A[i,j] for i in xrange(m)), absolute=1) for j in xrange(n)) elif p == ctx.inf: return max(ctx.fsum((A[i,j] for j in xrange(n)), absolute=1) for i in xrange(m)) else: raise NotImplementedError("matrix p-norm for arbitrary p") if __name__ == '__main__': import doctest doctest.testmod()
file_length: 31,596
avg_line_length: 30.787726
max_line_length: 96
extension_type: py
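The matrix class and the norm/mnorm helpers documented in matrices.py above cover construction, arithmetic and norms. A compact sketch of typical use, with arbitrary values and the top-level names (matrix, norm, mnorm, inf) that the docstrings themselves import:

from mpmath import mp, matrix, norm, mnorm, inf

mp.dps = 15
A = matrix([[1, 2], [3, 4]])
x = matrix([-10, 2, 100])                          # a flat list builds a column vector

print(A + matrix([[-2, 4], [5, 9]]))               # addition needs compatible dimensions
print(A - 1)                                       # scalars act element-wise
print(A * A**-1)                                   # negative powers compute the inverse
print(A.T)                                         # transposition
print(norm(x, 1), norm(x, 2), norm(x, inf))        # vector p-norms
print(mnorm(A, 1), mnorm(A, inf), mnorm(A, 'F'))   # 1-, inf- and Frobenius matrix norms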
repo: cba-pipeline-public
file: cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/matrices/eigen_symmetric.py
#!/usr/bin/python # -*- coding: utf-8 -*- ################################################################################################## # module for the symmetric eigenvalue problem # Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) # # todo: # - implement balancing # ################################################################################################## """ The symmetric eigenvalue problem. --------------------------------- This file contains routines for the symmetric eigenvalue problem. high level routines: eigsy : real symmetric (ordinary) eigenvalue problem eighe : complex hermitian (ordinary) eigenvalue problem eigh : unified interface for eigsy and eighe svd_r : singular value decomposition for real matrices svd_c : singular value decomposition for complex matrices svd : unified interface for svd_r and svd_c low level routines: r_sy_tridiag : reduction of real symmetric matrix to real symmetric tridiagonal matrix c_he_tridiag_0 : reduction of complex hermitian matrix to real symmetric tridiagonal matrix c_he_tridiag_1 : auxiliary routine to c_he_tridiag_0 c_he_tridiag_2 : auxiliary routine to c_he_tridiag_0 tridiag_eigen : solves the real symmetric tridiagonal matrix eigenvalue problem svd_r_raw : raw singular value decomposition for real matrices svd_c_raw : raw singular value decomposition for complex matrices """ from ..libmp.backend import xrange from .eigen import defun def r_sy_tridiag(ctx, A, D, E, calc_ev = True): """ This routine transforms a real symmetric matrix A to a real symmetric tridiagonal matrix T using an orthogonal similarity transformation: Q' * A * Q = T (here ' denotes the matrix transpose). The orthogonal matrix Q is build up from Householder reflectors. parameters: A (input/output) On input, A contains the real symmetric matrix of dimension (n,n). On output, if calc_ev is true, A contains the orthogonal matrix Q, otherwise A is destroyed. D (output) real array of length n, contains the diagonal elements of the tridiagonal matrix E (output) real array of length n, contains the offdiagonal elements of the tridiagonal matrix in E[0:(n-1)] where is the dimension of the matrix A. E[n-1] is undefined. calc_ev (input) If calc_ev is true, this routine explicitly calculates the orthogonal matrix Q which is then returned in A. If calc_ev is false, Q is not explicitly calculated resulting in a shorter run time. This routine is a python translation of the fortran routine tred2.f in the software library EISPACK (see netlib.org) which itself is based on the algol procedure tred2 described in: - Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkonson - Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971) For a good introduction to Householder reflections, see also Stoer, Bulirsch - Introduction to Numerical Analysis. 
""" # note : the vector v of the i-th houshoulder reflector is stored in a[(i+1):,i] # whereas v/<v,v> is stored in a[i,(i+1):] n = A.rows for i in xrange(n - 1, 0, -1): # scale the vector scale = 0 for k in xrange(0, i): scale += abs(A[k,i]) scale_inv = 0 if scale != 0: scale_inv = 1/scale # sadly there are floating point numbers not equal to zero whose reciprocal is infinity if i == 1 or scale == 0 or ctx.isinf(scale_inv): E[i] = A[i-1,i] # nothing to do D[i] = 0 continue # calculate parameters for housholder transformation H = 0 for k in xrange(0, i): A[k,i] *= scale_inv H += A[k,i] * A[k,i] F = A[i-1,i] G = ctx.sqrt(H) if F > 0: G = -G E[i] = scale * G H -= F * G A[i-1,i] = F - G F = 0 # apply housholder transformation for j in xrange(0, i): if calc_ev: A[i,j] = A[j,i] / H G = 0 # calculate A*U for k in xrange(0, j + 1): G += A[k,j] * A[k,i] for k in xrange(j + 1, i): G += A[j,k] * A[k,i] E[j] = G / H # calculate P F += E[j] * A[j,i] HH = F / (2 * H) for j in xrange(0, i): # calculate reduced A F = A[j,i] G = E[j] - HH * F # calculate Q E[j] = G for k in xrange(0, j + 1): A[k,j] -= F * E[k] + G * A[k,i] D[i] = H for i in xrange(1, n): # better for compatibility E[i-1] = E[i] E[n-1] = 0 if calc_ev: D[0] = 0 for i in xrange(0, n): if D[i] != 0: for j in xrange(0, i): # accumulate transformation matrices G = 0 for k in xrange(0, i): G += A[i,k] * A[k,j] for k in xrange(0, i): A[k,j] -= G * A[k,i] D[i] = A[i,i] A[i,i] = 1 for j in xrange(0, i): A[j,i] = A[i,j] = 0 else: for i in xrange(0, n): D[i] = A[i,i] def c_he_tridiag_0(ctx, A, D, E, T): """ This routine transforms a complex hermitian matrix A to a real symmetric tridiagonal matrix T using an unitary similarity transformation: Q' * A * Q = T (here ' denotes the hermitian matrix transpose, i.e. transposition und conjugation). The unitary matrix Q is build up from Householder reflectors and an unitary diagonal matrix. parameters: A (input/output) On input, A contains the complex hermitian matrix of dimension (n,n). On output, A contains the unitary matrix Q in compressed form. D (output) real array of length n, contains the diagonal elements of the tridiagonal matrix. E (output) real array of length n, contains the offdiagonal elements of the tridiagonal matrix in E[0:(n-1)] where is the dimension of the matrix A. E[n-1] is undefined. T (output) complex array of length n, contains a unitary diagonal matrix. This routine is a python translation (in slightly modified form) of the fortran routine htridi.f in the software library EISPACK (see netlib.org) which itself is a complex version of the algol procedure tred1 described in: - Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkonson - Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971) For a good introduction to Householder reflections, see also Stoer, Bulirsch - Introduction to Numerical Analysis. 
""" n = A.rows T[n-1] = 1 for i in xrange(n - 1, 0, -1): # scale the vector scale = 0 for k in xrange(0, i): scale += abs(ctx.re(A[k,i])) + abs(ctx.im(A[k,i])) scale_inv = 0 if scale != 0: scale_inv = 1 / scale # sadly there are floating point numbers not equal to zero whose reciprocal is infinity if scale == 0 or ctx.isinf(scale_inv): E[i] = 0 D[i] = 0 T[i-1] = 1 continue if i == 1: F = A[i-1,i] f = abs(F) E[i] = f D[i] = 0 if f != 0: T[i-1] = T[i] * F / f else: T[i-1] = T[i] continue # calculate parameters for housholder transformation H = 0 for k in xrange(0, i): A[k,i] *= scale_inv rr = ctx.re(A[k,i]) ii = ctx.im(A[k,i]) H += rr * rr + ii * ii F = A[i-1,i] f = abs(F) G = ctx.sqrt(H) H += G * f E[i] = scale * G if f != 0: F = F / f TZ = - T[i] * F # T[i-1]=-T[i]*F, but we need T[i-1] as temporary storage G *= F else: TZ = -T[i] # T[i-1]=-T[i] A[i-1,i] += G F = 0 # apply housholder transformation for j in xrange(0, i): A[i,j] = A[j,i] / H G = 0 # calculate A*U for k in xrange(0, j + 1): G += ctx.conj(A[k,j]) * A[k,i] for k in xrange(j + 1, i): G += A[j,k] * A[k,i] T[j] = G / H # calculate P F += ctx.conj(T[j]) * A[j,i] HH = F / (2 * H) for j in xrange(0, i): # calculate reduced A F = A[j,i] G = T[j] - HH * F # calculate Q T[j] = G for k in xrange(0, j + 1): A[k,j] -= ctx.conj(F) * T[k] + ctx.conj(G) * A[k,i] # as we use the lower left part for storage # we have to use the transpose of the normal formula T[i-1] = TZ D[i] = H for i in xrange(1, n): # better for compatibility E[i-1] = E[i] E[n-1] = 0 D[0] = 0 for i in xrange(0, n): zw = D[i] D[i] = ctx.re(A[i,i]) A[i,i] = zw def c_he_tridiag_1(ctx, A, T): """ This routine forms the unitary matrix Q described in c_he_tridiag_0. parameters: A (input/output) On input, A is the same matrix as delivered by c_he_tridiag_0. On output, A is set to Q. T (input) On input, T is the same array as delivered by c_he_tridiag_0. """ n = A.rows for i in xrange(0, n): if A[i,i] != 0: for j in xrange(0, i): G = 0 for k in xrange(0, i): G += ctx.conj(A[i,k]) * A[k,j] for k in xrange(0, i): A[k,j] -= G * A[k,i] A[i,i] = 1 for j in xrange(0, i): A[j,i] = A[i,j] = 0 for i in xrange(0, n): for k in xrange(0, n): A[i,k] *= T[k] def c_he_tridiag_2(ctx, A, T, B): """ This routine applied the unitary matrix Q described in c_he_tridiag_0 onto the the matrix B, i.e. it forms Q*B. parameters: A (input) On input, A is the same matrix as delivered by c_he_tridiag_0. T (input) On input, T is the same array as delivered by c_he_tridiag_0. B (input/output) On input, B is a complex matrix. On output B is replaced by Q*B. This routine is a python translation of the fortran routine htribk.f in the software library EISPACK (see netlib.org). See c_he_tridiag_0 for more references. """ n = A.rows for i in xrange(0, n): for k in xrange(0, n): B[k,i] *= T[k] for i in xrange(0, n): if A[i,i] != 0: for j in xrange(0, n): G = 0 for k in xrange(0, i): G += ctx.conj(A[i,k]) * B[k,j] for k in xrange(0, i): B[k,j] -= G * A[k,i] def tridiag_eigen(ctx, d, e, z = False): """ This subroutine find the eigenvalues and the first components of the eigenvectors of a real symmetric tridiagonal matrix using the implicit QL method. parameters: d (input/output) real array of length n. on input, d contains the diagonal elements of the input matrix. on output, d contains the eigenvalues in ascending order. e (input) real array of length n. on input, e contains the offdiagonal elements of the input matrix in e[0:(n-1)]. On output, e has been destroyed. 
z (input/output) If z is equal to False, no eigenvectors will be computed. Otherwise on input z should have the format z[0:m,0:n] (i.e. a real or complex matrix of dimension (m,n) ). On output this matrix will be multiplied by the matrix of the eigenvectors (i.e. the columns of this matrix are the eigenvectors): z --> z*EV That means if z[i,j]={1 if j==j; 0 otherwise} on input, then on output z will contain the first m components of the eigenvectors. That means if m is equal to n, the i-th eigenvector will be z[:,i]. This routine is a python translation (in slightly modified form) of the fortran routine imtql2.f in the software library EISPACK (see netlib.org) which itself is based on the algol procudure imtql2 desribed in: - num. math. 12, p. 377-383(1968) by matrin and wilkinson - modified in num. math. 15, p. 450(1970) by dubrulle - handbook for auto. comp., vol. II-linear algebra, p. 241-248 (1971) See also the routine gaussq.f in netlog.org or acm algorithm 726. """ n = len(d) e[n-1] = 0 iterlim = 2 * ctx.dps for l in xrange(n): j = 0 while 1: m = l while 1: # look for a small subdiagonal element if m + 1 == n: break if abs(e[m]) <= ctx.eps * (abs(d[m]) + abs(d[m + 1])): break m = m + 1 if m == l: break if j >= iterlim: raise RuntimeError("tridiag_eigen: no convergence to an eigenvalue after %d iterations" % iterlim) j += 1 # form shift p = d[l] g = (d[l + 1] - p) / (2 * e[l]) r = ctx.hypot(g, 1) if g < 0: s = g - r else: s = g + r g = d[m] - p + e[l] / s s, c, p = 1, 1, 0 for i in xrange(m - 1, l - 1, -1): f = s * e[i] b = c * e[i] if abs(f) > abs(g): # this here is a slight improvement also used in gaussq.f or acm algorithm 726. c = g / f r = ctx.hypot(c, 1) e[i + 1] = f * r s = 1 / r c = c * s else: s = f / g r = ctx.hypot(s, 1) e[i + 1] = g * r c = 1 / r s = s * c g = d[i + 1] - p r = (d[i] - g) * s + 2 * c * b p = s * r d[i + 1] = g + p g = c * r - b if not isinstance(z, bool): # calculate eigenvectors for w in xrange(z.rows): f = z[w,i+1] z[w,i+1] = s * z[w,i] + c * f z[w,i ] = c * z[w,i] - s * f d[l] = d[l] - p e[l] = g e[m] = 0 for ii in xrange(1, n): # sort eigenvalues and eigenvectors (bubble-sort) i = ii - 1 k = i p = d[i] for j in xrange(ii, n): if d[j] >= p: continue k = j p = d[k] if k == i: continue d[k] = d[i] d[i] = p if not isinstance(z, bool): for w in xrange(z.rows): p = z[w,i] z[w,i] = z[w,k] z[w,k] = p ######################################################################################## @defun def eigsy(ctx, A, eigvals_only = False, overwrite_a = False): """ This routine solves the (ordinary) eigenvalue problem for a real symmetric square matrix A. Given A, an orthogonal matrix Q is calculated which diagonalizes A: Q' A Q = diag(E) and Q Q' = Q' Q = 1 Here diag(E) is a diagonal matrix whose diagonal is E. ' denotes the transpose. The columns of Q are the eigenvectors of A and E contains the eigenvalues: A Q[:,i] = E[i] Q[:,i] input: A: real matrix of format (n,n) which is symmetric (i.e. A=A' or A[i,j]=A[j,i]) eigvals_only: if true, calculates only the eigenvalues E. if false, calculates both eigenvectors and eigenvalues. overwrite_a: if true, allows modification of A which may improve performance. if false, A is not modified. output: E: vector of format (n). contains the eigenvalues of A in ascending order. Q: orthogonal matrix of format (n,n). contains the eigenvectors of A as columns. 
return value: E if eigvals_only is true (E, Q) if eigvals_only is false example: >>> from mpmath import mp >>> A = mp.matrix([[3, 2], [2, 0]]) >>> E = mp.eigsy(A, eigvals_only = True) >>> print(E) [-1.0] [ 4.0] >>> A = mp.matrix([[1, 2], [2, 3]]) >>> E, Q = mp.eigsy(A) >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) [0.0] [0.0] see also: eighe, eigh, eig """ if not overwrite_a: A = A.copy() d = ctx.zeros(A.rows, 1) e = ctx.zeros(A.rows, 1) if eigvals_only: r_sy_tridiag(ctx, A, d, e, calc_ev = False) tridiag_eigen(ctx, d, e, False) return d else: r_sy_tridiag(ctx, A, d, e, calc_ev = True) tridiag_eigen(ctx, d, e, A) return (d, A) @defun def eighe(ctx, A, eigvals_only = False, overwrite_a = False): """ This routine solves the (ordinary) eigenvalue problem for a complex hermitian square matrix A. Given A, an unitary matrix Q is calculated which diagonalizes A: Q' A Q = diag(E) and Q Q' = Q' Q = 1 Here diag(E) a is diagonal matrix whose diagonal is E. ' denotes the hermitian transpose (i.e. ordinary transposition and complex conjugation). The columns of Q are the eigenvectors of A and E contains the eigenvalues: A Q[:,i] = E[i] Q[:,i] input: A: complex matrix of format (n,n) which is hermitian (i.e. A=A' or A[i,j]=conj(A[j,i])) eigvals_only: if true, calculates only the eigenvalues E. if false, calculates both eigenvectors and eigenvalues. overwrite_a: if true, allows modification of A which may improve performance. if false, A is not modified. output: E: vector of format (n). contains the eigenvalues of A in ascending order. Q: unitary matrix of format (n,n). contains the eigenvectors of A as columns. return value: E if eigvals_only is true (E, Q) if eigvals_only is false example: >>> from mpmath import mp >>> A = mp.matrix([[1, -3 - 1j], [-3 + 1j, -2]]) >>> E = mp.eighe(A, eigvals_only = True) >>> print(E) [-4.0] [ 3.0] >>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]]) >>> E, Q = mp.eighe(A) >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) [0.0] [0.0] see also: eigsy, eigh, eig """ if not overwrite_a: A = A.copy() d = ctx.zeros(A.rows, 1) e = ctx.zeros(A.rows, 1) t = ctx.zeros(A.rows, 1) if eigvals_only: c_he_tridiag_0(ctx, A, d, e, t) tridiag_eigen(ctx, d, e, False) return d else: c_he_tridiag_0(ctx, A, d, e, t) B = ctx.eye(A.rows) tridiag_eigen(ctx, d, e, B) c_he_tridiag_2(ctx, A, t, B) return (d, B) @defun def eigh(ctx, A, eigvals_only = False, overwrite_a = False): """ "eigh" is a unified interface for "eigsy" and "eighe". Depending on whether A is real or complex the appropriate function is called. This routine solves the (ordinary) eigenvalue problem for a real symmetric or complex hermitian square matrix A. Given A, an orthogonal (A real) or unitary (A complex) matrix Q is calculated which diagonalizes A: Q' A Q = diag(E) and Q Q' = Q' Q = 1 Here diag(E) a is diagonal matrix whose diagonal is E. ' denotes the hermitian transpose (i.e. ordinary transposition and complex conjugation). The columns of Q are the eigenvectors of A and E contains the eigenvalues: A Q[:,i] = E[i] Q[:,i] input: A: a real or complex square matrix of format (n,n) which is symmetric (i.e. A[i,j]=A[j,i]) or hermitian (i.e. A[i,j]=conj(A[j,i])). eigvals_only: if true, calculates only the eigenvalues E. if false, calculates both eigenvectors and eigenvalues. overwrite_a: if true, allows modification of A which may improve performance. if false, A is not modified. output: E: vector of format (n). contains the eigenvalues of A in ascending order. Q: an orthogonal or unitary matrix of format (n,n). 
contains the eigenvectors of A as columns. return value: E if eigvals_only is true (E, Q) if eigvals_only is false example: >>> from mpmath import mp >>> A = mp.matrix([[3, 2], [2, 0]]) >>> E = mp.eigh(A, eigvals_only = True) >>> print(E) [-1.0] [ 4.0] >>> A = mp.matrix([[1, 2], [2, 3]]) >>> E, Q = mp.eigh(A) >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) [0.0] [0.0] >>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]]) >>> E, Q = mp.eigh(A) >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) [0.0] [0.0] see also: eigsy, eighe, eig """ iscomplex = any(type(x) is ctx.mpc for x in A) if iscomplex: return ctx.eighe(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a) else: return ctx.eigsy(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a) @defun def gauss_quadrature(ctx, n, qtype = "legendre", alpha = 0, beta = 0): """ This routine calulates gaussian quadrature rules for different families of orthogonal polynomials. Let (a, b) be an interval, W(x) a positive weight function and n a positive integer. Then the purpose of this routine is to calculate pairs (x_k, w_k) for k=0, 1, 2, ... (n-1) which give int(W(x) * F(x), x = a..b) = sum(w_k * F(x_k),k = 0..(n-1)) exact for all polynomials F(x) of degree (strictly) less than 2*n. For all integrable functions F(x) the sum is a (more or less) good approximation to the integral. The x_k are called nodes (which are the zeros of the related orthogonal polynomials) and the w_k are called the weights. parameters n (input) The degree of the quadrature rule, i.e. its number of nodes. qtype (input) The family of orthogonal polynmomials for which to compute the quadrature rule. See the list below. alpha (input) real number, used as parameter for some orthogonal polynomials beta (input) real number, used as parameter for some orthogonal polynomials. return value (X, W) a pair of two real arrays where x_k = X[k] and w_k = W[k]. 
orthogonal polynomials: qtype polynomial ----- ---------- "legendre" Legendre polynomials, W(x)=1 on the interval (-1, +1) "legendre01" shifted Legendre polynomials, W(x)=1 on the interval (0, +1) "hermite" Hermite polynomials, W(x)=exp(-x*x) on (-infinity,+infinity) "laguerre" Laguerre polynomials, W(x)=exp(-x) on (0,+infinity) "glaguerre" generalized Laguerre polynomials, W(x)=exp(-x)*x**alpha on (0, +infinity) "chebyshev1" Chebyshev polynomials of the first kind, W(x)=1/sqrt(1-x*x) on (-1, +1) "chebyshev2" Chebyshev polynomials of the second kind, W(x)=sqrt(1-x*x) on (-1, +1) "jacobi" Jacobi polynomials, W(x)=(1-x)**alpha * (1+x)**beta on (-1, +1) with alpha>-1 and beta>-1 examples: >>> from mpmath import mp >>> f = lambda x: x**8 + 2 * x**6 - 3 * x**4 + 5 * x**2 - 7 >>> X, W = mp.gauss_quadrature(5, "hermite") >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) >>> B = mp.sqrt(mp.pi) * 57 / 16 >>> C = mp.quad(lambda x: mp.exp(- x * x) * f(x), [-mp.inf, +mp.inf]) >>> print mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10) 0.0 0.0 >>> f = lambda x: x**5 - 2 * x**4 + 3 * x**3 - 5 * x**2 + 7 * x - 11 >>> X, W = mp.gauss_quadrature(3, "laguerre") >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) >>> B = 76 >>> C = mp.quad(lambda x: mp.exp(-x) * f(x), [0, +mp.inf]) >>> print mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10) 0.0 0.0 # orthogonality of the chebyshev polynomials: >>> f = lambda x: mp.chebyt(3, x) * mp.chebyt(2, x) >>> X, W = mp.gauss_quadrature(3, "chebyshev1") >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) >>> print(mp.chop(A, tol = 1e-10)) 0.0 references: - golub and welsch, "calculations of gaussian quadrature rules", mathematics of computation 23, p. 221-230 (1969) - golub, "some modified matrix eigenvalue problems", siam review 15, p. 318-334 (1973) - stroud and secrest, "gaussian quadrature formulas", prentice-hall (1966) See also the routine gaussq.f in netlog.org or ACM Transactions on Mathematical Software algorithm 726. """ d = ctx.zeros(n, 1) e = ctx.zeros(n, 1) z = ctx.zeros(1, n) z[0,0] = 1 if qtype == "legendre": # legendre on the range -1 +1 , abramowitz, table 25.4, p.916 w = 2 for i in xrange(n): j = i + 1 e[i] = ctx.sqrt(j * j / (4 * j * j - ctx.mpf(1))) elif qtype == "legendre01": # legendre shifted to 0 1 , abramowitz, table 25.8, p.921 w = 1 for i in xrange(n): d[i] = 1 / ctx.mpf(2) j = i + 1 e[i] = ctx.sqrt(j * j / (16 * j * j - ctx.mpf(4))) elif qtype == "hermite": # hermite on the range -inf +inf , abramowitz, table 25.10,p.924 w = ctx.sqrt(ctx.pi) for i in xrange(n): j = i + 1 e[i] = ctx.sqrt(j / ctx.mpf(2)) elif qtype == "laguerre": # laguerre on the range 0 +inf , abramowitz, table 25.9, p. 
923 w = 1 for i in xrange(n): j = i + 1 d[i] = 2 * j - 1 e[i] = j elif qtype=="chebyshev1": # chebyshev polynimials of the first kind w = ctx.pi for i in xrange(n): e[i] = 1 / ctx.mpf(2) e[0] = ctx.sqrt(1 / ctx.mpf(2)) elif qtype == "chebyshev2": # chebyshev polynimials of the second kind w = ctx.pi / 2 for i in xrange(n): e[i] = 1 / ctx.mpf(2) elif qtype == "glaguerre": # generalized laguerre on the range 0 +inf w = ctx.gamma(1 + alpha) for i in xrange(n): j = i + 1 d[i] = 2 * j - 1 + alpha e[i] = ctx.sqrt(j * (j + alpha)) elif qtype == "jacobi": # jacobi polynomials alpha = ctx.mpf(alpha) beta = ctx.mpf(beta) ab = alpha + beta abi = ab + 2 w = (2**(ab+1)) * ctx.gamma(alpha + 1) * ctx.gamma(beta + 1) / ctx.gamma(abi) d[0] = (beta - alpha) / abi e[0] = ctx.sqrt(4 * (1 + alpha) * (1 + beta) / ((abi + 1) * (abi * abi))) a2b2 = beta * beta - alpha * alpha for i in xrange(1, n): j = i + 1 abi = 2 * j + ab d[i] = a2b2 / ((abi - 2) * abi) e[i] = ctx.sqrt(4 * j * (j + alpha) * (j + beta) * (j + ab) / ((abi * abi - 1) * abi * abi)) elif isinstance(qtype, str): raise ValueError("unknown quadrature rule \"%s\"" % qtype) elif not isinstance(qtype, str): w = qtype(d, e) else: assert 0 tridiag_eigen(ctx, d, e, z) for i in xrange(len(z)): z[i] *= z[i] z = z.transpose() return (d, w * z) ################################################################################################## ################################################################################################## ################################################################################################## def svd_r_raw(ctx, A, V = False, calc_u = False): """ This routine computes the singular value decomposition of a matrix A. Given A, two orthogonal matrices U and V are calculated such that A = U S V where S is a suitable shaped matrix whose off-diagonal elements are zero. The diagonal elements of S are the singular values of A, i.e. the squareroots of the eigenvalues of A' A or A A'. Here ' denotes the transpose. Householder bidiagonalization and a variant of the QR algorithm is used. overview of the matrices : A : m*n A gets replaced by U U : m*n U replaces A. If n>m then only the first m*m block of U is non-zero. column-orthogonal: U' U = B here B is a n*n matrix whose first min(m,n) diagonal elements are 1 and all other elements are zero. S : n*n diagonal matrix, only the diagonal elements are stored in the array S. only the first min(m,n) diagonal elements are non-zero. V : n*n orthogonal: V V' = V' V = 1 parameters: A (input/output) On input, A contains a real matrix of shape m*n. On output, if calc_u is true A contains the column-orthogonal matrix U; otherwise A is simply used as workspace and thus destroyed. V (input/output) if false, the matrix V is not calculated. otherwise V must be a matrix of shape n*n. calc_u (input) If true, the matrix U is calculated and replaces A. if false, U is not calculated and A is simply destroyed return value: S an array of length n containing the singular values of A sorted by decreasing magnitude. only the first min(m,n) elements are non-zero. This routine is a python translation of the fortran routine svd.f in the software library EISPACK (see netlib.org) which itself is based on the algol procedure svd described in: - num. math. 14, 403-420(1970) by golub and reinsch. - wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971). 
""" m, n = A.rows, A.cols S = ctx.zeros(n, 1) # work is a temporary array of size n work = ctx.zeros(n, 1) g = scale = anorm = 0 maxits = 3 * ctx.dps for i in xrange(n): # householder reduction to bidiagonal form work[i] = scale*g g = s = scale = 0 if i < m: for k in xrange(i, m): scale += ctx.fabs(A[k,i]) if scale != 0: for k in xrange(i, m): A[k,i] /= scale s += A[k,i] * A[k,i] f = A[i,i] g = -ctx.sqrt(s) if f < 0: g = -g h = f * g - s A[i,i] = f - g for j in xrange(i+1, n): s = 0 for k in xrange(i, m): s += A[k,i] * A[k,j] f = s / h for k in xrange(i, m): A[k,j] += f * A[k,i] for k in xrange(i,m): A[k,i] *= scale S[i] = scale * g g = s = scale = 0 if i < m and i != n - 1: for k in xrange(i+1, n): scale += ctx.fabs(A[i,k]) if scale: for k in xrange(i+1, n): A[i,k] /= scale s += A[i,k] * A[i,k] f = A[i,i+1] g = -ctx.sqrt(s) if f < 0: g = -g h = f * g - s A[i,i+1] = f - g for k in xrange(i+1, n): work[k] = A[i,k] / h for j in xrange(i+1, m): s = 0 for k in xrange(i+1, n): s += A[j,k] * A[i,k] for k in xrange(i+1, n): A[j,k] += s * work[k] for k in xrange(i+1, n): A[i,k] *= scale anorm = max(anorm, ctx.fabs(S[i]) + ctx.fabs(work[i])) if not isinstance(V, bool): for i in xrange(n-2, -1, -1): # accumulation of right hand transformations V[i+1,i+1] = 1 if work[i+1] != 0: for j in xrange(i+1, n): V[i,j] = (A[i,j] / A[i,i+1]) / work[i+1] for j in xrange(i+1, n): s = 0 for k in xrange(i+1, n): s += A[i,k] * V[j,k] for k in xrange(i+1, n): V[j,k] += s * V[i,k] for j in xrange(i+1, n): V[j,i] = V[i,j] = 0 V[0,0] = 1 if m<n : minnm = m else : minnm = n if calc_u: for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations g = S[i] for j in xrange(i+1, n): A[i,j] = 0 if g != 0: g = 1 / g for j in xrange(i+1, n): s = 0 for k in xrange(i+1, m): s += A[k,i] * A[k,j] f = (s / A[i,i]) * g for k in xrange(i, m): A[k,j] += f * A[k,i] for j in xrange(i, m): A[j,i] *= g else: for j in xrange(i, m): A[j,i] = 0 A[i,i] += 1 for k in xrange(n - 1, -1, -1): # diagonalization of the bidiagonal form: # loop over singular values, and over allowed itations its = 0 while 1: its += 1 flag = True for l in xrange(k, -1, -1): nm = l-1 if ctx.fabs(work[l]) + anorm == anorm: flag = False break if ctx.fabs(S[nm]) + anorm == anorm: break if flag: c = 0 s = 1 for i in xrange(l, k + 1): f = s * work[i] work[i] *= c if ctx.fabs(f) + anorm == anorm: break g = S[i] h = ctx.hypot(f, g) S[i] = h h = 1 / h c = g * h s = - f * h if calc_u: for j in xrange(m): y = A[j,nm] z = A[j,i] A[j,nm] = y * c + z * s A[j,i] = z * c - y * s z = S[k] if l == k: # convergence if z < 0: # singular value is made nonnegative S[k] = -z if not isinstance(V, bool): for j in xrange(n): V[k,j] = -V[k,j] break if its >= maxits: raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its) x = S[l] # shift from bottom 2 by 2 minor nm = k-1 y = S[nm] g = work[nm] h = work[k] f = ((y - z) * (y + z) + (g - h) * (g + h))/(2 * h * y) g = ctx.hypot(f, 1) if f >= 0: f = ((x - z) * (x + z) + h * ((y / (f + g)) - h)) / x else: f = ((x - z) * (x + z) + h * ((y / (f - g)) - h)) / x c = s = 1 # next qt transformation for j in xrange(l, nm + 1): g = work[j+1] y = S[j+1] h = s * g g = c * g z = ctx.hypot(f, h) work[j] = z c = f / z s = h / z f = x * c + g * s g = g * c - x * s h = y * s y *= c if not isinstance(V, bool): for jj in xrange(n): x = V[j ,jj] z = V[j+1,jj] V[j ,jj]= x * c + z * s V[j+1 ,jj]= z * c - x * s z = ctx.hypot(f, h) S[j] = z if z != 0: # rotation can be arbitray if z=0 z = 1 / z c = f * z s = h * z f = c * g + s 
* y x = c * y - s * g if calc_u: for jj in xrange(m): y = A[jj,j ] z = A[jj,j+1] A[jj,j ] = y * c + z * s A[jj,j+1 ] = z * c - y * s work[l] = 0 work[k] = f S[k] = x ########################## # Sort singular values into decreasing order (bubble-sort) for i in xrange(n): imax = i s = ctx.fabs(S[i]) # s is the current maximal element for j in xrange(i + 1, n): c = ctx.fabs(S[j]) if c > s: s = c imax = j if imax != i: # swap singular values z = S[i] S[i] = S[imax] S[imax] = z if calc_u: for j in xrange(m): z = A[j,i] A[j,i] = A[j,imax] A[j,imax] = z if not isinstance(V, bool): for j in xrange(n): z = V[i,j] V[i,j] = V[imax,j] V[imax,j] = z return S ####################### def svd_c_raw(ctx, A, V = False, calc_u = False): """ This routine computes the singular value decomposition of a matrix A. Given A, two unitary matrices U and V are calculated such that A = U S V where S is a suitable shaped matrix whose off-diagonal elements are zero. The diagonal elements of S are the singular values of A, i.e. the squareroots of the eigenvalues of A' A or A A'. Here ' denotes the hermitian transpose (i.e. transposition and conjugation). Householder bidiagonalization and a variant of the QR algorithm is used. overview of the matrices : A : m*n A gets replaced by U U : m*n U replaces A. If n>m then only the first m*m block of U is non-zero. column-unitary: U' U = B here B is a n*n matrix whose first min(m,n) diagonal elements are 1 and all other elements are zero. S : n*n diagonal matrix, only the diagonal elements are stored in the array S. only the first min(m,n) diagonal elements are non-zero. V : n*n unitary: V V' = V' V = 1 parameters: A (input/output) On input, A contains a complex matrix of shape m*n. On output, if calc_u is true A contains the column-unitary matrix U; otherwise A is simply used as workspace and thus destroyed. V (input/output) if false, the matrix V is not calculated. otherwise V must be a matrix of shape n*n. calc_u (input) If true, the matrix U is calculated and replaces A. if false, U is not calculated and A is simply destroyed return value: S an array of length n containing the singular values of A sorted by decreasing magnitude. only the first min(m,n) elements are non-zero. This routine is a python translation of the fortran routine svd.f in the software library EISPACK (see netlib.org) which itself is based on the algol procedure svd described in: - num. math. 14, 403-420(1970) by golub and reinsch. - wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971). 
""" m, n = A.rows, A.cols S = ctx.zeros(n, 1) # work is a temporary array of size n work = ctx.zeros(n, 1) lbeta = ctx.zeros(n, 1) rbeta = ctx.zeros(n, 1) dwork = ctx.zeros(n, 1) g = scale = anorm = 0 maxits = 3 * ctx.dps for i in xrange(n): # householder reduction to bidiagonal form dwork[i] = scale * g # dwork are the side-diagonal elements g = s = scale = 0 if i < m: for k in xrange(i, m): scale += ctx.fabs(ctx.re(A[k,i])) + ctx.fabs(ctx.im(A[k,i])) if scale != 0: for k in xrange(i, m): A[k,i] /= scale ar = ctx.re(A[k,i]) ai = ctx.im(A[k,i]) s += ar * ar + ai * ai f = A[i,i] g = -ctx.sqrt(s) if ctx.re(f) < 0: beta = -g - ctx.conj(f) g = -g else: beta = -g + ctx.conj(f) beta /= ctx.conj(beta) beta += 1 h = 2 * (ctx.re(f) * g - s) A[i,i] = f - g beta /= h lbeta[i] = (beta / scale) / scale for j in xrange(i+1, n): s = 0 for k in xrange(i, m): s += ctx.conj(A[k,i]) * A[k,j] f = beta * s for k in xrange(i, m): A[k,j] += f * A[k,i] for k in xrange(i, m): A[k,i] *= scale S[i] = scale * g # S are the diagonal elements g = s = scale = 0 if i < m and i != n - 1: for k in xrange(i+1, n): scale += ctx.fabs(ctx.re(A[i,k])) + ctx.fabs(ctx.im(A[i,k])) if scale: for k in xrange(i+1, n): A[i,k] /= scale ar = ctx.re(A[i,k]) ai = ctx.im(A[i,k]) s += ar * ar + ai * ai f = A[i,i+1] g = -ctx.sqrt(s) if ctx.re(f) < 0: beta = -g - ctx.conj(f) g = -g else: beta = -g + ctx.conj(f) beta /= ctx.conj(beta) beta += 1 h = 2 * (ctx.re(f) * g - s) A[i,i+1] = f - g beta /= h rbeta[i] = (beta / scale) / scale for k in xrange(i+1, n): work[k] = A[i, k] for j in xrange(i+1, m): s = 0 for k in xrange(i+1, n): s += ctx.conj(A[i,k]) * A[j,k] f = s * beta for k in xrange(i+1,n): A[j,k] += f * work[k] for k in xrange(i+1, n): A[i,k] *= scale anorm = max(anorm,ctx.fabs(S[i]) + ctx.fabs(dwork[i])) if not isinstance(V, bool): for i in xrange(n-2, -1, -1): # accumulation of right hand transformations V[i+1,i+1] = 1 if dwork[i+1] != 0: f = ctx.conj(rbeta[i]) for j in xrange(i+1, n): V[i,j] = A[i,j] * f for j in xrange(i+1, n): s = 0 for k in xrange(i+1, n): s += ctx.conj(A[i,k]) * V[j,k] for k in xrange(i+1, n): V[j,k] += s * V[i,k] for j in xrange(i+1,n): V[j,i] = V[i,j] = 0 V[0,0] = 1 if m < n : minnm = m else : minnm = n if calc_u: for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations g = S[i] for j in xrange(i+1, n): A[i,j] = 0 if g != 0: g = 1 / g for j in xrange(i+1, n): s = 0 for k in xrange(i+1, m): s += ctx.conj(A[k,i]) * A[k,j] f = s * ctx.conj(lbeta[i]) for k in xrange(i, m): A[k,j] += f * A[k,i] for j in xrange(i, m): A[j,i] *= g else: for j in xrange(i, m): A[j,i] = 0 A[i,i] += 1 for k in xrange(n-1, -1, -1): # diagonalization of the bidiagonal form: # loop over singular values, and over allowed itations its = 0 while 1: its += 1 flag = True for l in xrange(k, -1, -1): nm = l - 1 if ctx.fabs(dwork[l]) + anorm == anorm: flag = False break if ctx.fabs(S[nm]) + anorm == anorm: break if flag: c = 0 s = 1 for i in xrange(l, k+1): f = s * dwork[i] dwork[i] *= c if ctx.fabs(f) + anorm == anorm: break g = S[i] h = ctx.hypot(f, g) S[i] = h h = 1 / h c = g * h s = -f * h if calc_u: for j in xrange(m): y = A[j,nm] z = A[j,i] A[j,nm]= y * c + z * s A[j,i] = z * c - y * s z = S[k] if l == k: # convergence if z < 0: # singular value is made nonnegative S[k] = -z if not isinstance(V, bool): for j in xrange(n): V[k,j] = -V[k,j] break if its >= maxits: raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its) x = S[l] # shift from bottom 2 by 2 minor nm = k-1 y = S[nm] g = dwork[nm] h = 
dwork[k] f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2 * h * y) g = ctx.hypot(f, 1) if f >=0: f = (( x - z) *( x + z) + h *((y / (f + g)) - h)) / x else: f = (( x - z) *( x + z) + h *((y / (f - g)) - h)) / x c = s = 1 # next qt transformation for j in xrange(l, nm + 1): g = dwork[j+1] y = S[j+1] h = s * g g = c * g z = ctx.hypot(f, h) dwork[j] = z c = f / z s = h / z f = x * c + g * s g = g * c - x * s h = y * s y *= c if not isinstance(V, bool): for jj in xrange(n): x = V[j ,jj] z = V[j+1,jj] V[j ,jj]= x * c + z * s V[j+1,jj ]= z * c - x * s z = ctx.hypot(f, h) S[j] = z if z != 0: # rotation can be arbitray if z=0 z = 1 / z c = f * z s = h * z f = c * g + s * y x = c * y - s * g if calc_u: for jj in xrange(m): y = A[jj,j ] z = A[jj,j+1] A[jj,j ]= y * c + z * s A[jj,j+1 ]= z * c - y * s dwork[l] = 0 dwork[k] = f S[k] = x ########################## # Sort singular values into decreasing order (bubble-sort) for i in xrange(n): imax = i s = ctx.fabs(S[i]) # s is the current maximal element for j in xrange(i + 1, n): c = ctx.fabs(S[j]) if c > s: s = c imax = j if imax != i: # swap singular values z = S[i] S[i] = S[imax] S[imax] = z if calc_u: for j in xrange(m): z = A[j,i] A[j,i] = A[j,imax] A[j,imax] = z if not isinstance(V, bool): for j in xrange(n): z = V[i,j] V[i,j] = V[imax,j] V[imax,j] = z return S ################################################################################################## @defun def svd_r(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): """ This routine computes the singular value decomposition of a matrix A. Given A, two orthogonal matrices U and V are calculated such that A = U S V and U' U = 1 and V V' = 1 where S is a suitable shaped matrix whose off-diagonal elements are zero. Here ' denotes the transpose. The diagonal elements of S are the singular values of A, i.e. the squareroots of the eigenvalues of A' A or A A'. input: A : a real matrix of shape (m, n) full_matrices : if true, U and V are of shape (m, m) and (n, n). if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). compute_uv : if true, U and V are calculated. if false, only S is calculated. overwrite_a : if true, allows modification of A which may improve performance. if false, A is not modified. output: U : an orthogonal matrix: U' U = 1. if full_matrices is true, U is of shape (m, m). ortherwise it is of shape (m, min(m, n)). S : an array of length min(m, n) containing the singular values of A sorted by decreasing magnitude. V : an orthogonal matrix: V V' = 1. if full_matrices is true, V is of shape (n, n). ortherwise it is of shape (min(m, n), n). 
return value: S if compute_uv is false (U, S, V) if compute_uv is true overview of the matrices: full_matrices true: A : m*n U : m*m U' U = 1 S as matrix : m*n V : n*n V V' = 1 full_matrices false: A : m*n U : m*min(n,m) U' U = 1 S as matrix : min(m,n)*min(m,n) V : min(m,n)*n V V' = 1 examples: >>> from mpmath import mp >>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]]) >>> S = mp.svd_r(A, compute_uv = False) >>> print(S) [6.0] [3.0] [1.0] >>> U, S, V = mp.svd_r(A) >>> print(mp.chop(A - U * mp.diag(S) * V)) [0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0] see also: svd, svd_c """ m, n = A.rows, A.cols if not compute_uv: if not overwrite_a: A = A.copy() S = svd_r_raw(ctx, A, V = False, calc_u = False) S = S[:min(m,n)] return S if full_matrices and n < m: V = ctx.zeros(m, m) A0 = ctx.zeros(m, m) A0[:,:n] = A S = svd_r_raw(ctx, A0, V, calc_u = True) S = S[:n] V = V[:n,:n] return (A0, S, V) else: if not overwrite_a: A = A.copy() V = ctx.zeros(n, n) S = svd_r_raw(ctx, A, V, calc_u = True) if n > m: if full_matrices == False: V = V[:m,:] S = S[:m] A = A[:,:m] return (A, S, V) ############################## @defun def svd_c(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): """ This routine computes the singular value decomposition of a matrix A. Given A, two unitary matrices U and V are calculated such that A = U S V and U' U = 1 and V V' = 1 where S is a suitable shaped matrix whose off-diagonal elements are zero. Here ' denotes the hermitian transpose (i.e. transposition and complex conjugation). The diagonal elements of S are the singular values of A, i.e. the squareroots of the eigenvalues of A' A or A A'. input: A : a complex matrix of shape (m, n) full_matrices : if true, U and V are of shape (m, m) and (n, n). if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). compute_uv : if true, U and V are calculated. if false, only S is calculated. overwrite_a : if true, allows modification of A which may improve performance. if false, A is not modified. output: U : an unitary matrix: U' U = 1. if full_matrices is true, U is of shape (m, m). ortherwise it is of shape (m, min(m, n)). S : an array of length min(m, n) containing the singular values of A sorted by decreasing magnitude. V : an unitary matrix: V V' = 1. if full_matrices is true, V is of shape (n, n). ortherwise it is of shape (min(m, n), n). 
return value: S if compute_uv is false (U, S, V) if compute_uv is true overview of the matrices: full_matrices true: A : m*n U : m*m U' U = 1 S as matrix : m*n V : n*n V V' = 1 full_matrices false: A : m*n U : m*min(n,m) U' U = 1 S as matrix : min(m,n)*min(m,n) V : min(m,n)*n V V' = 1 example: >>> from mpmath import mp >>> A = mp.matrix([[-2j, -1-3j, -2+2j], [2-2j, -1-3j, 1], [-3+1j,-2j,0]]) >>> S = mp.svd_c(A, compute_uv = False) >>> print(mp.chop(S - mp.matrix([mp.sqrt(34), mp.sqrt(15), mp.sqrt(6)]))) [0.0] [0.0] [0.0] >>> U, S, V = mp.svd_c(A) >>> print(mp.chop(A - U * mp.diag(S) * V)) [0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0] see also: svd, svd_r """ m, n = A.rows, A.cols if not compute_uv: if not overwrite_a: A = A.copy() S = svd_c_raw(ctx, A, V = False, calc_u = False) S = S[:min(m,n)] return S if full_matrices and n < m: V = ctx.zeros(m, m) A0 = ctx.zeros(m, m) A0[:,:n] = A S = svd_c_raw(ctx, A0, V, calc_u = True) S = S[:n] V = V[:n,:n] return (A0, S, V) else: if not overwrite_a: A = A.copy() V = ctx.zeros(n, n) S = svd_c_raw(ctx, A, V, calc_u = True) if n > m: if full_matrices == False: V = V[:m,:] S = S[:m] A = A[:,:m] return (A, S, V) @defun def svd(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): """ "svd" is a unified interface for "svd_r" and "svd_c". Depending on whether A is real or complex the appropriate function is called. This routine computes the singular value decomposition of a matrix A. Given A, two orthogonal (A real) or unitary (A complex) matrices U and V are calculated such that A = U S V and U' U = 1 and V V' = 1 where S is a suitable shaped matrix whose off-diagonal elements are zero. Here ' denotes the hermitian transpose (i.e. transposition and complex conjugation). The diagonal elements of S are the singular values of A, i.e. the squareroots of the eigenvalues of A' A or A A'. input: A : a real or complex matrix of shape (m, n) full_matrices : if true, U and V are of shape (m, m) and (n, n). if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). compute_uv : if true, U and V are calculated. if false, only S is calculated. overwrite_a : if true, allows modification of A which may improve performance. if false, A is not modified. output: U : an orthogonal or unitary matrix: U' U = 1. if full_matrices is true, U is of shape (m, m). ortherwise it is of shape (m, min(m, n)). S : an array of length min(m, n) containing the singular values of A sorted by decreasing magnitude. V : an orthogonal or unitary matrix: V V' = 1. if full_matrices is true, V is of shape (n, n). ortherwise it is of shape (min(m, n), n). return value: S if compute_uv is false (U, S, V) if compute_uv is true overview of the matrices: full_matrices true: A : m*n U : m*m U' U = 1 S as matrix : m*n V : n*n V V' = 1 full_matrices false: A : m*n U : m*min(n,m) U' U = 1 S as matrix : min(m,n)*min(m,n) V : min(m,n)*n V V' = 1 examples: >>> from mpmath import mp >>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]]) >>> S = mp.svd(A, compute_uv = False) >>> print(S) [6.0] [3.0] [1.0] >>> U, S, V = mp.svd(A) >>> print(mp.chop(A - U * mp.diag(S) * V)) [0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0] see also: svd_r, svd_c """ iscomplex = any(type(x) is ctx.mpc for x in A) if iscomplex: return ctx.svd_c(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a) else: return ctx.svd_r(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a)
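A small application sketch of the svd wrapper just defined, using the 3x3 example matrix from the svd_r docstring (whose singular values are 6, 3 and 1) to form the 2-norm condition number, i.e. the ratio of the largest to the smallest singular value. Only mp.matrix and the context methods defined in this file are assumed.

>>> from mpmath import mp
>>> mp.dps = 20
>>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]])
>>> S = mp.svd(A, compute_uv = False)
>>> print(mp.chop(S[0] / S[2] - 6, tol = 1e-15))
0.0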
file_length: 58,524
avg_line_length: 31.370022
max_line_length: 127
extension_type: py
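The quadrature rules built by gauss_quadrature above are exact for polynomials of degree below 2*n. A minimal check of that property in the same doctest style as the docstring examples, assuming only the mp context methods used there: a 3-point Gauss-Legendre rule reproduces the integral of x**4 over (-1, 1), which equals 2/5, essentially to working precision.

>>> from mpmath import mp
>>> mp.dps = 25
>>> X, W = mp.gauss_quadrature(3, "legendre")
>>> approx = mp.fdot([(x**4, w) for x, w in zip(X, W)])
>>> print(mp.chop(approx - mp.mpf(2)/5, tol = 1e-20))
0.0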
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/matrices/__init__.py
from . import eigen # to set methods
from . import eigen_symmetric # to set methods
file_length: 94
avg_line_length: 30.666667
max_line_length: 46
extension_type: py
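As the '# to set methods' comments indicate, these imports exist only for their side effects: loading eigen and eigen_symmetric executes their @defun-decorated definitions, which attach routines such as eig, eigsy, eighe, eigh, svd and gauss_quadrature to the mpmath contexts. A short sketch of what that means for callers, using the same calling convention as the eigen_symmetric docstrings; the eigenvalues of the example matrix are 1 and 3.

>>> from mpmath import mp
>>> A = mp.matrix([[2, 1], [1, 2]])
>>> E, Q = mp.eigh(A)    # available because this package imported eigen_symmetric
>>> print(mp.chop(E - mp.matrix([1, 3])))
[0.0]
[0.0]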
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/calculus/optimization.py
from copy import copy from ..libmp.backend import xrange, print_ class OptimizationMethods(object): def __init__(ctx): pass ############## # 1D-SOLVERS # ############## class Newton: """ 1d-solver generating pairs of approximative root and error. Needs starting points x0 close to the root. Pro: * converges fast * sometimes more robust than secant with bad second starting point Contra: * converges slowly for multiple roots * needs first derivative * 2 function evaluations per iteration """ maxsteps = 20 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx if len(x0) == 1: self.x0 = x0[0] else: raise ValueError('expected 1 starting point, got %i' % len(x0)) self.f = f if not 'df' in kwargs: def df(x): return self.ctx.diff(f, x) else: df = kwargs['df'] self.df = df def __iter__(self): f = self.f df = self.df x0 = self.x0 while True: x1 = x0 - f(x0) / df(x0) error = abs(x1 - x0) x0 = x1 yield (x1, error) class Secant: """ 1d-solver generating pairs of approximative root and error. Needs starting points x0 and x1 close to the root. x1 defaults to x0 + 0.25. Pro: * converges fast Contra: * converges slowly for multiple roots """ maxsteps = 30 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx if len(x0) == 1: self.x0 = x0[0] self.x1 = self.x0 + 0.25 elif len(x0) == 2: self.x0 = x0[0] self.x1 = x0[1] else: raise ValueError('expected 1 or 2 starting points, got %i' % len(x0)) self.f = f def __iter__(self): f = self.f x0 = self.x0 x1 = self.x1 f0 = f(x0) while True: f1 = f(x1) l = x1 - x0 if not l: break s = (f1 - f0) / l if not s: break x0, x1 = x1, x1 - f1/s f0 = f1 yield x1, abs(l) class MNewton: """ 1d-solver generating pairs of approximative root and error. Needs starting point x0 close to the root. Uses modified Newton's method that converges fast regardless of the multiplicity of the root. Pro: * converges fast for multiple roots Contra: * needs first and second derivative of f * 3 function evaluations per iteration """ maxsteps = 20 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx if not len(x0) == 1: raise ValueError('expected 1 starting point, got %i' % len(x0)) self.x0 = x0[0] self.f = f if not 'df' in kwargs: def df(x): return self.ctx.diff(f, x) else: df = kwargs['df'] self.df = df if not 'd2f' in kwargs: def d2f(x): return self.ctx.diff(df, x) else: d2f = kwargs['df'] self.d2f = d2f def __iter__(self): x = self.x0 f = self.f df = self.df d2f = self.d2f while True: prevx = x fx = f(x) if fx == 0: break dfx = df(x) d2fx = d2f(x) # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x) x -= fx / (dfx - fx * d2fx / dfx) error = abs(x - prevx) yield x, error class Halley: """ 1d-solver generating pairs of approximative root and error. Needs a starting point x0 close to the root. Uses Halley's method with cubic convergence rate. 
Pro: * converges even faster the Newton's method * useful when computing with *many* digits Contra: * needs first and second derivative of f * 3 function evaluations per iteration * converges slowly for multiple roots """ maxsteps = 20 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx if not len(x0) == 1: raise ValueError('expected 1 starting point, got %i' % len(x0)) self.x0 = x0[0] self.f = f if not 'df' in kwargs: def df(x): return self.ctx.diff(f, x) else: df = kwargs['df'] self.df = df if not 'd2f' in kwargs: def d2f(x): return self.ctx.diff(df, x) else: d2f = kwargs['df'] self.d2f = d2f def __iter__(self): x = self.x0 f = self.f df = self.df d2f = self.d2f while True: prevx = x fx = f(x) dfx = df(x) d2fx = d2f(x) x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx) error = abs(x - prevx) yield x, error class Muller: """ 1d-solver generating pairs of approximative root and error. Needs starting points x0, x1 and x2 close to the root. x1 defaults to x0 + 0.25; x2 to x1 + 0.25. Uses Muller's method that converges towards complex roots. Pro: * converges fast (somewhat faster than secant) * can find complex roots Contra: * converges slowly for multiple roots * may have complex values for real starting points and real roots http://en.wikipedia.org/wiki/Muller's_method """ maxsteps = 30 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx if len(x0) == 1: self.x0 = x0[0] self.x1 = self.x0 + 0.25 self.x2 = self.x1 + 0.25 elif len(x0) == 2: self.x0 = x0[0] self.x1 = x0[1] self.x2 = self.x1 + 0.25 elif len(x0) == 3: self.x0 = x0[0] self.x1 = x0[1] self.x2 = x0[2] else: raise ValueError('expected 1, 2 or 3 starting points, got %i' % len(x0)) self.f = f self.verbose = kwargs['verbose'] def __iter__(self): f = self.f x0 = self.x0 x1 = self.x1 x2 = self.x2 fx0 = f(x0) fx1 = f(x1) fx2 = f(x2) while True: # TODO: maybe refactoring with function for divided differences # calculate divided differences fx2x1 = (fx1 - fx2) / (x1 - x2) fx2x0 = (fx0 - fx2) / (x0 - x2) fx1x0 = (fx0 - fx1) / (x0 - x1) w = fx2x1 + fx2x0 - fx1x0 fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2) if w == 0 and fx2x1x0 == 0: if self.verbose: print_('canceled with') print_('x0 =', x0, ', x1 =', x1, 'and x2 =', x2) break x0 = x1 fx0 = fx1 x1 = x2 fx1 = fx2 # denominator should be as large as possible => choose sign r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0) if abs(w - r) > abs(w + r): r = -r x2 -= 2*fx2 / (w + r) fx2 = f(x2) error = abs(x2 - x1) yield x2, error # TODO: consider raising a ValueError when there's no sign change in a and b class Bisection: """ 1d-solver generating pairs of approximative root and error. Uses bisection method to find a root of f in [a, b]. Might fail for multiple roots (needs sign change). Pro: * robust and reliable Contra: * converges slowly * needs sign change """ maxsteps = 100 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx if len(x0) != 2: raise ValueError('expected interval of 2 points, got %i' % len(x0)) self.f = f self.a = x0[0] self.b = x0[1] def __iter__(self): f = self.f a = self.a b = self.b l = b - a fb = f(b) while True: m = self.ctx.ldexp(a + b, -1) fm = f(m) sign = fm * fb if sign < 0: a = m elif sign > 0: b = m fb = fm else: yield m, self.ctx.zero l /= 2 yield (a + b)/2, abs(l) def _getm(method): """ Return a function to calculate m for Illinois-like methods. 
""" if method == 'illinois': def getm(fz, fb): return 0.5 elif method == 'pegasus': def getm(fz, fb): return fb/(fb + fz) elif method == 'anderson': def getm(fz, fb): m = 1 - fz/fb if m > 0: return m else: return 0.5 else: raise ValueError("method '%s' not recognized" % method) return getm class Illinois: """ 1d-solver generating pairs of approximative root and error. Uses Illinois method or similar to find a root of f in [a, b]. Might fail for multiple roots (needs sign change). Combines bisect with secant (improved regula falsi). The only difference between the methods is the scaling factor m, which is used to ensure convergence (you can choose one using the 'method' keyword): Illinois method ('illinois'): m = 0.5 Pegasus method ('pegasus'): m = fb/(fb + fz) Anderson-Bjoerk method ('anderson'): m = 1 - fz/fb if positive else 0.5 Pro: * converges very fast Contra: * has problems with multiple roots * needs sign change """ maxsteps = 30 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx if len(x0) != 2: raise ValueError('expected interval of 2 points, got %i' % len(x0)) self.a = x0[0] self.b = x0[1] self.f = f self.tol = kwargs['tol'] self.verbose = kwargs['verbose'] self.method = kwargs.get('method', 'illinois') self.getm = _getm(self.method) if self.verbose: print_('using %s method' % self.method) def __iter__(self): method = self.method f = self.f a = self.a b = self.b fa = f(a) fb = f(b) m = None while True: l = b - a if l == 0: break s = (fb - fa) / l z = a - fa/s fz = f(z) if abs(fz) < self.tol: # TODO: better condition (when f is very flat) if self.verbose: print_('canceled with z =', z) yield z, l break if fz * fb < 0: # root in [z, b] a = b fa = fb b = z fb = fz else: # root in [a, z] m = self.getm(fz, fb) b = z fb = fz fa = m*fa # scale down to ensure convergence if self.verbose and m and not method == 'illinois': print_('m:', m) yield (a + b)/2, abs(l) def Pegasus(*args, **kwargs): """ 1d-solver generating pairs of approximative root and error. Uses Pegasus method to find a root of f in [a, b]. Wrapper for illinois to use method='pegasus'. """ kwargs['method'] = 'pegasus' return Illinois(*args, **kwargs) def Anderson(*args, **kwargs): """ 1d-solver generating pairs of approximative root and error. Uses Anderson-Bjoerk method to find a root of f in [a, b]. Wrapper for illinois to use method='pegasus'. """ kwargs['method'] = 'anderson' return Illinois(*args, **kwargs) # TODO: check whether it's possible to combine it with Illinois stuff class Ridder: """ 1d-solver generating pairs of approximative root and error. Ridders' method to find a root of f in [a, b]. Is told to perform as well as Brent's method while being simpler. 
Pro: * very fast * simpler than Brent's method Contra: * two function evaluations per step * has problems with multiple roots * needs sign change http://en.wikipedia.org/wiki/Ridders'_method """ maxsteps = 30 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx self.f = f if len(x0) != 2: raise ValueError('expected interval of 2 points, got %i' % len(x0)) self.x1 = x0[0] self.x2 = x0[1] self.verbose = kwargs['verbose'] self.tol = kwargs['tol'] def __iter__(self): ctx = self.ctx f = self.f x1 = self.x1 fx1 = f(x1) x2 = self.x2 fx2 = f(x2) while True: x3 = 0.5*(x1 + x2) fx3 = f(x3) x4 = x3 + (x3 - x1) * ctx.sign(fx1 - fx2) * fx3 / ctx.sqrt(fx3**2 - fx1*fx2) fx4 = f(x4) if abs(fx4) < self.tol: # TODO: better condition (when f is very flat) if self.verbose: print_('canceled with f(x4) =', fx4) yield x4, abs(x1 - x2) break if fx4 * fx2 < 0: # root in [x4, x2] x1 = x4 fx1 = fx4 else: # root in [x1, x4] x2 = x4 fx2 = fx4 error = abs(x1 - x2) yield (x1 + x2)/2, error class ANewton: """ EXPERIMENTAL 1d-solver generating pairs of approximative root and error. Uses Newton's method modified to use Steffensens method when convergence is slow. (I.e. for multiple roots.) """ maxsteps = 20 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx if not len(x0) == 1: raise ValueError('expected 1 starting point, got %i' % len(x0)) self.x0 = x0[0] self.f = f if not 'df' in kwargs: def df(x): return self.ctx.diff(f, x) else: df = kwargs['df'] self.df = df def phi(x): return x - f(x) / df(x) self.phi = phi self.verbose = kwargs['verbose'] def __iter__(self): x0 = self.x0 f = self.f df = self.df phi = self.phi error = 0 counter = 0 while True: prevx = x0 try: x0 = phi(x0) except ZeroDivisionError: if self.verbose: print_('ZeroDivisionError: canceled with x =', x0) break preverror = error error = abs(prevx - x0) # TODO: decide not to use convergence acceleration if error and abs(error - preverror) / error < 1: if self.verbose: print_('converging slowly') counter += 1 if counter >= 3: # accelerate convergence phi = steffensen(phi) counter = 0 if self.verbose: print_('accelerating convergence') yield x0, error # TODO: add Brent ############################ # MULTIDIMENSIONAL SOLVERS # ############################ def jacobian(ctx, f, x): """ Calculate the Jacobian matrix of a function at the point x0. This is the first derivative of a vectorial function: f : R^m -> R^n with m >= n """ x = ctx.matrix(x) h = ctx.sqrt(ctx.eps) fx = ctx.matrix(f(*x)) m = len(fx) n = len(x) J = ctx.matrix(m, n) for j in xrange(n): xj = x.copy() xj[j] += h Jj = (ctx.matrix(f(*xj)) - fx) / h for i in xrange(m): J[i,j] = Jj[i] return J # TODO: test with user-specified jacobian matrix, support force_type class MDNewton: """ Find the root of a vector function numerically using Newton's method. f is a vector function representing a nonlinear equation system. x0 is the starting point close to the root. J is a function returning the Jacobian matrix for a point. Supports overdetermined systems. Use the 'norm' keyword to specify which norm to use. Defaults to max-norm. The function to calculate the Jacobian matrix can be given using the keyword 'J'. Otherwise it will be calculated numerically. Please note that this method converges only locally. Especially for high- dimensional systems it is not trivial to find a good starting point being close enough to the root. It is recommended to use a faster, low-precision solver from SciPy [1] or OpenOpt [2] to get an initial guess. 
Afterwards you can use this method for root-polishing to any precision. [1] http://scipy.org [2] http://openopt.org/Welcome """ maxsteps = 10 def __init__(self, ctx, f, x0, **kwargs): self.ctx = ctx self.f = f if isinstance(x0, (tuple, list)): x0 = ctx.matrix(x0) assert x0.cols == 1, 'need a vector' self.x0 = x0 if 'J' in kwargs: self.J = kwargs['J'] else: def J(*x): return ctx.jacobian(f, x) self.J = J self.norm = kwargs['norm'] self.verbose = kwargs['verbose'] def __iter__(self): f = self.f x0 = self.x0 norm = self.norm J = self.J fx = self.ctx.matrix(f(*x0)) fxnorm = norm(fx) cancel = False while not cancel: # get direction of descent fxn = -fx Jx = J(*x0) s = self.ctx.lu_solve(Jx, fxn) if self.verbose: print_('Jx:') print_(Jx) print_('s:', s) # damping step size TODO: better strategy (hard task) l = self.ctx.one x1 = x0 + s while True: if x1 == x0: if self.verbose: print_("canceled, won't get more excact") cancel = True break fx = self.ctx.matrix(f(*x1)) newnorm = norm(fx) if newnorm < fxnorm: # new x accepted fxnorm = newnorm x0 = x1 break l /= 2 x1 = x0 + l*s yield (x0, fxnorm) ############# # UTILITIES # ############# str2solver = {'newton':Newton, 'secant':Secant, 'mnewton':MNewton, 'halley':Halley, 'muller':Muller, 'bisect':Bisection, 'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson, 'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton} def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs): r""" Find a solution to `f(x) = 0`, using *x0* as starting point or interval for *x*. Multidimensional overdetermined systems are supported. You can specify them using a function or a list of functions. If the found root does not satisfy `|f(x)|^2 \leq \mathrm{tol}`, an exception is raised (this can be disabled with *verify=False*). **Arguments** *f* one dimensional function *x0* starting point, several starting points or interval (depends on solver) *tol* the returned solution has an error smaller than this *verbose* print additional information for each iteration if true *verify* verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}` *solver* a generator for *f* and *x0* returning approximative solution and error *maxsteps* after how many steps the solver will cancel *df* first derivative of *f* (used by some solvers) *d2f* second derivative of *f* (used by some solvers) *multidimensional* force multidimensional solving *J* Jacobian matrix of *f* (used by multidimensional solvers) *norm* used vector norm (used by multidimensional solvers) solver has to be callable with ``(f, x0, **kwargs)`` and return an generator yielding pairs of approximative solution and estimated error (which is expected to be positive). You can use the following string aliases: 'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson', 'ridder', 'anewton', 'bisect' See mpmath.calculus.optimization for their documentation. **Examples** The function :func:`~mpmath.findroot` locates a root of a given function using the secant method by default. 
A simple example use of the secant method is to compute `\pi` as the root of `\sin x` closest to `x_0 = 3`:: >>> from mpmath import * >>> mp.dps = 30; mp.pretty = True >>> findroot(sin, 3) 3.14159265358979323846264338328 The secant method can be used to find complex roots of analytic functions, although it must in that case generally be given a nonreal starting value (or else it will never leave the real line):: >>> mp.dps = 15 >>> findroot(lambda x: x**3 + 2*x + 1, j) (0.226698825758202 + 1.46771150871022j) A nice application is to compute nontrivial roots of the Riemann zeta function with many digits (good initial values are needed for convergence):: >>> mp.dps = 30 >>> findroot(zeta, 0.5+14j) (0.5 + 14.1347251417346937904572519836j) The secant method can also be used as an optimization algorithm, by passing it a derivative of a function. The following example locates the positive minimum of the gamma function:: >>> mp.dps = 20 >>> findroot(lambda x: diff(gamma, x), 1) 1.4616321449683623413 Finally, a useful application is to compute inverse functions, such as the Lambert W function which is the inverse of `w e^w`, given the first term of the solution's asymptotic expansion as the initial value. In basic cases, this gives identical results to mpmath's built-in ``lambertw`` function:: >>> def lambert(x): ... return findroot(lambda w: w*exp(w) - x, log(1+x)) ... >>> mp.dps = 15 >>> lambert(1); lambertw(1) 0.567143290409784 0.567143290409784 >>> lambert(1000); lambert(1000) 5.2496028524016 5.2496028524016 Multidimensional functions are also supported:: >>> f = [lambda x1, x2: x1**2 + x2, ... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3] >>> findroot(f, (0, 0)) [-0.618033988749895] [-0.381966011250105] >>> findroot(f, (10, 10)) [ 1.61803398874989] [-2.61803398874989] You can verify this by solving the system manually. Please note that the following (more general) syntax also works:: >>> def f(x1, x2): ... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3 ... >>> findroot(f, (0, 0)) [-0.618033988749895] [-0.381966011250105] **Multiple roots** For multiple roots all methods of the Newtonian family (including secant) converge slowly. Consider this example:: >>> f = lambda x: (x - 1)**99 >>> findroot(f, 0.9, verify=False) 0.918073542444929 Even for a very close starting point the secant method converges very slowly. Use ``verbose=True`` to illustrate this. It is possible to modify Newton's method to make it converge regardless of the root's multiplicity:: >>> findroot(f, -10, solver='mnewton') 1.0 This variant uses the first and second derivative of the function, which is not very efficient. 
Alternatively you can use an experimental Newtonian solver that keeps track of the speed of convergence and accelerates it using Steffensen's method if necessary:: >>> findroot(f, -10, solver='anewton', verbose=True) x: -9.88888888888888888889 error: 0.111111111111111111111 converging slowly x: -9.77890011223344556678 error: 0.10998877665544332211 converging slowly x: -9.67002233332199662166 error: 0.108877778911448945119 converging slowly accelerating convergence x: -9.5622443299551077669 error: 0.107778003366888854764 converging slowly x: 0.99999999999999999214 error: 10.562244329955107759 x: 1.0 error: 7.8598304758094664213e-18 ZeroDivisionError: canceled with x = 1.0 1.0 **Complex roots** For complex roots it's recommended to use Muller's method as it converges even for real starting points very fast:: >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller') (0.727136084491197 + 0.934099289460529j) **Intersection methods** When you need to find a root in a known interval, it's highly recommended to use an intersection-based solver like ``'anderson'`` or ``'ridder'``. Usually they converge faster and more reliable. They have however problems with multiple roots and usually need a sign change to find a root:: >>> findroot(lambda x: x**3, (-1, 1), solver='anderson') 0.0 Be careful with symmetric functions:: >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS Traceback (most recent call last): ... ZeroDivisionError It fails even for better starting points, because there is no sign change:: >>> findroot(lambda x: x**2, (-1, .5), solver='anderson') Traceback (most recent call last): ... ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19) Try another starting point or tweak arguments. """ prec = ctx.prec try: ctx.prec += 20 # initialize arguments if tol is None: tol = ctx.eps * 2**10 kwargs['verbose'] = kwargs.get('verbose', verbose) if 'd1f' in kwargs: kwargs['df'] = kwargs['d1f'] kwargs['tol'] = tol if isinstance(x0, (list, tuple)): x0 = [ctx.convert(x) for x in x0] else: x0 = [ctx.convert(x0)] if isinstance(solver, str): try: solver = str2solver[solver] except KeyError: raise ValueError('could not recognize solver') # accept list of functions if isinstance(f, (list, tuple)): f2 = copy(f) def tmp(*args): return [fn(*args) for fn in f2] f = tmp # detect multidimensional functions try: fx = f(*x0) multidimensional = isinstance(fx, (list, tuple, ctx.matrix)) except TypeError: fx = f(x0[0]) multidimensional = False if 'multidimensional' in kwargs: multidimensional = kwargs['multidimensional'] if multidimensional: # only one multidimensional solver available at the moment solver = MDNewton if not 'norm' in kwargs: norm = lambda x: ctx.norm(x, 'inf') kwargs['norm'] = norm else: norm = kwargs['norm'] else: norm = abs # happily return starting point if it's a root if norm(fx) == 0: if multidimensional: return ctx.matrix(x0) else: return x0[0] # use solver iterations = solver(ctx, f, x0, **kwargs) if 'maxsteps' in kwargs: maxsteps = kwargs['maxsteps'] else: maxsteps = iterations.maxsteps i = 0 for x, error in iterations: if verbose: print_('x: ', x) print_('error:', error) i += 1 if error < tol * max(1, norm(x)) or i >= maxsteps: break if not isinstance(x, (list, tuple, ctx.matrix)): xl = [x] else: xl = x if verify and norm(f(*xl))**2 > tol: # TODO: better condition? raise ValueError('Could not find root within given tolerance. ' '(%s > %s)\n' 'Try another starting point or tweak arguments.' 
% (norm(f(*xl))**2, tol)) return x finally: ctx.prec = prec def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs): """ Return the multiplicity of a given root of f. Internally, numerical derivatives are used. This might be inefficient for higher order derviatives. Due to this, ``multiplicity`` cancels after evaluating 10 derivatives by default. You can be specify the n-th derivative using the dnf keyword. >>> from mpmath import * >>> multiplicity(lambda x: sin(x) - 1, pi/2) 2 """ if tol is None: tol = ctx.eps ** 0.8 kwargs['d0f'] = f for i in xrange(maxsteps): dfstr = 'd' + str(i) + 'f' if dfstr in kwargs: df = kwargs[dfstr] else: df = lambda x: ctx.diff(f, x, i) if not abs(df(root)) < tol: break return i def steffensen(f): """ linear convergent function -> quadratic convergent function Steffensen's method for quadratic convergence of a linear converging sequence. Don not use it for higher rates of convergence. It may even work for divergent sequences. Definition: F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x) Example ....... You can use Steffensen's method to accelerate a fixpoint iteration of linear (or less) convergence. x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For phi(x) = x**2 there are two fixpoints: 0 and 1. Let's try Steffensen's method: >>> f = lambda x: x**2 >>> from mpmath.calculus.optimization import steffensen >>> F = steffensen(f) >>> for x in [0.5, 0.9, 2.0]: ... fx = Fx = x ... for i in xrange(9): ... try: ... fx = f(fx) ... except OverflowError: ... pass ... try: ... Fx = F(Fx) ... except ZeroDivisionError: ... pass ... print('%20g %20g' % (fx, Fx)) 0.25 -0.5 0.0625 0.1 0.00390625 -0.0011236 1.52588e-05 1.41691e-09 2.32831e-10 -2.84465e-27 5.42101e-20 2.30189e-80 2.93874e-39 -1.2197e-239 8.63617e-78 0 7.45834e-155 0 0.81 1.02676 0.6561 1.00134 0.430467 1 0.185302 1 0.0343368 1 0.00117902 1 1.39008e-06 1 1.93233e-12 1 3.73392e-24 1 4 1.6 16 1.2962 256 1.10194 65536 1.01659 4.29497e+09 1.00053 1.84467e+19 1 3.40282e+38 1 1.15792e+77 1 1.34078e+154 1 Unmodified, the iteration converges only towards 0. Modified it converges not only much faster, it converges even to the repelling fixpoint 1. """ def F(x): fx = f(x) ffx = f(fx) return (x*ffx - fx**2) / (ffx - 2*fx + x) return F OptimizationMethods.jacobian = jacobian OptimizationMethods.findroot = findroot OptimizationMethods.multiplicity = multiplicity if __name__ == '__main__': import doctest doctest.testmod()
file_length: 32,219
avg_line_length: 28.559633
max_line_length: 98
extension_type: py
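The df keyword accepted by findroot is documented but not exercised in its doctests. A hedged sketch of supplying the derivative explicitly, so the 'newton' solver uses it instead of falling back on numerical differentiation via ctx.diff; mp.almosteq is assumed to be available for the final comparison, as elsewhere in mpmath.

>>> from mpmath import mp
>>> mp.dps = 25
>>> root = mp.findroot(mp.cos, 1.5, solver='newton', df=lambda x: -mp.sin(x))
>>> mp.almosteq(root, mp.pi/2)
True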
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/calculus/polynomials.py
from ..libmp.backend import xrange from .calculus import defun #----------------------------------------------------------------------------# # Polynomials # #----------------------------------------------------------------------------# # XXX: extra precision @defun def polyval(ctx, coeffs, x, derivative=False): r""" Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`, :func:`~mpmath.polyval` evaluates the polynomial .. math :: P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0. If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously evaluates `P(x)` with the derivative, `P'(x)`, and returns the tuple `(P(x), P'(x))`. >>> from mpmath import * >>> mp.pretty = True >>> polyval([3, 0, 2], 0.5) 2.75 >>> polyval([3, 0, 2], 0.5, derivative=True) (2.75, 3.0) The coefficients and the evaluation point may be any combination of real or complex numbers. """ if not coeffs: return ctx.zero p = ctx.convert(coeffs[0]) q = ctx.zero for c in coeffs[1:]: if derivative: q = p + x*q p = c + x*p if derivative: return p, q else: return p @defun def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10, error=False, roots_init=None): """ Computes all roots (real or complex) of a given polynomial. The roots are returned as a sorted list, where real roots appear first followed by complex conjugate roots as adjacent elements. The polynomial should be given as a list of coefficients, in the format used by :func:`~mpmath.polyval`. The leading coefficient must be nonzero. With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)* where *err* is an estimate of the maximum error among the computed roots. **Examples** Finding the three real roots of `x^3 - x^2 - 14x + 24`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> nprint(polyroots([1,-1,-14,24]), 4) [-4.0, 2.0, 3.0] Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an error estimate:: >>> roots, err = polyroots([4,3,2], error=True) >>> for r in roots: ... print(r) ... (-0.375 + 0.59947894041409j) (-0.375 - 0.59947894041409j) >>> >>> err 2.22044604925031e-16 >>> >>> polyval([4,3,2], roots[0]) (2.22044604925031e-16 + 0.0j) >>> polyval([4,3,2], roots[1]) (2.22044604925031e-16 + 0.0j) The following example computes all the 5th roots of unity; that is, the roots of `x^5 - 1`:: >>> mp.dps = 20 >>> for r in polyroots([1, 0, 0, 0, 0, -1]): ... print(r) ... 1.0 (-0.8090169943749474241 + 0.58778525229247312917j) (-0.8090169943749474241 - 0.58778525229247312917j) (0.3090169943749474241 + 0.95105651629515357212j) (0.3090169943749474241 - 0.95105651629515357212j) **Precision and conditioning** The roots are computed to the current working precision accuracy. If this accuracy cannot be achieved in `maxsteps` steps, then a `NoConvergence` exception is raised. The algorithm internally is using the current working precision extended by `extraprec`. If `NoConvergence` was raised, that is caused either by not having enough extra precision to achieve convergence (in which case increasing `extraprec` should fix the problem) or too low `maxsteps` (in which case increasing `maxsteps` should fix the problem), or a combination of both. The user should always do a convergence study with regards to `extraprec` to ensure accurate results. It is possible to get convergence to a wrong answer with too low `extraprec`. 
Provided there are no repeated roots, :func:`~mpmath.polyroots` can typically compute all roots of an arbitrary polynomial to high precision:: >>> mp.dps = 60 >>> for r in polyroots([1, 0, -10, 0, 1]): ... print r ... -3.14626436994197234232913506571557044551247712918732870123249 -0.317837245195782244725757617296174288373133378433432554879127 0.317837245195782244725757617296174288373133378433432554879127 3.14626436994197234232913506571557044551247712918732870123249 >>> >>> sqrt(3) + sqrt(2) 3.14626436994197234232913506571557044551247712918732870123249 >>> sqrt(3) - sqrt(2) 0.317837245195782244725757617296174288373133378433432554879127 **Algorithm** :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which uses complex arithmetic to locate all roots simultaneously. The Durand-Kerner method can be viewed as approximately performing simultaneous Newton iteration for all the roots. In particular, the convergence to simple roots is quadratic, just like Newton's method. Although all roots are internally calculated using complex arithmetic, any root found to have an imaginary part smaller than the estimated numerical error is truncated to a real number (small real parts are also chopped). Real roots are placed first in the returned list, sorted by value. The remaining complex roots are sorted by their real parts so that conjugate roots end up next to each other. **References** 1. http://en.wikipedia.org/wiki/Durand-Kerner_method """ if len(coeffs) <= 1: if not coeffs or not coeffs[0]: raise ValueError("Input to polyroots must not be the zero polynomial") # Constant polynomial with no roots return [] orig = ctx.prec tol = +ctx.eps with ctx.extraprec(extraprec): deg = len(coeffs) - 1 # Must be monic lead = ctx.convert(coeffs[0]) if lead == 1: coeffs = [ctx.convert(c) for c in coeffs] else: coeffs = [c/lead for c in coeffs] f = lambda x: ctx.polyval(coeffs, x) if roots_init is None: roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)] else: roots = [None]*deg; deg_init = min(deg, len(roots_init)) roots[:deg_init] = list(roots_init[:deg_init]) roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg_init,deg)] err = [ctx.one for n in xrange(deg)] # Durand-Kerner iteration until convergence for step in xrange(maxsteps): if abs(max(err)) < tol: break for i in xrange(deg): p = roots[i] x = f(p) for j in range(deg): if i != j: try: x /= (p-roots[j]) except ZeroDivisionError: continue roots[i] = p - x err[i] = abs(x) if abs(max(err)) >= tol: raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \ % maxsteps) # Remove small real or imaginary parts if cleanup: for i in xrange(deg): if abs(roots[i]) < tol: roots[i] = ctx.zero elif abs(ctx._im(roots[i])) < tol: roots[i] = roots[i].real elif abs(ctx._re(roots[i])) < tol: roots[i] = roots[i].imag * 1j roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x))) if error: err = max(err) err = max(err, ctx.ldexp(1, -orig+1)) return [+r for r in roots], +err else: return [+r for r in roots]
7,854
35.877934
82
py
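A short usage sketch for the polyval()/polyroots() routines from polynomials.py above, reusing the inputs of their own doctests and assuming only the public mpmath package.

from mpmath import mp, polyval, polyroots

mp.dps = 20

# P(x) = 3*x^2 + 2 evaluated at x = 0.5, together with the derivative P'(0.5).
print(polyval([3, 0, 2], 0.5, derivative=True))      # (2.75, 3.0)

# All roots of x^3 - x^2 - 14x + 24 via the Durand-Kerner iteration,
# plus an estimate of the maximum error among the computed roots.
roots, err = polyroots([1, -1, -14, 24], error=True)
print(roots)                                         # roughly [-4.0, 2.0, 3.0]
print(err)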
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/calculus/approximation.py
from ..libmp.backend import xrange from .calculus import defun #----------------------------------------------------------------------------# # Approximation methods # #----------------------------------------------------------------------------# # The Chebyshev approximation formula is given at: # http://mathworld.wolfram.com/ChebyshevApproximationFormula.html # The only major changes in the following code is that we return the # expanded polynomial coefficients instead of Chebyshev coefficients, # and that we automatically transform [a,b] -> [-1,1] and back # for convenience. # Coefficient in Chebyshev approximation def chebcoeff(ctx,f,a,b,j,N): s = ctx.mpf(0) h = ctx.mpf(0.5) for k in range(1, N+1): t = ctx.cospi((k-h)/N) s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N) return 2*s/N # Generate Chebyshev polynomials T_n(ax+b) in expanded form def chebT(ctx, a=1, b=0): Tb = [1] yield Tb Ta = [b, a] while 1: yield Ta # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b) Tmp = [0] + [2*a*t for t in Ta] for i, c in enumerate(Ta): Tmp[i] += 2*b*c for i, c in enumerate(Tb): Tmp[i] -= c Ta, Tb = Tmp, Ta @defun def chebyfit(ctx, f, interval, N, error=False): r""" Computes a polynomial of degree `N-1` that approximates the given function `f` on the interval `[a, b]`. With ``error=True``, :func:`~mpmath.chebyfit` also returns an accurate estimate of the maximum absolute error; that is, the maximum value of `|f(x) - P(x)|` for `x \in [a, b]`. :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula, which gives a nearly optimal solution: that is, the maximum error of the approximating polynomial is very close to the smallest possible for any polynomial of the same degree. Chebyshev approximation is very useful if one needs repeated evaluation of an expensive function, such as function defined implicitly by an integral or a differential equation. (For example, it could be used to turn a slow mpmath function into a fast machine-precision version of the same.) **Examples** Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation of `f(x) = \cos(x)`, valid on the interval `[1, 2]`:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> poly, err = chebyfit(cos, [1, 2], 5, error=True) >>> nprint(poly) [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553] >>> nprint(err, 12) 1.61351758081e-5 The polynomial can be evaluated using ``polyval``:: >>> nprint(polyval(poly, 1.6), 12) -0.0291858904138 >>> nprint(cos(1.6), 12) -0.0291995223013 Sampling the true error at 1000 points shows that the error estimate generated by ``chebyfit`` is remarkably good:: >>> error = lambda x: abs(cos(x) - polyval(poly, x)) >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12) 1.61349954245e-5 **Choice of degree** The degree `N` can be set arbitrarily high, to obtain an arbitrarily good approximation. As a rule of thumb, an `N`-term Chebyshev approximation is good to `N/(b-a)` decimal places on a unit interval (although this depends on how well-behaved `f` is). The cost grows accordingly: ``chebyfit`` evaluates the function `(N^2)/2` times to compute the coefficients and an additional `N` times to estimate the error. **Possible issues** One should be careful to use a sufficiently high working precision both when calling ``chebyfit`` and when evaluating the resulting polynomial, as the polynomial is sometimes ill-conditioned. 
It is for example difficult to reach 15-digit accuracy when evaluating the polynomial using machine precision floats, no matter the theoretical accuracy of the polynomial. (The option to return the coefficients in Chebyshev form should be made available in the future.) It is important to note the Chebyshev approximation works poorly if `f` is not smooth. A function containing singularities, rapid oscillation, etc can be approximated more effectively by multiplying it by a weight function that cancels out the nonsmooth features, or by dividing the interval into several segments. """ a, b = ctx._as_points(interval) orig = ctx.prec try: ctx.prec = orig + int(N**0.5) + 20 c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)] d = [ctx.zero] * N d[0] = -c[0]/2 h = ctx.mpf(0.5) T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a)) for (k, Tk) in zip(range(N), T): for i in range(len(Tk)): d[i] += c[k]*Tk[i] d = d[::-1] # Estimate maximum error err = ctx.zero for k in range(N): x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h err = max(err, abs(f(x) - ctx.polyval(d, x))) finally: ctx.prec = orig if error: return d, +err else: return d @defun def fourier(ctx, f, interval, N): r""" Computes the Fourier series of degree `N` of the given function on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns two lists `(c, s)` of coefficients (the cosine series and sine series, respectively), such that .. math :: f(x) \sim \sum_{k=0}^N c_k \cos(k m x) + s_k \sin(k m x) where `m = 2 \pi / (b-a)`. Note that many texts define the first coefficient as `2 c_0` instead of `c_0`. The easiest way to evaluate the computed series correctly is to pass it to :func:`~mpmath.fourierval`. **Examples** The function `f(x) = x` has a simple Fourier series on the standard interval `[-\pi, \pi]`. The cosine coefficients are all zero (because the function has odd symmetry), and the sine coefficients are rational numbers:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> c, s = fourier(lambda x: x, [-pi, pi], 5) >>> nprint(c) [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] >>> nprint(s) [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4] This computes a Fourier series of a nonsymmetric function on a nonstandard interval:: >>> I = [-1, 1.5] >>> f = lambda x: x**2 - 4*x + 1 >>> cs = fourier(f, I, 4) >>> nprint(cs[0]) [0.583333, 1.12479, -1.27552, 0.904708, -0.441296] >>> nprint(cs[1]) [0.0, -2.6255, 0.580905, 0.219974, -0.540057] It is instructive to plot a function along with its truncated Fourier series:: >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP Fourier series generally converge slowly (and may not converge pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier series gives an `L^2` error corresponding to 2-digit accuracy:: >>> I = [-1, 1] >>> cs = fourier(cosh, I, 9) >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2 >>> nprint(sqrt(quad(g, I))) 0.00467963 :func:`~mpmath.fourier` uses numerical quadrature. 
For nonsmooth functions, the accuracy (and speed) can be improved by including all singular points in the interval specification:: >>> nprint(fourier(abs, [-1, 1], 0), 10) ([0.5000441648], [0.0]) >>> nprint(fourier(abs, [-1, 0, 1], 0), 10) ([0.5], [0.0]) """ interval = ctx._as_points(interval) a = interval[0] b = interval[-1] L = b-a cos_series = [] sin_series = [] cutoff = ctx.eps*10 for n in xrange(N+1): m = 2*n*ctx.pi/L an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L if n == 0: an /= 2 if abs(an) < cutoff: an = ctx.zero if abs(bn) < cutoff: bn = ctx.zero cos_series.append(an) sin_series.append(bn) return cos_series, sin_series @defun def fourierval(ctx, series, interval, x): """ Evaluates a Fourier series (in the format computed by by :func:`~mpmath.fourier` for the given interval) at the point `x`. The series should be a pair `(c, s)` where `c` is the cosine series and `s` is the sine series. The two lists need not have the same length. """ cs, ss = series ab = ctx._as_points(interval) a = interval[0] b = interval[-1] m = 2*ctx.pi/(ab[-1]-ab[0]) s = ctx.zero s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n]) s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n]) return s
8,817
34.700405
79
py
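A brief sketch of the approximation routines above, grounded in their docstrings: a low-degree Chebyshev fit of cos on [1, 2] and a short Fourier series of f(x) = x on [-pi, pi]. It assumes only the public mpmath package.

from mpmath import mp, chebyfit, polyval, fourier, fourierval, cos, pi

mp.dps = 15

# Degree-4 polynomial approximating cos(x) on [1, 2], with an error bound.
poly, err = chebyfit(cos, [1, 2], 5, error=True)
print(polyval(poly, 1.6), cos(1.6), err)

# Fourier coefficients of f(x) = x on [-pi, pi]; the cosine terms vanish
# because the function is odd.
c, s = fourier(lambda x: x, [-pi, pi], 5)
print(s)                                   # approximately [0, 2, -1, 2/3, -1/2, 2/5]
print(fourierval((c, s), [-pi, pi], 1))    # truncated series evaluated at x = 1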
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/calculus/calculus.py
class CalculusMethods(object):
    pass

def defun(f):
    setattr(CalculusMethods, f.__name__, f)

99
15.666667
43
py
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/bandits/venv/lib/python3.6/site-packages/mpmath/calculus/extrapolation.py
try: from itertools import izip except ImportError: izip = zip from ..libmp.backend import xrange from .calculus import defun try: next = next except NameError: next = lambda _: _.next() @defun def richardson(ctx, seq): r""" Given a list ``seq`` of the first `N` elements of a slowly convergent infinite sequence, :func:`~mpmath.richardson` computes the `N`-term Richardson extrapolate for the limit. :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated limit and `c` is the magnitude of the largest weight used during the computation. The weight provides an estimate of the precision lost to cancellation. Due to cancellation effects, the sequence must be typically be computed at a much higher precision than the target accuracy of the extrapolation. **Applicability and issues** The `N`-step Richardson extrapolation algorithm used by :func:`~mpmath.richardson` is described in [1]. Richardson extrapolation only works for a specific type of sequence, namely one converging like partial sums of `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials. When the sequence does not convergence at such a rate :func:`~mpmath.richardson` generally produces garbage. Richardson extrapolation has the advantage of being fast: the `N`-term extrapolate requires only `O(N)` arithmetic operations, and usually produces an estimate that is accurate to `O(N)` digits. Contrast with the Shanks transformation (see :func:`~mpmath.shanks`), which requires `O(N^2)` operations. :func:`~mpmath.richardson` is unable to produce an estimate for the approximation error. One way to estimate the error is to perform two extrapolations with slightly different `N` and comparing the results. Richardson extrapolation does not work for oscillating sequences. As a simple workaround, :func:`~mpmath.richardson` detects if the last three elements do not differ monotonically, and in that case applies extrapolation only to the even-index elements. **Example** Applying Richardson extrapolation to the Leibniz series for `\pi`:: >>> from mpmath import * >>> mp.dps = 30; mp.pretty = True >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) ... for m in range(1,30)] >>> v, c = richardson(S[:10]) >>> v 3.2126984126984126984126984127 >>> nprint([v-pi, c]) [0.0711058, 2.0] >>> v, c = richardson(S[:30]) >>> v 3.14159265468624052829954206226 >>> nprint([v-pi, c]) [1.09645e-9, 20833.3] **References** 1. [BenderOrszag]_ pp. 375-376 """ if len(seq) < 3: raise ValueError("seq should be of minimum length 3") if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]): seq = seq[::2] N = len(seq)//2-1 s = ctx.zero # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)! # To avoid repeated factorials, we simplify the quotient # of successive weights to obtain a recurrence relation c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N)) maxc = 1 for k in xrange(N+1): s += c * seq[N+k] maxc = max(abs(c), maxc) c *= (k-N)*ctx.mpf(k+N+1)**N c /= ((1+k)*ctx.mpf(k+N)**N) return s, maxc @defun def shanks(ctx, seq, table=None, randomized=False): r""" Given a list ``seq`` of the first `N` elements of a slowly convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks transformation often provides strong convergence acceleration, especially if the sequence is oscillating. The iterated Shanks transformation is computed using the Wynn epsilon algorithm (see [1]). 
:func:`~mpmath.shanks` returns the full epsilon table generated by Wynn's algorithm, which can be read off as follows: * The table is a list of lists forming a lower triangular matrix, where higher row and column indices correspond to more accurate values. * The columns with even index hold dummy entries (required for the computation) and the columns with odd index hold the actual extrapolates. * The last element in the last row is typically the most accurate estimate of the limit. * The difference to the third last element in the last row provides an estimate of the approximation error. * The magnitude of the second last element provides an estimate of the numerical accuracy lost to cancellation. For convenience, so the extrapolation is stopped at an odd index so that ``shanks(seq)[-1][-1]`` always gives an estimate of the limit. Optionally, an existing table can be passed to :func:`~mpmath.shanks`. This can be used to efficiently extend a previous computation after new elements have been appended to the sequence. The table will then be updated in-place. **The Shanks transformation** The Shanks transformation is defined as follows (see [2]): given the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is given by .. math :: S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k} The Shanks transformation gives the exact limit `A_{\infty}` in a single step if `A_k = A + a q^k`. Note in particular that it extrapolates the exact sum of a geometric series in a single step. Applying the Shanks transformation once often improves convergence substantially for an arbitrary sequence, but the optimal effect is obtained by applying it iteratively: `S(S(A_k)), S(S(S(A_k))), \ldots`. Wynn's epsilon algorithm provides an efficient way to generate the table of iterated Shanks transformations. It reduces the computation of each element to essentially a single division, at the cost of requiring dummy elements in the table. See [1] for details. **Precision issues** Due to cancellation effects, the sequence must be typically be computed at a much higher precision than the target accuracy of the extrapolation. If the Shanks transformation converges to the exact limit (such as if the sequence is a geometric series), then a division by zero occurs. By default, :func:`~mpmath.shanks` handles this case by terminating the iteration and returning the table it has generated so far. With *randomized=True*, it will instead replace the zero by a pseudorandom number close to zero. (TODO: find a better solution to this problem.) **Examples** We illustrate by applying Shanks transformation to the Leibniz series for `\pi`:: >>> from mpmath import * >>> mp.dps = 50 >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) ... for m in range(1,30)] >>> >>> T = shanks(S[:7]) >>> for row in T: ... nprint(row) ... [-0.75] [1.25, 3.16667] [-1.75, 3.13333, -28.75] [2.25, 3.14524, 82.25, 3.14234] [-2.75, 3.13968, -177.75, 3.14139, -969.937] [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161] The extrapolated accuracy is about 4 digits, and about 4 digits may have been lost due to cancellation:: >>> L = T[-1] >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])]) [2.22532e-5, 4.78309e-5, 3515.06] Now we extend the computation:: >>> T = shanks(S[:25], T) >>> L = T[-1] >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])]) [3.75527e-19, 1.48478e-19, 2.96014e+17] The value for pi is now accurate to 18 digits. About 18 digits may also have been lost to cancellation. 
Here is an example with a geometric series, where the convergence is immediate (the sum is exactly 1):: >>> mp.dps = 15 >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]): ... nprint(row) [4.0] [8.0, 1.0] **References** 1. [GravesMorris]_ 2. [BenderOrszag]_ pp. 368-375 """ if len(seq) < 2: raise ValueError("seq should be of minimum length 2") if table: START = len(table) else: START = 0 table = [] STOP = len(seq) - 1 if STOP & 1: STOP -= 1 one = ctx.one eps = +ctx.eps if randomized: from random import Random rnd = Random() rnd.seed(START) for i in xrange(START, STOP): row = [] for j in xrange(i+1): if j == 0: a, b = 0, seq[i+1]-seq[i] else: if j == 1: a = seq[i] else: a = table[i-1][j-2] b = row[j-1] - table[i-1][j-1] if not b: if randomized: b = rnd.getrandbits(10)*eps elif i & 1: return table[:-1] else: return table row.append(a + one/b) table.append(row) return table class levin_class: # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) r""" This interface implements Levin's (nonlinear) sequence transformation for convergence acceleration and summation of divergent series. It performs better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent or alternating divergent series. Let *A* be the series we want to sum: .. math :: A = \sum_{k=0}^{\infty} a_k Attention: all `a_k` must be non-zero! Let `s_n` be the partial sums of this series: .. math :: s_n = \sum_{k=0}^n a_k. **Methods** Calling ``levin`` returns an object with the following methods. ``update(...)`` works with the list of individual terms `a_k` of *A*, and ``update_step(...)`` works with the list of partial sums `s_k` of *A*: .. code :: v, e = ...update([a_0, a_1,..., a_k]) v, e = ...update_psum([s_0, s_1,..., s_k]) ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)`` works with the partial sums `s_k`: .. code :: v, e = ...step(a_k) v, e = ...step_psum(s_k) *v* is the current estimate for *A*, and *e* is an error estimate which is simply the difference between the current estimate and the last estimate. One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``. **A word of caution** One can only hope for good results (i.e. convergence acceleration or resummation) if the `s_n` have some well defind asymptotic behavior for large `n` and are not erratic or random. Furthermore one usually needs very high working precision because of the numerical cancellation. If the working precision is insufficient, levin may produce silently numerical garbage. Furthermore even if the Levin-transformation converges, in the general case there is no proof that the result is mathematically sound. Only for very special classes of problems one can prove that the Levin-transformation converges to the expected result (for example Stieltjes-type integrals). Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison to Shanks/Wynn-epsilon, Richardson & co. In summary one can say that the Levin-transformation is powerful but unreliable and that it may need a copious amount of working precision. The Levin transform has several variants differing in the choice of weights. Some variants are better suited for the possible flavours of convergence behaviour of *A* than other variants: .. code :: convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon logarithmic + - + - linear + + + + alternating divergent + + + + "+" means the variant is suitable,"-" means the variant is not suitable; for comparison the Shanks/Wynn-epsilon transform is listed, too. 
The variant is controlled though the variant keyword (i.e. ``variant="u"``, ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice. Finally it is possible to use the Sidi-S transform instead of the Levin transform by using the keyword ``method='sidi'``. The Sidi-S transform works better than the Levin transformation for some divergent series (see the examples). Parameters: .. code :: method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation variant "u","t" or "v" chooses the weight variant. The Levin transform is also accessible through the nsum interface. ``method="l"`` or ``method="levin"`` select the normal Levin transform while ``method="sidi"`` selects the Sidi-S transform. The variant is in both cases selected through the levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the point where the Levin transform converges resulting in numerical overflow/garbage. For highly divergent series a copious amount of working precision must be chosen. **Examples** First we sum the zeta function:: >>> from mpmath import mp >>> mp.prec = 53 >>> eps = mp.mpf(mp.eps) >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision ... L = mp.levin(method = "levin", variant = "u") ... S, s, n = [], 0, 1 ... while 1: ... s += mp.one / (n * n) ... n += 1 ... S.append(s) ... v, e = L.update_psum(S) ... if e < eps: ... break ... if n > 1000: raise RuntimeError("iteration limit exceeded") >>> print(mp.chop(v - mp.pi ** 2 / 6)) 0.0 >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u") >>> print(mp.chop(v - w)) 0.0 Now we sum the zeta function outside its range of convergence (attention: This does not work at the negative integers!):: >>> eps = mp.mpf(mp.eps) >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision ... L = mp.levin(method = "levin", variant = "v") ... A, n = [], 1 ... while 1: ... s = mp.mpf(n) ** (2 + 3j) ... n += 1 ... A.append(s) ... v, e = L.update(A) ... if e < eps: ... break ... if n > 1000: raise RuntimeError("iteration limit exceeded") >>> print(mp.chop(v - mp.zeta(-2-3j))) 0.0 >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v") >>> print(mp.chop(v - w)) 0.0 Now we sum the divergent asymptotic expansion of an integral related to the exponential integral (see also [2] p.373). The Sidi-S transform works best here:: >>> z = mp.mpf(10) >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf]) >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral >>> eps = mp.mpf(mp.eps) >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation ... L = mp.levin(method = "sidi", variant = "t") ... n = 0 ... while 1: ... s = (-1)**n * mp.fac(n) * z ** (-n) ... v, e = L.step(s) ... n += 1 ... if e < eps: ... break ... 
if n > 1000: raise RuntimeError("iteration limit exceeded") >>> print(mp.chop(v - exact)) 0.0 >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t") >>> print(mp.chop(v - w)) 0.0 Another highly divergent integral is also summable:: >>> z = mp.mpf(2) >>> eps = mp.mpf(mp.eps) >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi) >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral >>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series ... L = mp.levin(method = "levin", variant = "t") ... n, s = 0, 0 ... while 1: ... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)) ... n += 1 ... v, e = L.step_psum(s) ... if e < eps: ... break ... if n > 1000: raise RuntimeError("iteration limit exceeded") >>> print(mp.chop(v - exact)) 0.0 >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)]) >>> print(mp.chop(v - w)) 0.0 These examples run with 15-20 decimal digits precision. For higher precision the working precision must be raised. **Examples for nsum** Here we calculate Euler's constant as the constant term in the Laurent expansion of `\zeta(s)` at `s=1`. This sum converges extremly slowly because of the logarithmic convergence behaviour of the Dirichlet series for zeta:: >>> mp.dps = 30 >>> z = mp.mpf(10) ** (-10) >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z >>> print(mp.chop(a - mp.euler, tol = 1e-10)) 0.0 The Sidi-S transform performs excellently for the alternating series of `\log(2)`:: >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi") >>> print(mp.chop(a - mp.log(2))) 0.0 Hypergeometric series can also be summed outside their range of convergence. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the point where the Levin transform converges resulting in numerical overflow/garbage:: >>> z = 2 + 1j >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z) >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n)) >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)]) >>> print(mp.chop(exact-v)) 0.0 References: [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of Convergence and the Summation of Divergent Series" arXiv:math/0306302 [2] A. Sidi - "Pratical Extrapolation Methods" [3] H.H.H. 
Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209 """ def __init__(self, method = "levin", variant = "u"): self.variant = variant self.n = 0 self.a0 = 0 self.theta = 1 self.A = [] self.B = [] self.last = 0 self.last_s = False if method == "levin": self.factor = self.factor_levin elif method == "sidi": self.factor = self.factor_sidi else: raise ValueError("levin: unknown method \"%s\"" % method) def factor_levin(self, i): # original levin # [1] p.50,e.7.5-7 (with n-j replaced by i) return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1) def factor_sidi(self, i): # sidi analogon to levin (factorial series) # [1] p.59,e.8.3-16 (with n-j replaced by i) return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3)) def run(self, s, a0, a1 = 0): if self.variant=="t": # levin t w=a0 elif self.variant=="u": # levin u w=a0*(self.theta+self.n) elif self.variant=="v": # levin v w=a0*a1/(a0-a1) else: assert False, "unknown variant" if w==0: raise ValueError("levin: zero weight") self.A.append(s/w) self.B.append(1/w) for i in range(self.n-1,-1,-1): if i==self.n-1: f=1 else: f=self.factor(i) self.A[i]=self.A[i+1]-f*self.A[i] self.B[i]=self.B[i+1]-f*self.B[i] self.n+=1 ########################################################################### def update_psum(self,S): """ This routine applies the convergence acceleration to the list of partial sums. A = sum(a_k, k = 0..infinity) s_n = sum(a_k, k = 0..n) v, e = ...update_psum([s_0, s_1,..., s_k]) output: v current estimate of the series A e an error estimate which is simply the difference between the current estimate and the last estimate. """ if self.variant!="v": if self.n==0: self.run(S[0],S[0]) while self.n<len(S): self.run(S[self.n],S[self.n]-S[self.n-1]) else: if len(S)==1: self.last=0 return S[0],abs(S[0]) if self.n==0: self.a1=S[1]-S[0] self.run(S[0],S[0],self.a1) while self.n<len(S)-1: na1=S[self.n+1]-S[self.n] self.run(S[self.n],self.a1,na1) self.a1=na1 value=self.A[0]/self.B[0] err=abs(value-self.last) self.last=value return value,err def update(self,X): """ This routine applies the convergence acceleration to the list of individual terms. A = sum(a_k, k = 0..infinity) v, e = ...update([a_0, a_1,..., a_k]) output: v current estimate of the series A e an error estimate which is simply the difference between the current estimate and the last estimate. """ if self.variant!="v": if self.n==0: self.s=X[0] self.run(self.s,X[0]) while self.n<len(X): self.s+=X[self.n] self.run(self.s,X[self.n]) else: if len(X)==1: self.last=0 return X[0],abs(X[0]) if self.n==0: self.s=X[0] self.run(self.s,X[0],X[1]) while self.n<len(X)-1: self.s+=X[self.n] self.run(self.s,X[self.n],X[self.n+1]) value=self.A[0]/self.B[0] err=abs(value-self.last) self.last=value return value,err ########################################################################### def step_psum(self,s): """ This routine applies the convergence acceleration to the partial sums. A = sum(a_k, k = 0..infinity) s_n = sum(a_k, k = 0..n) v, e = ...step_psum(s_k) output: v current estimate of the series A e an error estimate which is simply the difference between the current estimate and the last estimate. 
""" if self.variant!="v": if self.n==0: self.last_s=s self.run(s,s) else: self.run(s,s-self.last_s) self.last_s=s else: if isinstance(self.last_s,bool): self.last_s=s self.last_w=s self.last=0 return s,abs(s) na1=s-self.last_s self.run(self.last_s,self.last_w,na1) self.last_w=na1 self.last_s=s value=self.A[0]/self.B[0] err=abs(value-self.last) self.last=value return value,err def step(self,x): """ This routine applies the convergence acceleration to the individual terms. A = sum(a_k, k = 0..infinity) v, e = ...step(a_k) output: v current estimate of the series A e an error estimate which is simply the difference between the current estimate and the last estimate. """ if self.variant!="v": if self.n==0: self.s=x self.run(self.s,x) else: self.s+=x self.run(self.s,x) else: if isinstance(self.last_s,bool): self.last_s=x self.s=0 self.last=0 return x,abs(x) self.s+=self.last_s self.run(self.s,self.last_s,x) self.last_s=x value=self.A[0]/self.B[0] err=abs(value-self.last) self.last=value return value,err def levin(ctx, method = "levin", variant = "u"): L = levin_class(method = method, variant = variant) L.ctx = ctx return L levin.__doc__ = levin_class.__doc__ defun(levin) class cohen_alt_class: # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) """ This interface implements the convergence acceleration of alternating series as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration of Alternating Series". This series transformation works only well if the individual terms of the series have an alternating sign. It belongs to the class of linear series transformations (in contrast to the Shanks/Wynn-epsilon or Levin transform). This series transformation is also able to sum some types of divergent series. See the paper under which conditions this resummation is mathematical sound. Let *A* be the series we want to sum: .. math :: A = \sum_{k=0}^{\infty} a_k Let `s_n` be the partial sums of this series: .. math :: s_n = \sum_{k=0}^n a_k. **Interface** Calling ``cohen_alt`` returns an object with the following methods. Then ``update(...)`` works with the list of individual terms `a_k` and ``update_psum(...)`` works with the list of partial sums `s_k`: .. code :: v, e = ...update([a_0, a_1,..., a_k]) v, e = ...update_psum([s_0, s_1,..., s_k]) *v* is the current estimate for *A*, and *e* is an error estimate which is simply the difference between the current estimate and the last estimate. **Examples** Here we compute the alternating zeta function using ``update_psum``:: >>> from mpmath import mp >>> AC = mp.cohen_alt() >>> S, s, n = [], 0, 1 >>> while 1: ... s += -((-1) ** n) * mp.one / (n * n) ... n += 1 ... S.append(s) ... v, e = AC.update_psum(S) ... if e < mp.eps: ... break ... if n > 1000: raise RuntimeError("iteration limit exceeded") >>> print(mp.chop(v - mp.pi ** 2 / 12)) 0.0 Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`:: >>> A = [] >>> AC = mp.cohen_alt() >>> n = 1 >>> while 1: ... A.append( mp.loggamma(1 + mp.one / (2 * n - 1))) ... A.append(-mp.loggamma(1 + mp.one / (2 * n))) ... n += 1 ... v, e = AC.update(A) ... if e < mp.eps: ... break ... 
if n > 1000: raise RuntimeError("iteration limit exceeded") >>> v = mp.exp(v) >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12)) 0.0 ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface:: >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a") >>> print(mp.chop(v - mp.log(2))) 0.0 >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a") >>> print(mp.chop(v - mp.pi / 4)) 0.0 >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a") >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1))) 0.0 """ def __init__(self): self.last=0 def update(self, A): """ This routine applies the convergence acceleration to the list of individual terms. A = sum(a_k, k = 0..infinity) v, e = ...update([a_0, a_1,..., a_k]) output: v current estimate of the series A e an error estimate which is simply the difference between the current estimate and the last estimate. """ n = len(A) d = (3 + self.ctx.sqrt(8)) ** n d = (d + 1 / d) / 2 b = -self.ctx.one c = -d s = 0 for k in xrange(n): c = b - c if k % 2 == 0: s = s + c * A[k] else: s = s - c * A[k] b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one)) value = s / d err = abs(value - self.last) self.last = value return value, err def update_psum(self, S): """ This routine applies the convergence acceleration to the list of partial sums. A = sum(a_k, k = 0..infinity) s_n = sum(a_k ,k = 0..n) v, e = ...update_psum([s_0, s_1,..., s_k]) output: v current estimate of the series A e an error estimate which is simply the difference between the current estimate and the last estimate. """ n = len(S) d = (3 + self.ctx.sqrt(8)) ** n d = (d + 1 / d) / 2 b = self.ctx.one s = 0 for k in xrange(n): b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one)) s += b * S[k] value = s / d err = abs(value - self.last) self.last = value return value, err def cohen_alt(ctx): L = cohen_alt_class() L.ctx = ctx return L cohen_alt.__doc__ = cohen_alt_class.__doc__ defun(cohen_alt) @defun def sumap(ctx, f, interval, integral=None, error=False): r""" Evaluates an infinite series of an analytic summand *f* using the Abel-Plana formula .. math :: \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) + i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt. Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`), the Abel-Plana formula does not require derivatives. However, it only works when `|f(it)-f(-it)|` does not increase too rapidly with `t`. 
**Examples** The Abel-Plana formula is particularly useful when the summand decreases like a power of `k`; for example when the sum is a pure zeta function:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> sumap(lambda k: 1/k**2.5, [1,inf]) 1.34148725725091717975677 >>> zeta(2.5) 1.34148725725091717975677 >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf]) (-3.385361068546473342286084 - 0.7432082105196321803869551j) >>> zeta(2.5+2.5j, 1+1j) (-3.385361068546473342286084 - 0.7432082105196321803869551j) If the series is alternating, numerical quadrature along the real line is likely to give poor results, so it is better to evaluate the first term symbolically whenever possible: >>> n=3; z=-0.75 >>> I = expint(n,-log(z)) >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I)) -0.6917036036904594510141448 >>> polylog(n,z) -0.6917036036904594510141448 """ prec = ctx.prec try: ctx.prec += 10 a, b = interval if b != ctx.inf: raise ValueError("b should be equal to ctx.inf") g = lambda x: f(x+a) if integral is None: i1, err1 = ctx.quad(g, [0,ctx.inf], error=True) else: i1, err1 = integral, 0 j = ctx.j p = ctx.pi * 2 if ctx._is_real_type(i1): h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t) else: h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t) i2, err2 = ctx.quad(h, [0,ctx.inf], error=True) err = err1+err2 v = i1+i2+0.5*g(ctx.mpf(0)) finally: ctx.prec = prec if error: return +v, err return +v @defun def sumem(ctx, f, interval, tol=None, reject=10, integral=None, adiffs=None, bdiffs=None, verbose=False, error=False, _fast_abort=False): r""" Uses the Euler-Maclaurin formula to compute an approximation accurate to within ``tol`` (which defaults to the present epsilon) of the sum .. math :: S = \sum_{k=a}^b f(k) where `(a,b)` are given by ``interval`` and `a` or `b` may be infinite. The approximation is .. math :: S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} + \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!} \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right). The last sum in the Euler-Maclaurin formula is not generally convergent (a notable exception is if `f` is a polynomial, in which case Euler-Maclaurin actually gives an exact result). The summation is stopped as soon as the quotient between two consecutive terms falls below *reject*. That is, by default (*reject* = 10), the summation is continued as long as each term adds at least one decimal. Although not convergent, convergence to a given tolerance can often be "forced" if `b = \infty` by summing up to `a+N` and then applying the Euler-Maclaurin formula to the sum over the range `(a+N+1, \ldots, \infty)`. This procedure is implemented by :func:`~mpmath.nsum`. By default numerical quadrature and differentiation is used. If the symbolic values of the integral and endpoint derivatives are known, it is more efficient to pass the value of the integral explicitly as ``integral`` and the derivatives explicitly as ``adiffs`` and ``bdiffs``. The derivatives should be given as iterables that yield `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`). 
**Examples** Summation of an infinite series, with automatic and symbolic integral and derivative values (the second should be much faster):: >>> from mpmath import * >>> mp.dps = 50; mp.pretty = True >>> sumem(lambda n: 1/n**2, [32, inf]) 0.03174336652030209012658168043874142714132886413417 >>> I = mpf(1)/32 >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999)) >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D) 0.03174336652030209012658168043874142714132886413417 An exact evaluation of a finite polynomial sum:: >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000]) 10500155000624963999742499550000.0 >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001))) 10500155000624963999742499550000 """ tol = tol or +ctx.eps interval = ctx._as_points(interval) a = ctx.convert(interval[0]) b = ctx.convert(interval[-1]) err = ctx.zero prev = 0 M = 10000 if a == ctx.ninf: adiffs = (0 for n in xrange(M)) else: adiffs = adiffs or ctx.diffs(f, a) if b == ctx.inf: bdiffs = (0 for n in xrange(M)) else: bdiffs = bdiffs or ctx.diffs(f, b) orig = ctx.prec #verbose = 1 try: ctx.prec += 10 s = ctx.zero for k, (da, db) in enumerate(izip(adiffs, bdiffs)): if k & 1: term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1) mag = abs(term) if verbose: print("term", k, "magnitude =", ctx.nstr(mag)) if k > 4 and mag < tol: s += term break elif k > 4 and abs(prev) / mag < reject: err += mag if _fast_abort: return [s, (s, err)][error] if verbose: print("Failed to converge") break else: s += term prev = term # Endpoint correction if a != ctx.ninf: s += f(a)/2 if b != ctx.inf: s += f(b)/2 # Tail integral if verbose: print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b))) if integral: s += integral else: integral, ierr = ctx.quad(f, interval, error=True) if verbose: print("Integration error:", ierr) s += integral err += ierr finally: ctx.prec = orig if error: return s, err else: return s @defun def adaptive_extrapolation(ctx, update, emfun, kwargs): option = kwargs.get if ctx._fixed_precision: tol = option('tol', ctx.eps*2**10) else: tol = option('tol', ctx.eps/2**10) verbose = option('verbose', False) maxterms = option('maxterms', ctx.dps*10) method = set(option('method', 'r+s').split('+')) skip = option('skip', 0) steps = iter(option('steps', xrange(10, 10**9, 10))) strict = option('strict') #steps = (10 for i in xrange(1000)) summer=[] if 'd' in method or 'direct' in method: TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False else: TRY_RICHARDSON = ('r' in method) or ('richardson' in method) TRY_SHANKS = ('s' in method) or ('shanks' in method) TRY_EULER_MACLAURIN = ('e' in method) or \ ('euler-maclaurin' in method) def init_levin(m): variant = kwargs.get("levin_variant", "u") if isinstance(variant, str): if variant == "all": variant = ["u", "v", "t"] else: variant = [variant] for s in variant: L = levin_class(method = m, variant = s) L.ctx = ctx L.name = m + "(" + s + ")" summer.append(L) if ('l' in method) or ('levin' in method): init_levin("levin") if ('sidi' in method): init_levin("sidi") if ('a' in method) or ('alternating' in method): L = cohen_alt_class() L.ctx = ctx L.name = "alternating" summer.append(L) last_richardson_value = 0 shanks_table = [] index = 0 step = 10 partial = [] best = ctx.zero orig = ctx.prec try: if 'workprec' in kwargs: ctx.prec = kwargs['workprec'] elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0: ctx.prec = (ctx.prec+10) * 4 else: ctx.prec += 30 while 1: if index >= maxterms: break # Get new batch of terms try: step = next(steps) 
except StopIteration: pass if verbose: print("-"*70) print("Adding terms #%i-#%i" % (index, index+step)) update(partial, xrange(index, index+step)) index += step # Check direct error best = partial[-1] error = abs(best - partial[-2]) if verbose: print("Direct error: %s" % ctx.nstr(error)) if error <= tol: return best # Check each extrapolation method if TRY_RICHARDSON: value, maxc = ctx.richardson(partial) # Convergence richardson_error = abs(value - last_richardson_value) if verbose: print("Richardson error: %s" % ctx.nstr(richardson_error)) # Convergence if richardson_error <= tol: return value last_richardson_value = value # Unreliable due to cancellation if ctx.eps*maxc > tol: if verbose: print("Ran out of precision for Richardson") TRY_RICHARDSON = False if richardson_error < error: error = richardson_error best = value if TRY_SHANKS: shanks_table = ctx.shanks(partial, shanks_table, randomized=True) row = shanks_table[-1] if len(row) == 2: est1 = row[-1] shanks_error = 0 else: est1, maxc, est2 = row[-1], abs(row[-2]), row[-3] shanks_error = abs(est1-est2) if verbose: print("Shanks error: %s" % ctx.nstr(shanks_error)) if shanks_error <= tol: return est1 if ctx.eps*maxc > tol: if verbose: print("Ran out of precision for Shanks") TRY_SHANKS = False if shanks_error < error: error = shanks_error best = est1 for L in summer: est, lerror = L.update_psum(partial) if verbose: print("%s error: %s" % (L.name, ctx.nstr(lerror))) if lerror <= tol: return est if lerror < error: error = lerror best = est if TRY_EULER_MACLAURIN: if ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])).ae(-1): if verbose: print ("NOT using Euler-Maclaurin: the series appears" " to be alternating, so numerical\n quadrature" " will most likely fail") TRY_EULER_MACLAURIN = False else: value, em_error = emfun(index, tol) value += partial[-1] if verbose: print("Euler-Maclaurin error: %s" % ctx.nstr(em_error)) if em_error <= tol: return value if em_error < error: best = value finally: ctx.prec = orig if strict: raise ctx.NoConvergence if verbose: print("Warning: failed to converge to target accuracy") return best @defun def nsum(ctx, f, *intervals, **options): r""" Computes the sum .. math :: S = \sum_{k=a}^b f(k) where `(a, b)` = *interval*, and where `a = -\infty` and/or `b = \infty` are allowed, or more generally .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n) if multiple intervals are given. Two examples of infinite series that can be summed by :func:`~mpmath.nsum`, where the first converges rapidly and the second converges slowly, are:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> nsum(lambda n: 1/fac(n), [0, inf]) 2.71828182845905 >>> nsum(lambda n: 1/n**2, [1, inf]) 1.64493406684823 When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to accurately estimate the sums of slowly convergent series. If the series is finite, :func:`~mpmath.nsum` currently does not attempt to perform any extrapolation, and simply calls :func:`~mpmath.fsum`. Multidimensional infinite series are reduced to a single-dimensional series over expanding hypercubes; if both infinite and finite dimensions are present, the finite ranges are moved innermost. For more advanced control over the summation order, use nested calls to :func:`~mpmath.nsum`, or manually rewrite the sum as a single-dimensional series. **Options** *tol* Desired maximum final error. Defaults roughly to the epsilon of the working precision. *method* Which summation algorithm to use (described below). 
Default: ``'richardson+shanks'``. *maxterms* Cancel after at most this many terms. Default: 10*dps. *steps* An iterable giving the number of terms to add between each extrapolation attempt. The default sequence is [10, 20, 30, 40, ...]. For example, if you know that approximately 100 terms will be required, efficiency might be improved by setting this to [100, 10]. Then the first extrapolation will be performed after 100 terms, the second after 110, etc. *verbose* Print details about progress. *ignore* If enabled, any term that raises ``ArithmeticError`` or ``ValueError`` (e.g. through division by zero) is replaced by a zero. This is convenient for lattice sums with a singular term near the origin. **Methods** Unfortunately, an algorithm that can efficiently sum any infinite series does not exist. :func:`~mpmath.nsum` implements several different algorithms that each work well in different cases. The *method* keyword argument selects a method. The default method is ``'r+s'``, i.e. both Richardson extrapolation and Shanks transformation is attempted. A slower method that handles more cases is ``'r+s+e'``. For very high precision summation, or if the summation needs to be fast (for example if multiple sums need to be evaluated), it is a good idea to investigate which one method works best and only use that. ``'richardson'`` / ``'r'``: Uses Richardson extrapolation. Provides useful extrapolation when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)` for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for additional information. ``'shanks'`` / ``'s'``: Uses Shanks transformation. Typically provides useful extrapolation when `f(k) \sim c^k` or when successive terms alternate signs. Is able to sum some divergent series. See :func:`~mpmath.shanks` for additional information. ``'levin'`` / ``'l'``: Uses the Levin transformation. It performs better than the Shanks transformation for logarithmic convergent or alternating divergent series. The ``'levin_variant'``-keyword selects the variant. Valid choices are "u", "t", "v" and "all" whereby "all" uses all three u,t and v simultanously (This is good for performance comparison in conjunction with "verbose=True"). Instead of the Levin transform one can also use the Sidi-S transform by selecting the method ``'sidi'``. See :func:`~mpmath.levin` for additional details. ``'alternating'`` / ``'a'``: This is the convergence acceleration of alternating series developped by Cohen, Villegras and Zagier. See :func:`~mpmath.cohen_alt` for additional details. ``'euler-maclaurin'`` / ``'e'``: Uses the Euler-Maclaurin summation formula to approximate the remainder sum by an integral. This requires high-order numerical derivatives and numerical integration. The advantage of this algorithm is that it works regardless of the decay rate of `f`, as long as `f` is sufficiently smooth. See :func:`~mpmath.sumem` for additional information. ``'direct'`` / ``'d'``: Does not perform any extrapolation. This can be used (and should only be used for) rapidly convergent series. The summation automatically stops when the terms decrease below the target tolerance. 
**Basic examples** A finite sum:: >>> nsum(lambda k: 1/k, [1, 6]) 2.45 Summation of a series going to negative infinity and a doubly infinite series:: >>> nsum(lambda k: 1/k**2, [-inf, -1]) 1.64493406684823 >>> nsum(lambda k: 1/(1+k**2), [-inf, inf]) 3.15334809493716 :func:`~mpmath.nsum` handles sums of complex numbers:: >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf]) (1.6 + 0.8j) The following sum converges very rapidly, so it is most efficient to sum it by disabling convergence acceleration:: >>> mp.dps = 1000 >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf], ... method='direct') >>> b = (cos(1)+sin(1))/4 >>> abs(a-b) < mpf('1e-998') True **Examples with Richardson extrapolation** Richardson extrapolation works well for sums over rational functions, as well as their alternating counterparts:: >>> mp.dps = 50 >>> nsum(lambda k: 1 / k**3, [1, inf], ... method='richardson') 1.2020569031595942853997381615114499907649862923405 >>> zeta(3) 1.2020569031595942853997381615114499907649862923405 >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf], ... method='richardson') 2.9348022005446793094172454999380755676568497036204 >>> pi**2/2-2 2.9348022005446793094172454999380755676568497036204 >>> nsum(lambda k: (-1)**k / k**3, [1, inf], ... method='richardson') -0.90154267736969571404980362113358749307373971925537 >>> -3*zeta(3)/4 -0.90154267736969571404980362113358749307373971925538 **Examples with Shanks transformation** The Shanks transformation works well for geometric series and typically provides excellent acceleration for Taylor series near the border of their disk of convergence. Here we apply it to a series for `\log(2)`, which can be seen as the Taylor series for `\log(1+x)` with `x = 1`:: >>> nsum(lambda k: -(-1)**k/k, [1, inf], ... method='shanks') 0.69314718055994530941723212145817656807550013436025 >>> log(2) 0.69314718055994530941723212145817656807550013436025 Here we apply it to a slowly convergent geometric series:: >>> nsum(lambda k: mpf('0.995')**k, [0, inf], ... method='shanks') 200.0 Finally, Shanks' method works very well for alternating series where `f(k) = (-1)^k g(k)`, and often does so regardless of the exact decay rate of `g(k)`:: >>> mp.dps = 15 >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf], ... method='shanks') 0.765147024625408 >>> (2-sqrt(2))*zeta(1.5)/2 0.765147024625408 The following slowly convergent alternating series has no known closed-form value. Evaluating the sum a second time at higher precision indicates that the value is probably correct:: >>> nsum(lambda k: (-1)**k / log(k), [2, inf], ... method='shanks') 0.924299897222939 >>> mp.dps = 30 >>> nsum(lambda k: (-1)**k / log(k), [2, inf], ... method='shanks') 0.92429989722293885595957018136 **Examples with Levin transformation** The following example calculates Euler's constant as the constant term in the Laurent expansion of zeta(s) at s=1. This sum converges extremly slow because of the logarithmic convergence behaviour of the Dirichlet series for zeta. >>> mp.dps = 30 >>> z = mp.mpf(10) ** (-10) >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z >>> print(mp.chop(a - mp.euler, tol = 1e-10)) 0.0 Now we sum the zeta function outside its range of convergence (attention: This does not work at the negative integers!): >>> mp.dps = 15 >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v") >>> print(mp.chop(w - mp.zeta(-2-3j))) 0.0 The next example resummates an asymptotic series expansion of an integral related to the exponential integral. 
>>> mp.dps = 15 >>> z = mp.mpf(10) >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf]) >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t") >>> print(mp.chop(w - exact)) 0.0 Following highly divergent asymptotic expansion needs some care. Firstly we need copious amount of working precision. Secondly the stepsize must not be chosen to large, otherwise nsum may miss the point where the Levin transform converges and reach the point where only numerical garbage is produced due to numerical cancellation. >>> mp.dps = 15 >>> z = mp.mpf(2) >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi) >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)]) >>> print(mp.chop(w - exact)) 0.0 The hypergeoemtric function can also be summed outside its range of convergence: >>> mp.dps = 15 >>> z = 2 + 1j >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z) >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n)) >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)]) >>> print(mp.chop(exact-v)) 0.0 **Examples with Cohen's alternating series resummation** The next example sums the alternating zeta function: >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a") >>> print(mp.chop(v - mp.log(2))) 0.0 The derivate of the alternating zeta function outside its range of convergence: >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a") >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1))) 0.0 **Examples with Euler-Maclaurin summation** The sum in the following example has the wrong rate of convergence for either Richardson or Shanks to be effective. >>> f = lambda k: log(k)/k**2.5 >>> mp.dps = 15 >>> nsum(f, [1, inf], method='euler-maclaurin') 0.38734195032621 >>> -diff(zeta, 2.5) 0.38734195032621 Increasing ``steps`` improves speed at higher precision:: >>> mp.dps = 50 >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250]) 0.38734195032620997271199237593105101319948228874688 >>> -diff(zeta, 2.5) 0.38734195032620997271199237593105101319948228874688 **Divergent series** The Shanks transformation is able to sum some *divergent* series. In particular, it is often able to sum Taylor series beyond their radius of convergence (this is due to a relation between the Shanks transformation and Pade approximations; see :func:`~mpmath.pade` for an alternative way to evaluate divergent Taylor series). Furthermore the Levin-transform examples above contain some divergent series resummation. Here we apply it to `\log(1+x)` far outside the region of convergence:: >>> mp.dps = 50 >>> nsum(lambda k: -(-9)**k/k, [1, inf], ... method='shanks') 2.3025850929940456840179914546843642076011014886288 >>> log(10) 2.3025850929940456840179914546843642076011014886288 A particular type of divergent series that can be summed using the Shanks transformation is geometric series. 
The result is the same as using the closed-form formula for an infinite geometric series:: >>> mp.dps = 15 >>> for n in range(-8, 8): ... if n == 1: ... continue ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n), ... nsum(lambda k: n**k, [0, inf], method='shanks'))) ... -8.0 0.111111111111111 0.111111111111111 -7.0 0.125 0.125 -6.0 0.142857142857143 0.142857142857143 -5.0 0.166666666666667 0.166666666666667 -4.0 0.2 0.2 -3.0 0.25 0.25 -2.0 0.333333333333333 0.333333333333333 -1.0 0.5 0.5 0.0 1.0 1.0 2.0 -1.0 -1.0 3.0 -0.5 -0.5 4.0 -0.333333333333333 -0.333333333333333 5.0 -0.25 -0.25 6.0 -0.2 -0.2 7.0 -0.166666666666667 -0.166666666666667 **Multidimensional sums** Any combination of finite and infinite ranges is allowed for the summation indices:: >>> mp.dps = 15 >>> nsum(lambda x,y: x+y, [2,3], [4,5]) 28.0 >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf]) 6.0 >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3]) 6.0 >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4]) 7.0 >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf]) 7.0 >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf]) 7.0 Some nice examples of double series with analytic solutions or reductions to single-dimensional series (see [1]):: >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf]) 1.60669515241529 >>> nsum(lambda n: 1/(2**n-1), [1,inf]) 1.60669515241529 >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf]) 0.278070510848213 >>> pi*(pi-3*ln2)/12 0.278070510848213 >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf]) 0.129319852864168 >>> altzeta(2) - altzeta(1) 0.129319852864168 >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf]) 0.0790756439455825 >>> altzeta(3) - altzeta(2) 0.0790756439455825 >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)), ... [1,inf], [1,inf]) 0.28125 >>> mpf(9)/32 0.28125 >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j), ... [1,inf], [1,inf], workprec=400) 1.64493406684823 >>> zeta(2) 1.64493406684823 A hard example of a multidimensional sum is the Madelung constant in three dimensions (see [2]). The defining sum converges very slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to obtain an accurate value through convergence acceleration. The second evaluation below uses a much more efficient, rapidly convergent 2D sum:: >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5, ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True) -1.74756459463318 >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \ ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf]) -1.74756459463318 Another example of a lattice sum in 2D:: >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf], ... [-inf,inf], ignore=True) -2.1775860903036 >>> -pi*ln2 -2.1775860903036 An example of an Eisenstein series:: >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf], ... ignore=True) (3.1512120021539 + 0.0j) **References** 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html, 2. 
[Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html """ infinite, g = standardize(ctx, f, intervals, options) if not infinite: return +g() def update(partial_sums, indices): if partial_sums: psum = partial_sums[-1] else: psum = ctx.zero for k in indices: psum = psum + g(ctx.mpf(k)) partial_sums.append(psum) prec = ctx.prec def emfun(point, tol): workprec = ctx.prec ctx.prec = prec + 10 v = ctx.sumem(g, [point, ctx.inf], tol, error=1) ctx.prec = workprec return v return +ctx.adaptive_extrapolation(update, emfun, options) def wrapsafe(f): def g(*args): try: return f(*args) except (ArithmeticError, ValueError): return 0 return g def standardize(ctx, f, intervals, options): if options.get("ignore"): f = wrapsafe(f) finite = [] infinite = [] for k, points in enumerate(intervals): a, b = ctx._as_points(points) if b < a: return False, (lambda: ctx.zero) if a == ctx.ninf or b == ctx.inf: infinite.append((k, (a,b))) else: finite.append((k, (int(a), int(b)))) if finite: f = fold_finite(ctx, f, finite) if not infinite: return False, lambda: f(*([0]*len(intervals))) if infinite: f = standardize_infinite(ctx, f, infinite) f = fold_infinite(ctx, f, infinite) args = [0] * len(intervals) d = infinite[0][0] def g(k): args[d] = k return f(*args) return True, g # backwards compatible itertools.product def cartesian_product(args): pools = map(tuple, args) result = [[]] for pool in pools: result = [x+[y] for x in result for y in pool] for prod in result: yield tuple(prod) def fold_finite(ctx, f, intervals): if not intervals: return f indices = [v[0] for v in intervals] points = [v[1] for v in intervals] ranges = [xrange(a, b+1) for (a,b) in points] def g(*args): args = list(args) s = ctx.zero for xs in cartesian_product(ranges): for dim, x in zip(indices, xs): args[dim] = ctx.mpf(x) s += f(*args) return s #print "Folded finite", indices return g # Standardize each interval to [0,inf] def standardize_infinite(ctx, f, intervals): if not intervals: return f dim, [a,b] = intervals[-1] if a == ctx.ninf: if b == ctx.inf: def g(*args): args = list(args) k = args[dim] if k: s = f(*args) args[dim] = -k s += f(*args) return s else: return f(*args) else: def g(*args): args = list(args) args[dim] = b - args[dim] return f(*args) else: def g(*args): args = list(args) args[dim] += a return f(*args) #print "Standardized infinity along dimension", dim, a, b return standardize_infinite(ctx, g, intervals[:-1]) def fold_infinite(ctx, f, intervals): if len(intervals) < 2: return f dim1 = intervals[-2][0] dim2 = intervals[-1][0] # Assume intervals are [0,inf] x [0,inf] x ... def g(*args): args = list(args) #args.insert(dim2, None) n = int(args[dim1]) s = ctx.zero #y = ctx.mpf(n) args[dim2] = ctx.mpf(n) #y for x in xrange(n+1): args[dim1] = ctx.mpf(x) s += f(*args) args[dim1] = ctx.mpf(n) #ctx.mpf(n) for y in xrange(n): args[dim2] = ctx.mpf(y) s += f(*args) return s #print "Folded infinite from", len(intervals), "to", (len(intervals)-1) return fold_infinite(ctx, g, intervals[:-1]) @defun def nprod(ctx, f, interval, nsum=False, **kwargs): r""" Computes the product .. math :: P = \prod_{k=a}^b f(k) where `(a, b)` = *interval*, and where `a = -\infty` and/or `b = \infty` are allowed. By default, :func:`~mpmath.nprod` uses the same extrapolation methods as :func:`~mpmath.nsum`, except applied to the partial products rather than partial sums, and the same keyword options as for :func:`~mpmath.nsum` are supported. If ``nsum=True``, the product is instead computed via :func:`~mpmath.nsum` as .. 
math :: P = \exp\left( \sum_{k=a}^b \log(f(k)) \right). This is slower, but can sometimes yield better results. It is also required (and used automatically) when Euler-Maclaurin summation is requested. **Examples** A simple finite product:: >>> from mpmath import * >>> mp.dps = 25; mp.pretty = True >>> nprod(lambda k: k, [1, 4]) 24.0 A large number of infinite products have known exact values, and can therefore be used as a reference. Most of the following examples are taken from MathWorld [1]. A few infinite products with simple values are:: >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf]) 3.141592653589793238462643 >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf]) 2.0 >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf]) 0.6666666666666666666666667 >>> nprod(lambda k: (1-1/k**2), [2, inf]) 0.5 Next, several more infinite products with more complicated values:: >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6) 5.180668317897115748416626 5.180668317897115748416626 >>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi) 0.2720290549821331629502366 0.2720290549821331629502366 >>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf]) 0.8480540493529003921296502 >>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi)) 0.8480540493529003921296502 >>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf]) 1.848936182858244485224927 >>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi 1.848936182858244485224927 >>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi) 0.9190194775937444301739244 0.9190194775937444301739244 >>> nprod(lambda k: (1-1/k**6), [2, inf]) 0.9826842777421925183244759 >>> (1+cosh(pi*sqrt(3)))/(12*pi**2) 0.9826842777421925183244759 >>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi) 1.838038955187488860347849 1.838038955187488860347849 >>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf]) 1.447255926890365298959138 >>> exp(1+euler/2)/sqrt(2*pi) 1.447255926890365298959138 The following two products are equivalent and can be evaluated in terms of a Jacobi theta function. Pi can be replaced by any value (as long as convergence is preserved):: >>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf]) 0.3838451207481672404778686 >>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf]) 0.3838451207481672404778686 >>> jtheta(4,0,1/pi) 0.3838451207481672404778686 This product does not have a known closed form value:: >>> nprod(lambda k: (1-1/2**k), [1, inf]) 0.2887880950866024212788997 A product taken from `-\infty`:: >>> nprod(lambda k: 1-k**(-3), [-inf,-2]) 0.8093965973662901095786805 >>> cosh(pi*sqrt(3)/2)/(3*pi) 0.8093965973662901095786805 A doubly infinite product:: >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf]) 23.41432688231864337420035 >>> exp(pi/tanh(pi)) 23.41432688231864337420035 A product requiring the use of Euler-Maclaurin summation to compute an accurate value:: >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e') 0.696155111336231052898125 **References** 1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html """ if nsum or ('e' in kwargs.get('method', '')): orig = ctx.prec try: # TODO: we are evaluating log(1+eps) -> eps, which is # inaccurate. This currently works because nsum greatly # increases the working precision. But we should be # more intelligent and handle the precision here. 
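            # Rewriting the product as a sum of logarithms lets nprod reuse
            # all of nsum's acceleration machinery (including Euler-Maclaurin
            # summation); the final ctx.exp() below undoes the transformation.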
ctx.prec += 10 v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs) finally: ctx.prec = orig return +ctx.exp(v) a, b = ctx._as_points(interval) if a == ctx.ninf: if b == ctx.inf: return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs) return ctx.nprod(f, [-b, ctx.inf], **kwargs) elif b != ctx.inf: return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1)) a = int(a) def update(partial_products, indices): if partial_products: pprod = partial_products[-1] else: pprod = ctx.one for k in indices: pprod = pprod * f(a + ctx.mpf(k)) partial_products.append(pprod) return +ctx.adaptive_extrapolation(update, None, kwargs) @defun def limit(ctx, f, x, direction=1, exp=False, **kwargs): r""" Computes an estimate of the limit .. math :: \lim_{t \to x} f(t) where `x` may be finite or infinite. For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for consecutive integer values of `n`, where the approach direction `d` may be specified using the *direction* keyword argument. For infinite `x`, :func:`~mpmath.limit` evaluates values of `f(\mathrm{sign}(x) \cdot n)`. If the approach to the limit is not sufficiently fast to give an accurate estimate directly, :func:`~mpmath.limit` attempts to find the limit using Richardson extrapolation or the Shanks transformation. You can select between these methods using the *method* keyword (see documentation of :func:`~mpmath.nsum` for more information). **Options** The following options are available with essentially the same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*, *steps*, *verbose*. If the option *exp=True* is set, `f` will be sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots` instead of the linearly spaced points `n = 1, 2, 3, \ldots`. This can sometimes improve the rate of convergence so that :func:`~mpmath.limit` may return a more accurate answer (and faster). However, do note that this can only be used if `f` supports fast and accurate evaluation for arguments that are extremely close to the limit point (or if infinite, very large arguments). **Examples** A basic evaluation of a removable singularity:: >>> from mpmath import * >>> mp.dps = 30; mp.pretty = True >>> limit(lambda x: (x-sin(x))/x**3, 0) 0.166666666666666666666666666667 Computing the exponential function using its limit definition:: >>> limit(lambda n: (1+3/n)**n, inf) 20.0855369231876677409285296546 >>> exp(3) 20.0855369231876677409285296546 A limit for `\pi`:: >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2 >>> limit(f, inf) 3.14159265358979323846264338328 Calculating the coefficient in Stirling's formula:: >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf) 2.50662827463100050241576528481 >>> sqrt(2*pi) 2.50662827463100050241576528481 Evaluating Euler's constant `\gamma` using the limit representation .. math :: \gamma = \lim_{n \rightarrow \infty } \left[ \left( \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right] (which converges notoriously slowly):: >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n) >>> limit(f, inf) 0.577215664901532860606512090082 >>> +euler 0.577215664901532860606512090082 With default settings, the following limit converges too slowly to be evaluated accurately. 
    Changing to exponential sampling, however, gives a perfect result::

        >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
        >>> limit(f, inf)
        0.992831158558330281129249686491
        >>> limit(f, inf, exp=True)
        1.0

    """
    if ctx.isinf(x):
        direction = ctx.sign(x)
        g = lambda k: f(ctx.mpf(k+1)*direction)
    else:
        direction *= ctx.one
        g = lambda k: f(x + direction/(k+1))
    if exp:
        h = g
        g = lambda k: h(2**k)

    def update(values, indices):
        for k in indices:
            values.append(g(k+1))

    # XXX: steps used by nsum don't work well
    if 'steps' not in kwargs:
        kwargs['steps'] = [10]

    return +ctx.adaptive_extrapolation(update, None, kwargs)
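# The folding helpers above (fold_finite, standardize_infinite, fold_infinite)
# do the real work for multidimensional sums: after every index range has been
# mapped onto [0, inf], pairs of infinite ranges are collapsed by summing, for
# each outer index n, the L-shaped "shell" of lattice points with
# max(x, y) == n, so that adaptive_extrapolation only ever sees an ordinary
# one-dimensional sequence of partial sums.  The guarded sketch below is a
# hypothetical usage example (it never runs on import) that checks this shell
# decomposition by hand against a direct two-dimensional evaluation.
if __name__ == "__main__":
    from mpmath import mp, mpf, nsum, inf
    mp.dps = 15
    f = lambda m, n: 1 / 2**((m + 1)*(n + 1))   # summed over m, n >= 0
    def shell(n):
        # all lattice points with max(x, y) == n
        s = mpf(0)
        for x in range(int(n) + 1):
            s += f(mpf(x), n)
        for y in range(int(n)):
            s += f(n, mpf(y))
        return s
    print(nsum(f, [0, inf], [0, inf]))   # direct 2D summation
    print(nsum(shell, [0, inf]))         # same value via the shell folding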
73,288
33.635633
169
py